query            stringlengths    7 – 3.85k
document         stringlengths    11 – 430k
metadata         dict
negatives        sequencelengths  0 – 101
negative_scores  sequencelengths  0 – 101
document_score   stringlengths    3 – 10
document_rank    stringclasses    102 values
GinLogrus is a logger middleware which uses logrus to replace gin's default logger
func GinLogrus() gin.HandlerFunc {
	return func(c *gin.Context) {
		start := time.Now()
		// some evil middlewares modify this values
		path := c.Request.URL.Path
		c.Next()
		end := time.Since(start)
		status := c.Writer.Status()
		entry := logrus.WithFields(logrus.Fields{
			"path":      path,
			"method":    c.Request.Method,
			"clientIP":  c.ClientIP(),
			"userAgent": c.Request.UserAgent(),
			"requestID": c.MustGet(RequestIDKey),
			"status":    status,
			"size":      c.Writer.Size(),
			"latency":   fmt.Sprintf("%fms", float64(end.Seconds())*1000.0),
		})
		if len(c.Errors) > 0 {
			// Append error field if this is an erroneous request.
			entry.Error(c.Errors.String())
		} else {
			if status > 499 {
				entry.Error()
			} else {
				entry.Info()
			}
		}
	}
}
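A minimal usage sketch for this middleware, assuming the same package also provides a RequestID middleware (hypothetical here) that sets RequestIDKey before GinLogrus runs — GinLogrus reads that key with MustGet, which panics when it is missing:

package main

import (
	"github.com/gin-gonic/gin"
)

func main() {
	// gin.New() starts without gin's default logger, so GinLogrus
	// is the only request logger attached to the engine.
	r := gin.New()
	r.Use(gin.Recovery())
	// RequestID is a hypothetical middleware that calls
	// c.Set(RequestIDKey, ...) so MustGet in GinLogrus succeeds.
	r.Use(RequestID())
	r.Use(GinLogrus())

	r.GET("/ping", func(c *gin.Context) {
		c.String(200, "pong")
	})
	_ = r.Run(":8080")
}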
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Logrus() echo.MiddlewareFunc {\n\treturn LogrusDefaultConfig(DefaultLoggerConfig)\n}", "func GinLogger(log *logrus.Logger) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\n\t\t// other handler can change c.Path so:\n\t\tpath := c.Request.URL.Path\n\t\tmethod := c.Request.Method\n\t\tstart := time.Now()\n\t\tc.Next()\n\t\tstop := time.Since(start)\n\t\tlatency := int(math.Ceil(float64(stop.Nanoseconds()) / 1000000.0))\n\t\tstatusCode := c.Writer.Status()\n\t\tclientIP := c.ClientIP()\n\t\tclientUserAgent := c.Request.UserAgent()\n\t\treferer := c.Request.Referer()\n\t\trequestS := c.Request.ContentLength\n\t\tresponseS := c.Writer.Size()\n\t\tif requestS < 0 {\n\t\t\trequestS = 0\n\t\t}\n\t\trequest := &HTTPRequest{\n\t\t\tRequestMethod: method,\n\t\t\tRequestURL: path,\n\t\t\tRemoteIP: clientIP,\n\t\t\tReferer: referer,\n\t\t\tUserAgent: clientUserAgent,\n\t\t\tResponseSize: strconv.Itoa(responseS),\n\t\t\tLatency: strconv.Itoa(latency),\n\t\t\tStatus: strconv.Itoa(statusCode),\n\t\t\tRequestSize: strconv.FormatInt(requestS, 10),\n\t\t}\n\n\t\tfields := logrus.Fields{\"httpRequest\": request}\n\n\t\ttraceHeader := c.GetHeader(\"X-Request-ID\")\n\t\tif traceHeader != \"\" {\n\t\t\tfields[\"trace\"] = traceHeader\n\t\t}\n\n\t\tentry := log.WithFields(fields)\n\n\t\tif len(c.Errors) > 0 {\n\t\t\tentry.Error(c.Errors.ByType(gin.ErrorTypePrivate).String())\n\t\t} else {\n\t\t\tmsg := fmt.Sprintf(\"[%s - %s] %d\", c.Request.Method, path, statusCode)\n\t\t\tif statusCode > 399 {\n\t\t\t\tentry.Error(msg)\n\t\t\t} else {\n\t\t\t\tentry.Info(msg)\n\t\t\t}\n\t\t}\n\t}\n}", "func (r *RouterGroup) UseLogger() {\n\tr.gin.Use(gin.Logger())\n}", "func LoggerM(opt LoggerOptions) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tstart := time.Now()\n\t\tpath := c.Request.URL.Path\n\t\tmethod := c.Request.Method\n\t\tclientIP := c.ClientIP()\n\n\t\tlogS := NewLogger()\n\t\tlogS.SetFormatter(&logrus.JSONFormatter{})\n\t\tlogS.SetOutput(os.Stdout)\n\t\tlogS.SetLevel(logrus.InfoLevel)\n\n\t\tif opt.EnableDebug {\n\t\t\tlogS.SetLevel(logrus.DebugLevel)\n\t\t}\n\t\tc.Set(\"logger\", logS)\n\n\t\tinfo := logrus.Fields{\n\t\t\t\"start\": start,\n\t\t\t\"path\": path,\n\t\t\t\"method\": method,\n\t\t\t\"client_ip\": clientIP,\n\t\t\t\"version\": opt.Version,\n\t\t\t\"application\": opt.Application,\n\t\t}\n\n\t\t// Records request parameters\n\t\tparams := c.Request.URL.Query()\n\t\tif len(params) != 0 {\n\t\t\tinfo[\"params\"] = params\n\t\t}\n\n\t\t// Records request body\n\t\trequestBody, err := ioutil.ReadAll(c.Request.Body)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tif len(requestBody) != 0 {\n\t\t\tc.Request.Body = ioutil.NopCloser(bytes.NewBuffer(requestBody))\n\t\t\tif c.Request.Header.Get(\"Content-Type\") == \"application/json\" {\n\t\t\t\tinfo[\"request_body\"] = json.RawMessage(requestBody)\n\t\t\t} else {\n\t\t\t\tinfo[\"request_body\"] = string(requestBody)\n\t\t\t}\n\t\t}\n\n\t\t// Replace gin writer for backup writer stream\n\t\twriter := new(multiWriter)\n\t\twriter.ctx = c\n\t\twriter.ResponseWriter = c.Writer\n\t\tc.Writer = writer\n\n\t\tc.Next()\n\n\t\tstatusCode := c.Writer.Status()\n\t\trequestID := c.GetString(\"request_id\")\n\t\tduration := Milliseconds(time.Since(start))\n\t\tinfo[\"status_code\"] = statusCode\n\t\tinfo[\"request_id\"] = requestID\n\t\tinfo[\"runtime\"] = duration\n\n\t\t// Get response from multiWriter\n\t\tresp, _ := c.Get(\"response\")\n\t\tif buf, ok := resp.(map[string]interface{}); ok {\n\t\t\tinfo[\"response\"] = buf\n\t\t} else 
{\n\t\t\tinfo[\"response\"] = resp\n\t\t}\n\n\t\t// Writes pipeline from handlers\n\t\tpipeline := make(map[string]interface{})\n\t\tfor k, v := range logS.pipeline {\n\t\t\tpipeline[k] = v\n\t\t}\n\t\tif len(pipeline) != 0 {\n\t\t\tinfo[\"pipeline\"] = pipeline\n\t\t}\n\n\t\tfilterBodyTooLong(info)\n\n\t\tif err, ok := c.Get(\"error\"); ok {\n\t\t\tinfo[\"error\"] = fmt.Sprintf(\"%v\", err)\n\t\t\tif opt.EnableDebug {\n\t\t\t\tif e, ok := err.(*errors.Error); ok && e.Stack() != nil {\n\t\t\t\t\tinfo[\"error\"] = fmt.Sprintf(\"%+v\", err.(*errors.Error).Stack())\n\t\t\t\t}\n\t\t\t}\n\t\t\tlogS.WithFields(info).Error(\"error occurred\")\n\t\t\treturn\n\t\t}\n\n\t\tif opt.EnableOutput {\n\t\t\tlogS.WithFields(info).Info(\"finished\")\n\t\t}\n\t}\n}", "func (e *Engine) UseLogger() {\n\te.gin.Use(gin.Logger())\n}", "func Logger() HandlerFunc {\n\treturn gin.Logger()\n}", "func RequestLogger(timeFormat string, utc bool) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tstart := time.Now()\n\t\t// some evil middlewares modify this values\n\t\tpath := c.Request.URL.Path\n\t\tc.Next()\n\n\t\tend := time.Now()\n\t\tlatency := end.Sub(start)\n\t\tif utc {\n\t\t\tend = end.UTC()\n\t\t}\n\n\t\tuuid, _ := c.Get(ContextKey)\n\n\t\tentry := Logger(c).WithFields(logrus.Fields{\n\t\t\t\"status\": c.Writer.Status(),\n\t\t\t\"method\": c.Request.Method,\n\t\t\t\"path\": path,\n\t\t\t\"ip\": c.ClientIP(),\n\t\t\t\"latency\": latency,\n\t\t\t\"user-agent\": c.Request.UserAgent(),\n\t\t\t\"time\": end.Format(timeFormat),\n\t\t\t\"uuid\": uuid,\n\t\t})\n\n\t\tif len(c.Errors) > 0 {\n\t\t\t// Append error field if this is an erroneous request.\n\t\t\tentry.Error(c.Errors.String())\n\t\t} else {\n\t\t\tentry.Info()\n\t\t}\n\t}\n}", "func Logger(l log.Logger) gin.HandlerFunc {\n return func(c *gin.Context) {\n // Start timer\n start := time.Now()\n path := c.Request.URL.Path\n\n // Process request\n c.Next()\n\n // Stop timer\n end := time.Now()\n latency := end.Sub(start)\n\n // clientIP := c.ClientIP()\n method := c.Request.Method\n statusCode := c.Writer.Status()\n comment := c.Errors.ByType(gin.ErrorTypePrivate).String()\n\n l.Info(\"Request\", \"method\", method, \"status\", statusCode, \"latency\", latency, \"path\", path, \"comment\", comment)\n }\n}", "func WithGinLog(f string) option {\n\treturn func(s *httpServer) {\n\t\tif f != \"\" {\n\t\t\ts.ginLog = f\n\t\t}\n\t}\n}", "func logger(c *gin.Context) {\n\tvar start time.Time\n\tconst logFormat = \"%s \" + // Timestamp\n\t\t\"%s \" + // Client ip\n\t\t\"%d \" + // Response code\n\t\t\"%v \" + // Response Duration\n\t\t`\"%s %s %s\" ` + // Request method, path and protocol\n\t\t\"%d \" // Response size\n\n\tdefer func() {\n\t\tconst INTERNAL_SERVER_ERROR = 500\n\t\tif err := recover(); err != nil {\n\t\t\tduration := time.Now().Sub(start)\n\t\t\tlog.Printf(logFormat+\"\\n%v\\n%s\",\n\t\t\t\tstart.Format(timeFormat),\n\t\t\t\tc.ClientIP(),\n\t\t\t\tINTERNAL_SERVER_ERROR,\n\t\t\t\tduration,\n\t\t\t\tc.Request.Method,\n\t\t\t\tc.Request.URL.Path,\n\t\t\t\tc.Request.Proto,\n\t\t\t\tc.Writer.Size(),\n\t\t\t\terr,\n\t\t\t\tdebug.Stack(),\n\t\t\t)\n\t\t\tc.AbortWithStatus(INTERNAL_SERVER_ERROR)\n\t\t}\n\t}()\n\tstart = time.Now()\n\tc.Next()\n\tduration := time.Now().Sub(start)\n\tlog.Printf(logFormat+\"\\n%s\",\n\t\tstart.Format(timeFormat),\n\t\tc.ClientIP(),\n\t\tc.Writer.Status(),\n\t\tduration,\n\t\tc.Request.Method,\n\t\tc.Request.URL.Path,\n\t\tc.Request.Proto,\n\t\tc.Writer.Size(),\n\t\tc.Errors.String(),\n\t)\n}", "func GorillaLogger(w io.Writer) 
middleware.MiddleWare {\r\n\treturn func(next http.Handler) http.Handler {\r\n\t\treturn handlers.LoggingHandler(w, next)\r\n\t}\r\n}", "func setupLogger() {\n\tsl := logrus.New()\n\tsrvLog = sl.WithField(\"context\", \"server\")\n}", "func Logger() gin.HandlerFunc {\n\treturn LoggerWithWriter(gin.DefaultWriter)\n}", "func Logger() gin.HandlerFunc {\n\treturn LoggerWithWriter(gin.DefaultWriter)\n}", "func LoggerMiddleware(skippers ...SkipperFunc) gin.HandlerFunc {\n\treturn func (c *gin.Context) {\n\t\tif skipHandler(c, skippers...) {\n\t\t\tc.Next()\n\t\t\treturn\n\t\t}\n\n\t\tmethod := c.Request.Method\n\n\t\tfields := make(logger.Fields)\n\n\t\tfields[\"ip\"] = c.ClientIP()\n\t\tfields[\"method\"] = method\n\t\tfields[\"url\"] = c.Request.URL.String()\n\t\tfields[\"proto\"] = c.Request.Proto\n\n\t\tif method == http.MethodPost || method == http.MethodPut {\n\t\t\tmediaType, _, _ := mime.ParseMediaType(c.GetHeader(\"Content-Type\"))\n\t\t\tif mediaType == \"application/json\" {\n\t\t\t\tbody, err := ioutil.ReadAll(c.Request.Body)\n\t\t\t\tc.Request.Body.Close()\n\t\t\t\tif err == nil {\n\t\t\t\t\tbuf := bytes.NewBuffer(body)\n\t\t\t\t\tc.Request.Body = ioutil.NopCloser(buf)\n\t\t\t\t\tfields[\"request_body\"] = string(body)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tstart := time.Now()\n\t\tc.Next()\n\t\ttimeConsuming := time.Since(start).Nanoseconds() / 1e6\n\t\tfields[\"time_consuming(ms)\"] = timeConsuming\n\n\t\tfields[\"res_status\"] = c.Writer.Status()\n\t\tif id := ginhelper.GetUserID(c); id != \"\" {\n\t\t\tfields[\"user_id\"] = id\n\t\t}\n\t\tif r := ginhelper.GetResponseBody(c); r != \"\" {\n\t\t\tfields[\"response_body\"] = r\n\t\t}\n\n\t\tlogger.InfoWithFields(\"API Log\", fields)\n\t}\n}", "func LoggerWithWriter(out io.Writer, notlogged ...string) HandlerFunc {\n\tvar skip map[string]struct{}\n\n\tif length := len(notlogged); length > 0 {\n\t\tskip = make(map[string]struct{}, length)\n\n\t\tfor _, path := range notlogged {\n\t\t\tskip[path] = struct{}{}\n\t\t}\n\t}\n\n\treturn func(c *Context) {\n\t\t// Start timer\n\t\tstart := time.Now()\n\t\tpath := c.Request.URL.Path\n\n\t\t// Process request\n\t\tc.Next()\n\n\t\t// Log only when path is not being skipped\n\t\tif _, ok := skip[path]; !ok {\n\t\t\t// Stop timer\n\t\t\tend := time.Now()\n\t\t\tlatency := end.Sub(start)\n\n\t\t\tclientIP := c.ClientIP()\n\t\t\tmethod := c.Request.Method\n\t\t\tstatusCode := c.Writer.Status()\n\t\t\tcomment := c.Errors.ByType(ErrorTypePrivate).String()\n\n\t\t\tfmt.Fprintf(out, \"[GIN] %v %3d %13v | %s %-7s %s\\n%s\",\n\t\t\t\tend.Format(\"2006/01/02 - 15:04:05\"),\n\t\t\t\tstatusCode,\n\t\t\t\tlatency,\n\t\t\t\tclientIP,\n\t\t\t\tmethod,\n\t\t\t\tpath,\n\t\t\t\tcomment,\n\t\t\t)\n\t\t}\n\t}\n}", "func Logger() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\t// before request\n\t\tt := time.Now()\n\t\tlog.Println(\"on before request\")\n\t\tc.Next()\n\t\tlog.Println(\"on after request\")\n\t\t// after request\n\t\tlatency := time.Since(t)\n\t\tlog.Println(\"*** Latency :\", latency.Milliseconds())\n\t}\n}", "func Logger(log *logger.Logger) gin.HandlerFunc {\n\thostname, err := os.Hostname()\n\tif err != nil {\n\t\thostname = \"unknow\"\n\t}\n\n\tgin.Logger()\n\n\treturn func(c *gin.Context) {\n\t\tpath := c.Request.URL.Path\n\t\tstart := time.Now()\n\t\trequestTime := start.Format(timeFormat)\n\n\t\tc.Next()\n\n\t\tstop := time.Now()\n\t\tlatency := stop.Sub(start)\n\t\tstatusCode := c.Writer.Status()\n\t\tclientIP := c.ClientIP()\n\t\tclientUserAgent := c.Request.UserAgent()\n\t\treferer := 
c.Request.Referer()\n\n\t\tdataLength := c.Writer.Size()\n\t\tif dataLength < 0 {\n\t\t\tdataLength = 0\n\t\t}\n\n\t\treqID := c.GetString(\"Request-ID\")\n\t\tmethod := c.Request.Method\n\n\t\tentry := log.WithFields(logrus.Fields{\n\t\t\t\"Hostname\": hostname,\n\t\t\t\"Path\": path,\n\t\t\t\"Date\": requestTime,\n\t\t\t\"Latency\": latency,\n\t\t\t\"Code\": statusCode,\n\t\t\t\"IP\": clientIP,\n\t\t\t\"User-Agent\": clientUserAgent,\n\t\t\t\"Referer\": referer,\n\t\t\t\"Data-Length\": dataLength,\n\t\t\t\"Request-ID\": reqID,\n\t\t\t\"Method\": method,\n\t\t})\n\n\t\tif len(c.Errors) > 0 {\n\t\t\tentry.Error(c.Errors.ByType(gin.ErrorTypePrivate).String())\n\t\t} else {\n\t\t\tmsg := \"HTTP Request\"\n\t\t\tif statusCode >= 500 {\n\t\t\t\tentry.Error(msg)\n\t\t\t} else if statusCode >= 400 {\n\t\t\t\tentry.Warn(msg)\n\t\t\t} else {\n\t\t\t\tentry.Info(msg)\n\t\t\t}\n\t\t}\n\t}\n}", "func Logger(context *gin.Context) {\n\trec := newInterceptor(context)\n\tvar err string\n\n\tdefer func() {\n\t\tgomol.Infom(gomol.NewAttrsFromMap(map[string]interface{}{\n\t\t\t\"url\": context.Request.URL,\n\t\t\t\"error\": err,\n\t\t\t\"status\": rec.Status(),\n\t\t\t\"durationTime\": rec.Duration(),\n\t\t\t\"request\": rec.Request(),\n\t\t\t\"response\": rec.Response(),\n\t\t}), \"request received\")\n\t}()\n\n\tif e := rec.StartTrack(); e != nil {\n\t\terr = e.Error()\n\t}\n\n\tcontext.Next()\n}", "func RegisterChangeLogLevelToGin(engine *gin.Engine) error {\n\tif backend_info_leveld == nil {\n\t\treturn errors.New(\"logger backend is nil\")\n\t}\n\tloggerInfos := []gin.LoggerInfo{\n\t\tgin.LoggerInfo{\n\t\t\tName: \"info_log\",\n\t\t\tLLogger: backend_info_leveld,\n\t\t},\n\t}\n\tengine.RegisterLoggerInfo(loggerInfos)\n\treturn nil\n}", "func setupLogging(level logging.Level) *logging.Logger {\n\tlogger := logging.MustGetLogger(\"libsteg\")\n\tformat := logging.MustStringFormatter(\n\t\t`%{color}%{time:15:04:05.000} %{shortfunc} ▶ %{level:.4s} %{id:03x}%{color:reset} %{message}`,\n\t)\n\n\tbackend1 := logging.NewLogBackend(os.Stdout, \"\", 0)\n\tbackend1Formatter := logging.NewBackendFormatter(backend1, format)\n\tbackend1Leveled := logging.AddModuleLevel(backend1Formatter)\n\tbackend1Leveled.SetLevel(level, \"\")\n\tlogging.SetBackend(backend1Leveled)\n\n\treturn logger\n}", "func initGin(ginEngine *gin.Engine) {\n\tginEngine.Use(logrusLogger())\n\tginEngine.POST(\"/assignment\", putAssignment)\n\tginEngine.POST(\"/submission\", putSubmission)\n\tginEngine.GET(\"/plugin/langs\", getSupportedLangs)\n\tginEngine.GET(\"/debug/vars\", expvarGin.Handler())\n\tginEngine.GET(\"/health\", healthCheck)\n}", "func setupLogging() {\n\tlogger := logrus.New()\n\tlogger.Formatter = &logrus.JSONFormatter{}\n\n\t// Log everything and colorize logs in development mode\n\t// You can customize to any level but \"info\" in development mode\n\tif viper.GetString(\"environment\") == \"development\" {\n\t\tlogger.Formatter = &logrus.TextFormatter{}\n\t\tlogger.Level = logrus.DebugLevel\n\t}\n\n\tmiddleware.SetLogger(logger)\n}", "func LoggerMiddleware(logger *logrus.Logger) gin.HandlerFunc {\n\tlogger.SetLevel(log.ErrorLevel)\n\tlogger.SetFormatter(&log.TextFormatter{})\n\n\treturn func(c *gin.Context) {\n\t\t// Start timer\n\t\tstart := time.Now().UTC()\n\n\t\t// log.Printf(\"%s %s %s %s\", c.Request.Method, c.Request.Host, c.Request.RequestURI, c.Request.Proto)\n\t\t// Process Request\n\t\tc.Next()\n\n\t\tif status := c.Writer.Status(); status != 200 {\n\t\t\tentry := logger.WithFields(log.Fields{\n\t\t\t\t\"client_ip\": 
https.GetClientIP(c),\n\t\t\t\t\"duration\": start,\n\t\t\t\t\"method\": c.Request.Method,\n\t\t\t\t\"path\": c.Request.RequestURI,\n\t\t\t\t\"status\": c.Writer.Status(),\n\t\t\t\t\"referrer\": c.Request.Referer(),\n\t\t\t\t\"request_id\": c.Writer.Header().Get(\"X-Request-Id\"),\n\t\t\t\t\"user_id\": https.GetUserID(c),\n\t\t\t})\n\n\t\t\tentry.Error(c.Errors.String())\n\t\t}\n\t}\n}", "func NewFromLogrus(logger logrus.FieldLogger) Logger {\n\treturn logrusLogger{logger: logger}\n}", "func LogLogger( l *log.Logger ) mux.MiddlewareFunc {\n return FormatLogger( l.Printf )\n}", "func SupermarketLogger(inner http.Handler, name string) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tstart := time.Now()\n\t\tinner.ServeHTTP(w, r)\n\t\toutput := SuperMarketLog{\n\t\t\tr.Method,\n\t\t\tr.RequestURI,\n\t\t\tname,\n\t\t\ttime.Since(start),\n\t\t}\n\t\tJsonFileLogger(output)\n\t\tStandardOutLogger(output)\n\t})\n}", "func LogrusDefaultLogger() Logger {\n\t// TODO control verbosity\n\treturn &lruLogger{jl: logrus.NewEntry(logrus.New())}\n}", "func LogrusDefaultConfig(config LoggerConfig) echo.MiddlewareFunc {\n\t// Defaults\n\tif config.Skipper == nil {\n\t\tconfig.Skipper = DefaultLoggerConfig.Skipper\n\t}\n\tif config.Output == nil {\n\t\tconfig.Output = DefaultLoggerConfig.Output\n\t}\n\n\tconfig.pool = &sync.Pool{\n\t\tNew: func() interface{} {\n\t\t\treturn bytes.NewBuffer(make([]byte, 256))\n\t\t},\n\t}\n\n\treturn func(next echo.HandlerFunc) echo.HandlerFunc {\n\t\treturn func(c echo.Context) (err error) {\n\t\t\tif config.Skipper(c) {\n\t\t\t\treturn next(c)\n\t\t\t}\n\n\t\t\treq := c.Request()\n\t\t\tres := c.Response()\n\t\t\tstart := time.Now()\n\t\t\tif err = next(c); err != nil {\n\t\t\t\tc.Error(err)\n\t\t\t}\n\t\t\tstop := time.Now()\n\n\t\t\tbytesIn := req.Header.Get(echo.HeaderContentLength)\n\t\t\tif bytesIn == \"\" {\n\t\t\t\tbytesIn = \"0\"\n\t\t\t}\n\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"status\": strconv.Itoa(res.Status),\n\t\t\t\t\"time\": time.Now().Format(time.RFC3339),\n\t\t\t\t\"latency_human\": stop.Sub(start).String(),\n\t\t\t\t\"remote_ip\": c.RealIP(),\n\t\t\t\t\"bytes_in\": bytesIn,\n\t\t\t\t\"bytes_out\": strconv.FormatInt(res.Size, 10),\n\t\t\t}).Info(req.Method + \" \" + req.RequestURI)\n\n\t\t\treturn\n\t\t}\n\t}\n}", "func (logger *logger) newLogrus() {\n\tlogger.logrus = &logrus.Logger{\n\t\tHooks: make(logrus.LevelHooks),\n\t}\n\n\tlogLevel, err := logrus.ParseLevel(logger.cfg.LogLevel)\n\tif err != nil {\n\t\tlogLevel = defaultLogLevel\n\t}\n\tlogger.logrus.Level = logLevel\n\n\tswitch logger.cfg.LogFormat {\n\tcase jsonLogFormat:\n\t\tlogger.logrus.SetFormatter(&logrus.JSONFormatter{})\n\tdefault:\n\t\tlogger.logrus.SetFormatter(&logrus.TextFormatter{})\n\t}\n\n\tif logger.cfg.LogFilePath == \"\" {\n\t\tlogger.logrus.Out = os.Stdout\n\t\tlogger.logrus.Errorf(\"[%s]:: empty log file. Set 'Stdout' as default \\n\", PackageName)\n\t\tlogger.logrus.Infof(\"[%s]:: initialized logx successfully \\n\", PackageName)\n\t\treturn\n\t}\n\n\tlogfile, err := os.OpenFile(logger.cfg.LogFilePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0755)\n\tif err != nil {\n\t\tlogger.logrus.Errorln(\"[%s]:: failed to set log file. Error : '%v'. 
Set 'Stdout' as default\", PackageName, err)\n\t\treturn\n\t}\n\n\tlogger.logfile = logfile\n\tlogger.logrus.Out = logger.logfile\n\n\tlogger.logrus.Infof(\"[%s]:: initialized logx successfully\", PackageName)\n}", "func LoggerWithWriter(out io.Writer, notlogged ...string) gin.HandlerFunc {\n\tvar skip map[string]struct{}\n\n\tif length := len(notlogged); length > 0 {\n\t\tskip = make(map[string]struct{}, length)\n\n\t\tfor _, path := range notlogged {\n\t\t\tskip[path] = struct{}{}\n\t\t}\n\t}\n\n\treturn func(c *gin.Context) {\n\t\t// Start timer\n\t\tstart := time.Now()\n\t\tpath := c.Request.URL.Path\n\t\traw := c.Request.URL.RawQuery\n\n\t\t// Process request\n\t\tc.Next()\n\n\t\t// Log only when path is not being skipped\n\t\tif _, ok := skip[path]; !ok {\n\t\t\t// Stop timer\n\t\t\tend := time.Now()\n\t\t\tlatency := end.Sub(start)\n\n\t\t\tclientIP := c.ClientIP()\n\t\t\tmethod := c.Request.Method\n\t\t\tstatusCode := c.Writer.Status()\n\t\t\tcomment := c.Errors.ByType(gin.ErrorTypePrivate).String()\n\n\t\t\tif raw != \"\" {\n\t\t\t\tpath = path + \"?\" + raw\n\t\t\t}\n\n\t\t\tlogger.Debug().\n\t\t\t\tStr(\"requestTime\", end.Format(\"2006/01/02 - 15:04:05\")).\n\t\t\t\tInt(\"code\", statusCode).\n\t\t\t\tStr(\"latency\", fmt.Sprintf(\"%13v\", latency)).\n\t\t\t\tStr(\"clientIp\", clientIP).\n\t\t\t\tStr(\"method\", method).\n\t\t\t\tStr(\"path\", path).\n\t\t\t\tMsg(comment)\n\t\t}\n\t}\n}", "func Logger() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tstart := time.Now()\n\t\tpath := c.Request.URL.Path\n\t\trequestID := GetRequestID(c)\n\t\trequestLog.Info(fmt.Sprintf(\"Incomming request: %s %s\", c.Request.Method, path), zap.String(\"requestId\", requestID))\n\n\t\tc.Next()\n\n\t\tend := time.Now()\n\t\tlatency := end.Sub(start)\n\n\t\trequestLog.Info(fmt.Sprintf(\"Outgoing request: %s %s\", c.Request.Method, path),\n\t\t\tzap.Int(\"status\", c.Writer.Status()),\n\t\t\tzap.String(\"requestId\", requestID),\n\t\t\tzap.Duration(\"latency\", latency))\n\t}\n}", "func RequestLogger(next http.Handler) http.Handler {\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\tstart := time.Now()\n\t\tctx := r.Context()\n\t\t// Parse request information\n\t\trequestURIparts := append(strings.SplitN(r.RequestURI, \"?\", 2), \"\")\n\n\t\t// Instantiate verbose logger\n\t\tlogger := logrus.\n\t\t\tWithField(\"request\", uuid.New().String()).\n\t\t\tWithField(\"route\", r.Method+\" \"+requestURIparts[0]).\n\t\t\tWithField(\"query\", requestURIparts[1]).\n\t\t\tWithField(\"instance\", instanceID).\n\t\t\tWithField(\"ip\", r.RemoteAddr).\n\t\t\tWithField(\"referer\", r.Referer()).\n\t\t\tWithField(\"agent\", r.UserAgent())\n\n\t\tctx = loglib.SetLogger(ctx, logger.Logger)\n\t\tlogger.Infof(\"START\")\n\n\t\tr = r.WithContext(ctx)\n\t\tnext.ServeHTTP(w, r)\n\n\t\tlogger.\n\t\t\tWithField(\"duration\", time.Since(start)).\n\t\t\tInfof(\"END\")\n\n\t}\n\treturn http.HandlerFunc(fn)\n}", "func LogrusLogger(log *logrus.Logger) Logger {\n\t// TODO control verbosity\n\treturn &lruLogger{jl: logrus.NewEntry(log)}\n}", "func RequestLogger(req *http.Request) Logger {\n\tif l, _ := req.Context().Value(ctxKeyLogger).(*zap.SugaredLogger); l != nil {\n\t\treturn l\n\t}\n\n\treturn stdLogger\n}", "func WithLogger(ctx *gin.Context, logger *logrus.Entry) {\n\tctx.Set(constants.LOGGER, logger)\n}", "func BgLogger() *zap.Logger {\n\treturn log.L()\n}", "func ReplaceGrpcLogger(logger *zap.Logger) {\n\tzgl := &zapGrpcLogger{logger.With(SystemField, zap.Bool(\"grpc_log\", 
true))}\n\tgrpclog.SetLogger(zgl)\n}", "func makeLogger(basePath string) middlewareFunc {\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tlog := structlog.FromContext(r.Context(), nil)\n\t\t\tlog.SetDefaultKeyvals(\n\t\t\t\tdef.LogRemote, r.RemoteAddr,\n\t\t\t\tdef.LogHTTPStatus, \"\",\n\t\t\t\tdef.LogHTTPMethod, r.Method,\n\t\t\t\tdef.LogFunc, path.Join(\"/\", strings.TrimPrefix(r.URL.Path, basePath)),\n\t\t\t)\n\t\t\tr = r.WithContext(structlog.NewContext(r.Context(), log))\n\n\t\t\tnext.ServeHTTP(w, r)\n\t\t})\n\t}\n}", "func DefaultLogger(level LogLevel) Logger {\n\tlogger := logrus.New()\n\tlogger.SetLevel(logrus.Level(level))\n\treturn logger\n}", "func LoggerWithConfig(config LoggerConfig) echo.MiddlewareFunc {\n\n\treturn func(next echo.HandlerFunc) echo.HandlerFunc {\n\t\treturn func(c echo.Context) (err error) {\n\t\t\tif config.Skipper(c) {\n\t\t\t\treturn next(c)\n\t\t\t}\n\n\t\t\treq := c.Request()\n\t\t\tres := c.Response()\n\t\t\tstart := time.Now()\n\t\t\tif err = next(c); err != nil {\n\t\t\t\tc.Error(err)\n\t\t\t}\n\t\t\tstop := time.Now()\n\n\t\t\ttenant := c.Param(\"tenant\")\n\t\t\tif tenant == \"\" {\n\t\t\t\ttenant = \"none\"\n\t\t\t}\n\n\t\t\tpath := req.URL.Path\n\t\t\tif path == \"\" {\n\t\t\t\tpath = \"/\"\n\t\t\t}\n\n\t\t\tlatency := stop.Sub(start).Nanoseconds() / int64(time.Microsecond)\n\t\t\tlatencyHuman := stop.Sub(start).String()\n\t\t\tbytesIn := req.Header.Get(echo.HeaderContentLength)\n\t\t\tif bytesIn == \"\" {\n\t\t\t\tbytesIn = \"0\"\n\t\t\t}\n\n\t\t\tlog.Info().\n\t\t\t\tStr(\"tenant\", tenant).\n\t\t\t\tStr(\"type\", \"request\").\n\t\t\t\tStr(\"remote_ip\", c.RealIP()).\n\t\t\t\tStr(\"host\", req.Host).\n\t\t\t\tStr(\"uri\", req.RequestURI).\n\t\t\t\tStr(\"path\", path).\n\t\t\t\tStr(\"method\", req.Method).\n\t\t\t\tStr(\"referer\", req.Referer()).\n\t\t\t\tStr(\"user_agent\", req.UserAgent()).\n\t\t\t\tInt(\"status\", res.Status).\n\t\t\t\tInt64(\"latency\", latency).\n\t\t\t\tStr(\"latency_human\", latencyHuman).\n\t\t\t\tStr(\"bytes_in\", bytesIn).\n\t\t\t\tStr(\"bytes_out\", strconv.FormatInt(res.Size, 10)).\n\t\t\t\tMsg(\"Request handled\")\n\n\t\t\t// Todo: add body and header to log\n\n\t\t\treturn nil\n\t\t}\n\t}\n}", "func Logger(logger *onelog.Logger) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\t// Start timer\n\t\tstart := time.Now()\n\t\tpath := c.Request.URL.Path\n\t\thost := c.Request.Host\n\t\traw := c.Request.URL.RawQuery\n\n\t\t// Process request\n\t\tc.Next()\n\n\t\t// Stop timer\n\t\ttimeStamp := time.Now()\n\t\tlatency := timeStamp.Sub(start)\n\n\t\tclientIP := c.ClientIP()\n\t\tmethod := c.Request.Method\n\t\tstatusCode := c.Writer.Status()\n\t\thandlerName := c.HandlerName()\n\n\t\terrorMessage := c.Errors.ByType(gin.ErrorTypeAny).String()\n\n\t\tbodySize := c.Writer.Size()\n\n\t\tif raw != \"\" {\n\t\t\tpath = path + \"?\" + raw\n\t\t}\n\n\t\tvar chain onelog.ChainEntry\n\t\tif statusCode >= 400 {\n\t\t\tchain = logger.ErrorWith(errorMessage)\n\t\t} else {\n\t\t\tchain = logger.InfoWith(errorMessage)\n\t\t}\n\n\t\tchain.\n\t\t\tString(\"took\", latency.String()).\n\t\t\tString(\"ip\", clientIP).\n\t\t\tString(\"host\", host).\n\t\t\tString(\"method\", method).\n\t\t\tString(\"path\", path).\n\t\t\tString(\"handler\", handlerName).\n\t\t\tInt(\"status\", statusCode).\n\t\t\tInt(\"send-size\", bodySize).\n\t\t\tWrite()\n\t}\n}", "func ConsoleLogger() mux.MiddlewareFunc {\n return FormatLogger( log.Printf )\n}", "func 
LoggerWithConfig(config LoggerConfig) gin.HandlerFunc {\n\tif config.Format == \"\" {\n\t\tconfig.Format = DefaultLoggerConfig.Format\n\t}\n\tif config.Output == nil {\n\t\tconfig.Output = DefaultLoggerConfig.Output\n\t}\n\tconfig.template = fasttemplate.New(config.Format, \"${\", \"}\")\n\tconfig.colorer = color.New()\n\tconfig.colorer.SetOutput(config.Output)\n\tconfig.pool = &sync.Pool{\n\t\tNew: func() interface{} {\n\t\t\treturn bytes.NewBuffer(make([]byte, 256))\n\t\t},\n\t}\n\treturn func(ctx *gin.Context) {\n\t\tbodyBytes, _ := ctx.GetRawData()\n\t\t// Restore the io.ReadCloser to its original state\n\t\tctx.Request.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes))\n\t\tpath := ctx.Request.URL.Path\n\t\traw := ctx.Request.URL.RawQuery\n\t\tstart := time.Now()\n\t\tresBody := &bodyLogWriter{body: bytes.NewBufferString(\"\"), ResponseWriter: ctx.Writer}\n\t\tctx.Writer = resBody\n\n\t\tctx.Next()\n\t\tlevel := \"info\"\n\t\terr, ok := ctx.Get(ContextError)\n\t\tif ok {\n\t\t\tlevel = \"error\"\n\t\t}\n\t\terrInfo, _ := json.Marshal(err)\n\t\tif _, ok := config.Skip[path]; !ok {\n\t\t\tstop := time.Now()\n\n\t\t\tbuf := config.pool.Get().(*bytes.Buffer)\n\t\t\tbuf.Reset()\n\t\t\tdefer config.pool.Put(buf)\n\t\t\tre := regexp.MustCompile(\"\\n *|\\\"password.*\\\":\\\".+?\\\",*\")\n\t\t\tif _, err := config.template.ExecuteFunc(buf, func(w io.Writer, tag string) (int, error) {\n\t\t\t\tswitch tag {\n\t\t\t\tcase \"time_unix\":\n\t\t\t\t\treturn buf.WriteString(strconv.FormatInt(time.Now().Unix(), 10))\n\t\t\t\tcase \"time_unix_nano\":\n\t\t\t\t\treturn buf.WriteString(strconv.FormatInt(time.Now().UnixNano(), 10))\n\t\t\t\tcase \"time_rfc3339\":\n\t\t\t\t\treturn buf.WriteString(time.Now().Format(time.RFC3339))\n\t\t\t\tcase \"time_rfc3339_nano\":\n\t\t\t\t\treturn buf.WriteString(time.Now().Format(time.RFC3339Nano))\n\t\t\t\tcase \"time_custom\":\n\t\t\t\t\treturn buf.WriteString(time.Now().Format(config.CustomTimeFormat))\n\t\t\t\tcase \"remote_ip\":\n\t\t\t\t\treturn buf.WriteString(ctx.ClientIP())\n\t\t\t\tcase \"host\":\n\t\t\t\t\treturn buf.WriteString(ctx.Request.Host)\n\t\t\t\tcase \"uri\":\n\t\t\t\t\treturn buf.WriteString(ctx.Request.RequestURI)\n\t\t\t\tcase \"method\":\n\t\t\t\t\treturn buf.WriteString(ctx.Request.Method)\n\t\t\t\tcase \"path\":\n\t\t\t\t\tif path == \"\" {\n\t\t\t\t\t\tpath = \"/\"\n\t\t\t\t\t}\n\t\t\t\t\treturn buf.WriteString(path)\n\t\t\t\tcase \"query\":\n\t\t\t\t\treturn buf.WriteString(raw)\n\t\t\t\tcase \"protocol\":\n\t\t\t\t\treturn buf.WriteString(ctx.Request.Proto)\n\t\t\t\tcase \"referer\":\n\t\t\t\t\treturn buf.WriteString(ctx.Request.Referer())\n\t\t\t\tcase \"user_agent\":\n\t\t\t\t\treturn buf.WriteString(ctx.Request.UserAgent())\n\t\t\t\tcase \"status\":\n\t\t\t\t\tn := ctx.Writer.Status()\n\t\t\t\t\ts := config.colorer.Green(n)\n\t\t\t\t\tswitch {\n\t\t\t\t\tcase n >= 500:\n\t\t\t\t\t\ts = config.colorer.Red(n)\n\t\t\t\t\tcase n >= 400:\n\t\t\t\t\t\ts = config.colorer.Yellow(n)\n\t\t\t\t\tcase n >= 300:\n\t\t\t\t\t\ts = config.colorer.Cyan(n)\n\t\t\t\t\t}\n\t\t\t\t\treturn buf.WriteString(s)\n\t\t\t\tcase \"app_id\":\n\t\t\t\t\tappID, _ := ctx.Get(ContextError)\n\t\t\t\t\treturn buf.WriteString(appID.(string))\n\t\t\t\tcase \"level\":\n\t\t\t\t\treturn buf.WriteString(level)\n\t\t\t\tcase \"error\":\n\t\t\t\t\treturn buf.Write(errInfo)\n\t\t\t\tcase \"latency\":\n\t\t\t\t\tl := stop.Sub(start)\n\t\t\t\t\treturn buf.WriteString(strconv.FormatInt(int64(l), 10))\n\t\t\t\tcase \"latency_human\":\n\t\t\t\t\treturn 
buf.WriteString(stop.Sub(start).String())\n\t\t\t\tcase \"body\":\n\t\t\t\t\treturn buf.WriteString(re.ReplaceAllString(string(bodyBytes), \"\"))\n\t\t\t\tcase \"response\":\n\t\t\t\t\treturn buf.WriteString(re.ReplaceAllString(resBody.body.String(), \"\"))\n\t\t\t\tdefault:\n\t\t\t\t\tswitch {\n\t\t\t\t\tcase strings.HasPrefix(tag, \"header:\"):\n\t\t\t\t\t\treturn buf.Write([]byte(ctx.Request.Header.Get(tag[7:])))\n\t\t\t\t\tcase strings.HasPrefix(tag, \"query:\"):\n\t\t\t\t\t\treturn buf.Write([]byte(ctx.Query(tag[6:])))\n\t\t\t\t\tcase strings.HasPrefix(tag, \"form:\"):\n\t\t\t\t\t\treturn buf.Write([]byte(ctx.Request.FormValue(tag[5:])))\n\t\t\t\t\tcase strings.HasPrefix(tag, \"cookie:\"):\n\t\t\t\t\t\tcookie, err := ctx.Cookie(tag[7:])\n\t\t\t\t\t\tif err == nil {\n\t\t\t\t\t\t\treturn buf.Write([]byte(cookie))\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn 0, nil\n\t\t\t}); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t_, _ = config.Output.Write(buf.Bytes())\n\t\t\treturn\n\t\t}\n\t}\n}", "func NewPkgLogger(envPrefix string) *logrus.Logger {\n\n\ttheLogger := logrus.New()\n\tSetup(envPrefix, theLogger)\n\n\treturn theLogger\n}", "func LoggerMiddleware() echo.MiddlewareFunc {\n\treturn func(next echo.HandlerFunc) echo.HandlerFunc {\n\t\treturn func(c echo.Context) (err error) {\n\n\t\t\tif logRequests {\n\n\t\t\t\treq := c.Request()\n\t\t\t\tres := c.Response()\n\t\t\t\tstart := time.Now()\n\t\t\t\tif err := next(c); err != nil {\n\t\t\t\t\tc.Error(err)\n\t\t\t\t}\n\t\t\t\tstop := time.Now()\n\n\t\t\t\tp := req.URL.Path\n\t\t\t\tif p == \"\" {\n\t\t\t\t\tp = \"/\"\n\t\t\t\t}\n\n\t\t\t\tbytesIn := req.Header.Get(echo.HeaderContentLength)\n\t\t\t\tif bytesIn == \"\" {\n\t\t\t\t\tbytesIn = \"0\"\n\t\t\t\t}\n\n\t\t\t\tlogContext := logrus.WithFields(map[string]interface{}{\n\t\t\t\t\t\"time_rfc3339\": time.Now().Format(time.RFC3339),\n\t\t\t\t\t\"remote_ip\": c.RealIP(),\n\t\t\t\t\t\"host\": req.Host,\n\t\t\t\t\t\"uri\": req.RequestURI,\n\t\t\t\t\t\"method\": req.Method,\n\t\t\t\t\t\"path\": p,\n\t\t\t\t\t\"referer\": req.Referer(),\n\t\t\t\t\t\"user_agent\": req.UserAgent(),\n\t\t\t\t\t\"status\": res.Status,\n\t\t\t\t\t\"latency\": strconv.FormatInt(stop.Sub(start).Nanoseconds()/1000, 10),\n\t\t\t\t\t\"latency_human\": stop.Sub(start).String(),\n\t\t\t\t\t\"bytes_in\": bytesIn,\n\t\t\t\t\t\"bytes_out\": strconv.FormatInt(res.Size, 10),\n\t\t\t\t})\n\n\t\t\t\tmsg := fmt.Sprintf(\"%s %s [ %d ]\", req.Method, p, res.Status)\n\t\t\t\tif res.Status > 499 {\n\t\t\t\t\tlogContext.Error(msg)\n\t\t\t\t} else if res.Status > 399 {\n\t\t\t\t\tlogContext.Warn(msg)\n\t\t\t\t} else {\n\t\t\t\t\tlogContext.Info(msg)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n}", "func initLogger() *logrus.Logger {\n\tlogger := logrus.New()\n\n\tlogger.SetFormatter(&logrus.TextFormatter{\n\t\tFullTimestamp: true,\n\t\tDisableLevelTruncation: true,\n\t})\n\n\treturn logger\n}", "func Logging(opts ...Option) gin.HandlerFunc {\n\to := defaultOptions()\n\to.apply(opts...)\n\n\treturn func(c *gin.Context) {\n\t\tstart := time.Now()\n\n\t\t// 忽略打印指定的路由\n\t\tif _, ok := o.ignoreRoutes[c.Request.URL.Path]; ok {\n\t\t\tc.Next()\n\t\t\treturn\n\t\t}\n\n\t\t// 处理前打印输入信息\n\t\tbuf := bytes.Buffer{}\n\t\t_, _ = buf.ReadFrom(c.Request.Body)\n\n\t\tfields := []zap.Field{\n\t\t\tzap.String(\"method\", c.Request.Method),\n\t\t\tzap.String(\"url\", c.Request.URL.String()),\n\t\t}\n\t\tif c.Request.Method == http.MethodPost || c.Request.Method == http.MethodPut || c.Request.Method == http.MethodPatch || c.Request.Method 
== http.MethodDelete {\n\t\t\tfields = append(fields,\n\t\t\t\tzap.Int(\"size\", buf.Len()),\n\t\t\t\tzap.String(\"body\", getBodyData(&buf, o.maxLength)),\n\t\t\t)\n\t\t}\n\t\treqID := \"\"\n\t\tif o.requestIDFrom == 1 {\n\t\t\treqID = c.Request.Header.Get(o.requestIDName)\n\t\t\tfields = append(fields, zap.String(o.requestIDName, reqID))\n\t\t} else if o.requestIDFrom == 2 {\n\t\t\tif v, isExist := c.Get(o.requestIDName); isExist {\n\t\t\t\tif requestID, ok := v.(string); ok {\n\t\t\t\t\treqID = requestID\n\t\t\t\t\tfields = append(fields, zap.String(o.requestIDName, reqID))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\to.log.Info(\"<<<<\", fields...)\n\n\t\tc.Request.Body = io.NopCloser(&buf)\n\n\t\t// 替换writer\n\t\tnewWriter := &bodyLogWriter{body: &bytes.Buffer{}, ResponseWriter: c.Writer}\n\t\tc.Writer = newWriter\n\n\t\t// 处理请求\n\t\tc.Next()\n\n\t\t// 处理后打印返回信息\n\t\tfields = []zap.Field{\n\t\t\tzap.Int(\"code\", c.Writer.Status()),\n\t\t\tzap.String(\"method\", c.Request.Method),\n\t\t\tzap.String(\"url\", c.Request.URL.Path),\n\t\t\tzap.Int64(\"time_us\", time.Since(start).Nanoseconds()/1000),\n\t\t\tzap.Int(\"size\", newWriter.body.Len()),\n\t\t\tzap.String(\"response\", strings.TrimRight(getBodyData(newWriter.body, o.maxLength), \"\\n\")),\n\t\t}\n\t\tif o.requestIDName != \"\" {\n\t\t\tfields = append(fields, zap.String(o.requestIDName, reqID))\n\t\t}\n\t\to.log.Info(\">>>>\", fields...)\n\t}\n}", "func InitLogger(config *LoggerConfig) *Logger {\n\tlog := logrus.New()\n\tlog.SetFormatter(config.Format.Get())\n\tlog.SetLevel(config.Level.Get())\n\tfields := logrus.Fields{\n\t\t\"module\": config.Name,\n\t}\n\tif config.AdditionalFields != nil {\n\t\tfor key, value := range *config.AdditionalFields {\n\t\t\tfields[key] = value\n\t\t}\n\t}\n\tlogger := &Logger{\n\t\tconfig: config,\n\t\tinstanceRaw: log,\n\t\tinstance: log.WithFields(fields),\n\t}\n\treturn logger\n}", "func New(logger *logrus.Logger, skipPaths ...string) gin.HandlerFunc {\n\tvar skip map[string]struct{}\n\n\tif length := len(skipPaths); length > 0 {\n\t\tskip = make(map[string]struct{}, length)\n\n\t\tfor _, path := range skipPaths {\n\t\t\tskip[path] = struct{}{}\n\t\t}\n\t}\n\n\treturn func(c *gin.Context) {\n\t\tstart := time.Now()\n\t\t// some evil middlewares modify this values\n\t\tpath := c.Request.URL.Path\n\t\tc.Next()\n\n\t\tstatusCode := c.Writer.Status()\n\t\tlatency := time.Now().Sub(start)\n\n\t\tentry := logger.WithFields(logrus.Fields{\n\t\t\t\"status\": statusCode,\n\t\t\t\"method\": c.Request.Method,\n\t\t\t\"path\": path,\n\t\t\t\"ip\": c.ClientIP(),\n\t\t\t\"latency\": latency,\n\t\t\t\"latency_string\": latency.String(),\n\t\t\t\"user-agent\": c.Request.UserAgent(),\n\t\t})\n\n\t\tif len(c.Errors) > 0 {\n\t\t\tentry.Error(c.Errors.String())\n\t\t\treturn\n\t\t}\n\n\t\tif statusCode > 499 {\n\t\t\tentry.Error()\n\t\t} else if statusCode > 399 {\n\t\t\tentry.Warn()\n\t\t} else {\n\t\t\tif _, ok := skip[path]; ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tentry.Info()\n\t\t}\n\n\t}\n}", "func GLogger() *logs.Logger {\n\tif logger == nil {\n\t\t// defer creation to first call, give opportunity to customize log target\n\t\tlogger = App().Log().Logger(App().name, util.GenUniqueId())\n\t}\n\n\treturn logger\n}", "func DefaultLogger() gin.HandlerFunc {\n\treturn gin.LoggerWithFormatter(func(param gin.LogFormatterParams) string {\n\t\treturn fmt.Sprintf(\"%s [GOFLOW] - \\\"%s %s %s %d %s \\\"%s\\\" 
%s\\\"\\n\",\n\t\t\tparam.TimeStamp.Format(time.RFC3339),\n\t\t\tparam.Method,\n\t\t\tparam.Path,\n\t\t\tparam.Request.Proto,\n\t\t\tparam.StatusCode,\n\t\t\tparam.Latency,\n\t\t\tparam.Request.UserAgent(),\n\t\t\tparam.ErrorMessage,\n\t\t)\n\t})\n}", "func (s *Server) configureLogger(dbg bool) *lgr.Logger {\n\tif dbg {\n\t\treturn lgr.New(lgr.Msec, lgr.Debug, lgr.CallerFile, lgr.CallerFunc, lgr.LevelBraces)\n\t}\n\n\treturn lgr.New(lgr.Msec, lgr.LevelBraces)\n}", "func NewWithLogger(l *logrus.Logger) echo.MiddlewareFunc {\n\treturn func(next echo.HandlerFunc) echo.HandlerFunc {\n\t\treturn func(c echo.Context) error {\n\t\t\tstart := time.Now()\n\t\t\tisError := false\n\n\t\t\tbody := c.Request().Body\n\t\t\tdataIn, _ := ioutil.ReadAll(body)\n\t\t\tif len(dataIn) > 0 {\n\t\t\t\tc.Request().Body = ioutil.NopCloser(bytes.NewReader(dataIn))\n\t\t\t}\n\n\t\t\tif err := next(c); err != nil {\n\t\t\t\tc.Error(err)\n\t\t\t\tisError = true\n\t\t\t}\n\n\t\t\tlatency := time.Since(start)\n\n\t\t\thost, _ := os.Hostname()\n\n\t\t\t// TODO 完善access日志的field\n\t\t\tentry := l.WithFields(logrus.Fields{\n\t\t\t\t\"type\": \"access\",\n\t\t\t\t\"server\": host,\n\t\t\t\t\"method\": c.Request().Method,\n\t\t\t\t\"ip\": c.Request().RemoteAddr,\n\t\t\t\t\"status\": c.Response().Status,\n\t\t\t\t\"latency\": latency.Nanoseconds() / int64(time.Millisecond),\n\t\t\t\t\"body\": string(dataIn),\n\t\t\t\t\"route\": c.Path(),\n\t\t\t})\n\n\t\t\tif c.Response().Status != http.StatusNotFound {\n\t\t\t\tentry = entry.WithField(\"url\", c.Request().URL)\n\t\t\t} else {\n\t\t\t\tentry = entry.WithField(\"illegalURL\", c.Request().URL)\n\t\t\t}\n\n\t\t\tif reqID := c.Request().Header.Get(\"X-Request-Id\"); reqID != \"\" {\n\t\t\t\tentry = entry.WithField(\"request_id\", reqID)\n\t\t\t}\n\n\t\t\t// Check middleware error\n\t\t\tif isError {\n\t\t\t\tentry.Error(\"error by handling request\")\n\t\t\t} else {\n\t\t\t\tentry.Info(\"completed handling request\")\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\t}\n}", "func (s *Setup) LoggerMiddleware(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tstart := time.Now()\n\n\t\ts.logger.Infow(\n\t\t\t\"request_logging\",\n\t\t\t\"method\", r.Method,\n\t\t\t\"url\", r.URL.String(),\n\t\t\t\"agent\", r.UserAgent(),\n\t\t\t\"referer\", r.Referer(),\n\t\t\t\"proto\", r.Proto,\n\t\t\t\"remote_address\", r.RemoteAddr,\n\t\t\t\"latency\", time.Since(start),\n\t\t)\n\n\t\tnext.ServeHTTP(w, r)\n\t})\n}", "func GetLogger() *log.Logger { return std.GetLogger() }", "func Use(c context.Context) context.Context {\n\treturn logging.SetFactory(c, func(context.Context) logging.Logger { return Get() })\n}", "func configureLogrus(verbosity int) {\n\tlogrus.SetFormatter(&logrus.TextFormatter{DisableTimestamp: true})\n\tlogrus.SetOutput(os.Stderr)\n\n\tif verbosity >= thirdPartyVerboseLevel {\n\t\tlogrus.SetLevel(logrus.DebugLevel)\n\t}\n}", "func SetLogger(logger *xlog.Logger) {\n\tlogger = logger.Named(ecode.ModClientGrpc).WithOptions(zap.AddCallerSkip(defaultCallerSkip))\n\tgrpclog.SetLoggerV2(&loggerWrapper{logger: logger, sugar: logger.Sugar()})\n}", "func Logger(next http.Handler) http.Handler {\n\tl := New(Prefix(\"REST\"))\n\treturn l.Handler(next)\n\n}", "func UseLogger(backend *logs.Backend, level logs.Level) {\n\tlog = backend.Logger(logSubsytem)\n\tlog.SetLevel(level)\n\tspawn = panics.GoroutineWrapperFunc(log)\n}", "func Sugar() *zap.SugaredLogger {\n\tinitLogger()\n\n\treturn logger.Sugar()\n}", "func (s *Status) WrapLogrus(logger 
*logrus.Logger) {\n\tlogger.SetOutput(s.WrapWriter(logger.Out))\n}", "func SetMiddleWareLogger(next http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t// fmt.Println(\"\")\n\t\tlog.Printf(\"%s %s%s %s\", r.Method, r.Host, r.RequestURI, r.Proto)\n\t\tnext(w, r)\n\t}\n}", "func RequestLoggerMiddleware(next http.Handler) http.Handler {\r\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\r\n\t\tstart := time.Now()\r\n\r\n\t\tif config.LogLevel == \"error\" {\r\n\t\t\tlog.Printf(\r\n\t\t\t\t\"%s\\t%#v\\t%s\\t%s\",\r\n\t\t\t\treq.Method,\r\n\t\t\t\treq.RequestURI,\r\n\t\t\t\ttime.Since(start),\r\n\t\t\t)\r\n\t\t}\r\n\t\tif config.LogLevel == \"debug\" {\r\n\t\t\tlog.Printf(\r\n\t\t\t\t\"%s\\t%#v\\t%s\\t%dB\\t%s\\tHeaders: %s\\tPayload: %s\",\r\n\t\t\t\treq.Method,\r\n\t\t\t\treq.RequestURI,\r\n\t\t\t\ttime.Since(start),\r\n\t\t\t\treq.ContentLength,\r\n\t\t\t\treq.TransferEncoding,\r\n\t\t\t\treq.Header,\r\n\t\t\t\treq.Body,\r\n\t\t\t)\r\n\t\t}\r\n\t\tnext.ServeHTTP(w, req)\r\n\t})\r\n}", "func RequestLogger(f httprouter.Handle) httprouter.Handle {\n\treturn httprouter.Handle(func(w http.ResponseWriter, r *http.Request, p httprouter.Params) {\n\t\tt := time.Now()\n\t\tf(w, r, p)\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"method\": r.Method,\n\t\t\t\"resource\": r.URL.Path,\n\t\t\t\"took\": fmt.Sprintf(\"%d%s\", time.Since(t).Nanoseconds()/1000000, \"ms\"),\n\t\t}).Info(fmt.Sprintf(\"%s %s\", r.Method, r.URL.Path))\n\t})\n}", "func WrapSugaredLogger(sugaredLogger *zap.SugaredLogger) yetlog.Logger {\n\treturn SugaredLogger{\n\t\tzapLogger: sugaredLogger,\n\t}\n}", "func initLogger() {\n\tlogdir := viper.GetString(\"log.log_dir\")\n\tstdout := viper.GetBool(\"log_stdout\")\n\n\tvar writer io.Writer\n\n\tif logdir != \"\" {\n\t\tfolderPath, err := filepath.Abs(logdir)\n\t\tpanicIfError(err, fmt.Sprintf(\"Error on parsing log path: %s\", logdir))\n\n\t\tabspath, err := filepath.Abs(path.Join(logdir, \"run.log\"))\n\t\tpanicIfError(err, fmt.Sprintf(\"Error on parsing log file path: %s\", logdir))\n\n\t\terr = os.MkdirAll(folderPath, os.ModePerm)\n\t\tpanicIfError(err, fmt.Sprintf(\"Error on creating log dir: %s\", folderPath))\n\n\t\tif stdout {\n\t\t\tfmt.Println(\"Will be logged to stdout and \", abspath)\n\t\t\tfileWriter := mylog.RotateLog(abspath)\n\t\t\twriter = io.MultiWriter(os.Stdout, fileWriter)\n\t\t} else {\n\t\t\tfmt.Println(\"Will be logged to \", abspath)\n\t\t\twriter = mylog.RotateLog(abspath)\n\t\t}\n\t} else {\n\t\t// stdout only\n\t\tfmt.Println(\"Will be logged to stdout\")\n\t\twriter = os.Stdout\n\t}\n\tlogrus.SetOutput(writer)\n\n\t// Only log the warning severity or above.\n\tswitch viper.GetString(\"log.level\") {\n\tcase \"panic\":\n\t\tlogrus.SetLevel(logrus.PanicLevel)\n\tcase \"fatal\":\n\t\tlogrus.SetLevel(logrus.FatalLevel)\n\tcase \"error\":\n\t\tlogrus.SetLevel(logrus.ErrorLevel)\n\tcase \"warn\":\n\t\tlogrus.SetLevel(logrus.WarnLevel)\n\tcase \"info\":\n\t\tlogrus.SetLevel(logrus.InfoLevel)\n\tcase \"debug\":\n\t\tlogrus.SetLevel(logrus.DebugLevel)\n\tcase \"trace\":\n\t\tlogrus.SetLevel(logrus.TraceLevel)\n\tdefault:\n\t\tfmt.Println(\"Unknown level\", viper.GetString(\"log.level\"), \"Set to INFO\")\n\t\tlogrus.SetLevel(logrus.InfoLevel)\n\t}\n\n\tFormatter := new(logrus.TextFormatter)\n\tFormatter.ForceColors = false\n\tFormatter.DisableColors = true\n\tFormatter.TimestampFormat = \"06-01-02 15:04:05.000000\"\n\tFormatter.FullTimestamp = true\n\tlogrus.SetFormatter(Formatter)\n\n\t// redirect 
standard log to logrus\n\t//log.SetOutput(logrus.StandardLogger().Writer())\n\t//log.Println(\"Standard logger. Am I here?\")\n\tlineNum := viper.GetBool(\"log_line_number\")\n\tif lineNum {\n\t\t//filenameHook := filename.NewHook()\n\t\t//filenameHook.Field = \"line\"\n\t\t//logrus.AddHook(filenameHook)\n\t\tlogrus.SetReportCaller(true)\n\t}\n\tbyLevel := viper.GetBool(\"multifile_by_level\")\n\tif byLevel && logdir != \"\" {\n\t\tpanicLog, _ := filepath.Abs(path.Join(logdir, \"panic.log\"))\n\t\tfatalLog, _ := filepath.Abs(path.Join(logdir, \"fatal.log\"))\n\t\twarnLog, _ := filepath.Abs(path.Join(logdir, \"warn.log\"))\n\t\terrorLog, _ := filepath.Abs(path.Join(logdir, \"error.log\"))\n\t\tinfoLog, _ := filepath.Abs(path.Join(logdir, \"info.log\"))\n\t\tdebugLog, _ := filepath.Abs(path.Join(logdir, \"debug.log\"))\n\t\ttraceLog, _ := filepath.Abs(path.Join(logdir, \"trace.log\"))\n\t\twriterMap := lfshook.WriterMap{\n\t\t\tlogrus.PanicLevel: mylog.RotateLog(panicLog),\n\t\t\tlogrus.FatalLevel: mylog.RotateLog(fatalLog),\n\t\t\tlogrus.WarnLevel: mylog.RotateLog(warnLog),\n\t\t\tlogrus.ErrorLevel: mylog.RotateLog(errorLog),\n\t\t\tlogrus.InfoLevel: mylog.RotateLog(infoLog),\n\t\t\tlogrus.DebugLevel: mylog.RotateLog(debugLog),\n\t\t\tlogrus.TraceLevel: mylog.RotateLog(traceLog),\n\t\t}\n\t\tlogrus.AddHook(lfshook.NewHook(\n\t\t\twriterMap,\n\t\t\tFormatter,\n\t\t))\n\t}\n\tlogger := logrus.StandardLogger()\n\tlogrus.Debug(\"Logger initialized.\")\n\tbyModule := viper.GetBool(\"multifile_by_module\")\n\tif !byModule {\n\t\tlogdir = \"\"\n\t}\n\n\tdownloader.InitLoggers(logger, logdir)\n\tfetcher.InitLoggers(logger, logdir)\n\tp2p.InitLoggers(logger, logdir)\n\tog.InitLoggers(logger, logdir)\n\tsyncer.InitLoggers(logger, logdir)\n\tannsensus.InitLoggers(logger, logdir)\n\n}", "func Logger(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tlog.Infof(\"logging: request middleware\")\n\t\tnext.ServeHTTP(w, r)\n\t\tlog.Infof(\"logging: response middleware\")\n\t})\n}", "func GetLoggerWithPrefix(prefix string) StimLogger {\n\tif prefix == \"\" {\n\t\treturn GetLogger()\n\t}\n\tif prefixLogger == nil {\n\t\tstimLoggerCreateLock.Lock()\n\t\tif prefixLogger == nil {\n\t\t\tprefixLogger = make(map[string]StimLogger)\n\t\t}\n\t\tstimLoggerCreateLock.Unlock()\n\t}\n\tstimLoggerCreateLock.Lock()\n\tdefer stimLoggerCreateLock.Unlock()\n\tif sl, ok := prefixLogger[prefix]; ok {\n\t\treturn sl\n\t}\n\tprefixLogger[prefix] = &StimPrefixLogger{stimLogger: GetLogger(), prefix: prefix}\n\treturn prefixLogger[prefix]\n}", "func newLogrus(level string, formatter string, output io.Writer) *logrus.Logger {\n\tl, err := logrus.ParseLevel(level)\n\tif err != nil {\n\t\tfmt.Printf(\"Bad level: %v, set it to 'debug'\", level)\n\t\tl = logrus.DebugLevel\n\t}\n\tlogger := &logrus.Logger{\n\t\tOut: output,\n\t\tHooks: make(logrus.LevelHooks),\n\t\tLevel: l,\n\t}\n\tswitch formatter {\n\tcase \"json\":\n\t\tlogger.Formatter = &logrus.JSONFormatter{TimestampFormat: TimestampFormat}\n\tcase \"text\":\n\t\tfallthrough\n\tdefault:\n\t\tlogger.Formatter = &logrus.TextFormatter{DisableColors: true,\n\t\t\tDisableSorting: false, TimestampFormat: TimestampFormat}\n\t}\n\treturn logger\n}", "func GinRecovery(logger log.Logger) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tdefer func() {\n\t\t\tif err := recover(); err != nil {\n\t\t\t\t// Check for a broken connection, as it is not really a\n\t\t\t\t// condition that warrants a panic stack trace.\n\t\t\t\tvar 
brokenPipe bool\n\t\t\t\tif ne, ok := err.(*net.OpError); ok {\n\t\t\t\t\tif se, ok := ne.Err.(*os.SyscallError); ok {\n\t\t\t\t\t\tif strings.Contains(strings.ToLower(se.Error()), \"broken pipe\") ||\n\t\t\t\t\t\t\tstrings.Contains(strings.ToLower(se.Error()), \"connection reset by peer\") {\n\t\t\t\t\t\t\tbrokenPipe = true\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif logger != nil {\n\t\t\t\t\thttpRequest, _ := httputil.DumpRequest(c.Request, false)\n\t\t\t\t\theaders := strings.Split(string(httpRequest), \"\\r\\n\")\n\t\t\t\t\tfor idx, header := range headers {\n\t\t\t\t\t\tcurrent := strings.Split(header, \":\")\n\t\t\t\t\t\tif current[0] == \"Authorization\" {\n\t\t\t\t\t\t\theaders[idx] = current[0] + \": *\"\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tlogger.Errorc(c.Request.Context(), \"[Recovery] panic recovered:\\n%s\\n [%s]\",\n\t\t\t\t\t\tstrings.Join(headers, \"\\r\\n\"), err)\n\t\t\t\t}\n\n\t\t\t\tif brokenPipe {\n\t\t\t\t\t// If the connection is dead, we can't write a status to it.\n\t\t\t\t\tc.Error(err.(error)) // nolint: errcheck\n\t\t\t\t\tc.Abort()\n\t\t\t\t} else {\n\t\t\t\t\tc.AbortWithStatus(http.StatusInternalServerError)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t\tc.Next()\n\t}\n}", "func requestLogger(l *log.Logger) Wrapper {\n\treturn func(fn http.HandlerFunc) http.HandlerFunc {\n\t\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\t\trid, ok := req.Context().Value(\"rid\").(string)\n\t\t\tif !ok {\n\t\t\t\trid = \"none\"\n\t\t\t}\n\n\t\t\tl.Printf(\"[%s] Before\", rid)\n\n\t\t\tstart := time.Now()\n\t\t\tfn(w, req)\n\t\t\tl.Printf(\"[%s] Finished in %s\", rid, time.Since(start))\n\t\t}\n\t}\n}", "func RequestLoggingHandler(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\tres := w.(negroni.ResponseWriter)\n\tnext(w, r)\n\tlogrus.Info(fmt.Sprintf(\"%s %s %d\", r.Method, r.RequestURI, res.Status()))\n}", "func NewRouter() *gin.Engine {\n\trouter := logger_util.NewGinWithLogrus(logger.GinLog)\n\tAddService(router)\n\treturn router\n}", "func NewRouter() *gin.Engine {\n\trouter := logger_util.NewGinWithLogrus(logger.GinLog)\n\tAddService(router)\n\treturn router\n}", "func logger(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {\n\tlog.Printf(\"---> Unary interceptor: %v\\n\", info.FullMethod)\n\treturn handler(ctx, req)\n}", "func Inject(l *logrus.Entry) {\n\tgrpclog.SetLogger(New(l))\n}", "func RequestLoggerFromCtx(ctx context.Context) Logger {\n\tif l, _ := ctx.Value(ctxKeyLogger).(*zap.SugaredLogger); l != nil {\n\t\treturn l\n\t}\n\n\treturn stdLogger\n}", "func SetLogger(l LevelledLogger) {\n\tlogger = &logPrefixer{log: l}\n}", "func NewLogursLogger(logger *logrus.Logger) *negronilogrus.Middleware {\n\tm := negronilogrus.NewMiddlewareFromLogger(logger, \"negroni\")\n\tm.SetLogStarting(false)\n\tm.Before = logBefore\n\tm.After = logAfter\n\treturn m\n}", "func (h *HomeHandlers) Logger(next http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tstartTime := time.Now()\n\t\tdefer h.logger.Printf(\"request processed in %d ms \\n\", time.Now().Sub(startTime))\n\t\tnext(w, r)\n\t}\n}", "func Logger(next http.Handler) http.Handler {\n\treturn DefaultLogger(next)\n}", "func New() *logrus.Logger {\n\treturn logrus.New()\n}", "func NewRegistraterWithLogrus(base Registrater, log *logrus.Entry) RegistraterWithLogrus {\n\treturn RegistraterWithLogrus{\n\t\tbase: base,\n\t\tlog: log,\n\t}\n}", "func Wrap(l *gol.Logger) 
logging.Logger {\n\tl.ExtraCalldepth += 1 // one layer of wrapping in loggerImpl struct above\n\treturn &loggerImpl{l}\n}", "func ReplaceGrpcLogger() {\n\tgrpclog.SetLoggerV2(ctx_glog.Logger)\n}", "func newGossipLogWrapper() *log.Logger {\n\treturn log.New(&gossipLogWriter{\n\t\tlogger: logger.GetLogger(\"gossip\"),\n\t}, \"\", 0)\n}", "func loggerMiddleware() martini.Handler {\n\treturn func(res http.ResponseWriter, req *http.Request, c martini.Context) {\n\t\tstart := time.Now()\n\t\taddr := req.Header.Get(\"X-Real-IP\")\n\t\tif addr == \"\" {\n\t\t\taddr = req.Header.Get(\"X-Forwarded-For\")\n\t\t\tif addr == \"\" {\n\t\t\t\taddr = req.RemoteAddr\n\t\t\t}\n\t\t}\n\t\trw := res.(martini.ResponseWriter)\n\t\tc.Next()\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"method\": req.Method,\n\t\t\t\"path\": req.URL.Path,\n\t\t\t\"addr\": addr,\n\t\t\t\"status\": rw.Status(),\n\t\t\t\"status_text\": http.StatusText(rw.Status()),\n\t\t\t\"duration\": time.Since(start),\n\t\t}).Info(\"Completed\")\n\t}\n}", "func loggerInterceptor() func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {\n\treturn func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {\n\t\tpeer, _ := peer.FromContext(ctx)\n\t\tmaxDispLen := 50\n\t\treqStr := fmt.Sprintf(\"%v\", req)\n\n\t\tvar reqDispStr string\n\t\tif len(reqStr) > maxDispLen {\n\t\t\treqDispStr = reqStr[:maxDispLen] + \"...\"\n\t\t} else {\n\t\t\treqDispStr = reqStr\n\t\t}\n\t\trpcsLog.Tracef(\"%v: %v %v\\n\", peer.Addr.String(), info.FullMethod, reqDispStr)\n\n\t\tresp, err := handler(ctx, req)\n\n\t\tif err != nil {\n\t\t\trpcsLog.Tracef(\"%v: FAILURE %v %s\", peer.Addr.String(), info.FullMethod, err)\n\t\t}\n\t\treturn resp, err\n\t}\n}", "func DefaultLogger(c *gin.Context) *Logger {\n\tv, ok := c.Get(DefaultKey)\n\tif !ok {\n\t\txl := New(c.Writer, c.Request)\n\t\tc.Set(DefaultKey, xl)\n\t\treturn xl\n\t}\n\treturn v.(*Logger)\n}", "func Logger(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tstart := time.Now()\n\t\tww := middleware.NewWrapResponseWriter(w, r.ProtoMajor)\n\t\thost, port, _ := net.SplitHostPort(r.RemoteAddr)\n\t\tdefer func() {\n\t\t\tvar event *zerolog.Event\n\t\t\tif ww.Status() < 500 {\n\t\t\t\tevent = log.Info()\n\t\t\t} else {\n\t\t\t\tevent = log.Error()\n\t\t\t}\n\t\t\tevent.\n\t\t\t\tFields(map[string]interface{}{\n\t\t\t\t\t\"host\": host,\n\t\t\t\t\t\"port\": port,\n\t\t\t\t\t\"method\": r.Method,\n\t\t\t\t\t\"status\": ww.Status(),\n\t\t\t\t\t\"took\": float64(time.Since(start)) / 1e6,\n\t\t\t\t\t\"bytes_in\": r.Header.Get(\"Content-Length\"),\n\t\t\t\t\t\"bytes_out\": ww.BytesWritten(),\n\t\t\t\t}).\n\t\t\t\tTimestamp().\n\t\t\t\tMsg(r.URL.Path)\n\t\t}()\n\t\tnext.ServeHTTP(ww, r)\n\t})\n}", "func New(l *logrus.Logger) loggers.Logger {\n\treturn &Logrus{l}\n}", "func logger(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tformat := \"[%s] User agent => %s Remote addr => %s\"\n\t\tlog.Printf(format, r.Method, r.UserAgent(), r.RemoteAddr)\n\t\tnext.ServeHTTP(w, r)\n\t})\n}", "func Logger(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {\n\t\tfmt.Printf(\"[ %s ] :: %s :: %v\\n\", req.Method, req.URL, time.Now())\n\t\tnext.ServeHTTP(res, req)\n\t})\n}", "func withLogger(logger Logger) func(next http.Handler) http.Handler 
{\n\treturn func(next http.Handler) http.Handler {\n\t\tfn := func(w http.ResponseWriter, r *http.Request) {\n\n\t\t\t// Capture our own copy of the logger so change in this closure\n\t\t\t// won't affect the object passed-in.\n\n\t\t\tlogger := logger\n\n\t\t\tif reqID := middleware.GetReqID(r.Context()); reqID != \"\" {\n\t\t\t\tlogger = logger.With(\"HTTP Request ID\", reqID)\n\t\t\t}\n\n\t\t\t// Defer a function to log and entry once the main handler\n\t\t\t// has returned.\n\n\t\t\tww := middleware.NewWrapResponseWriter(w, r.ProtoMajor)\n\t\t\tt1 := time.Now()\n\n\t\t\tdefer func() {\n\t\t\t\tscheme := \"http\"\n\t\t\t\tif r.TLS != nil {\n\t\t\t\t\tscheme = \"https\"\n\t\t\t\t}\n\n\t\t\t\tlogger.Infow(\"HTTP request\",\n\t\t\t\t\t\"Method\", r.Method,\n\t\t\t\t\t\"URI\", fmt.Sprintf(\"%s://%s%s\", scheme, r.Host, r.RequestURI),\n\t\t\t\t\t\"Protocol\", r.Proto,\n\t\t\t\t\t\"Remote Address\", r.RemoteAddr,\n\t\t\t\t\t\"Status\", ww.Status(),\n\t\t\t\t\t\"Bytes Written\", ww.BytesWritten(),\n\t\t\t\t\t\"Time Taken\", time.Since(t1),\n\t\t\t\t)\n\t\t\t}()\n\n\t\t\tctx := context.WithValue(r.Context(), ctxKeyLogger, logger)\n\t\t\tnext.ServeHTTP(ww, r.WithContext(ctx))\n\t\t}\n\t\treturn http.HandlerFunc(fn)\n\t}\n}", "func init() {\n\taddrmgr.UseLogger(amgrLog)\n\tconnmgr.UseLogger(cmgrLog)\n\tdatabase.UseLogger(bcdbLog)\n\tblockchain.UseLogger(chanLog)\n\tindexers.UseLogger(indxLog)\n\tmining.UseLogger(minrLog)\n\tcpuminer.UseLogger(minrLog)\n\tpeer.UseLogger(peerLog)\n\ttxscript.UseLogger(scrpLog)\n\tnetsync.UseLogger(syncLog)\n\tmempool.UseLogger(txmpLog)\n}", "func NewLogger() func(next http.Handler) http.Handler {\n\treturn middleware.RequestLogger(&Logger{})\n}", "func RequestLogger() wago.MiddleWareHandler {\n\treturn func(c *wago.Context) {\n\t\tlogger.WithFields(logger.Fields{\n\t\t\twago.REQUEST_ID: c.GetString(wago.REQUEST_ID),\n\t\t\t\"path\": c.Request.URL.Path,\n\t\t\t\"host\": c.Request.Host,\n\t\t\t\"header\": c.Request.Header,\n\t\t}).Debug(\"before-handle\")\n\n\t\tc.Next()\n\n\t\tlogger.WithFields(logger.Fields{\n\t\t\twago.REQUEST_ID: c.GetString(wago.REQUEST_ID),\n\t\t\t\"path\": c.Request.URL.Path,\n\t\t\t\"host\": c.Request.Host,\n\t\t\t\"header\": c.Request.Header,\n\t\t}).Debug(\"after-handle\")\n\t}\n}", "func rpcLogger(flag bool) Logger {\n\treturn makeLogger(flag, Fields{\"layer\": \"rpc\"})\n}" ]
[ "0.7025706", "0.6979332", "0.6601648", "0.6236444", "0.61680114", "0.6146722", "0.602604", "0.5978633", "0.59643364", "0.5946916", "0.59344643", "0.5901862", "0.5896292", "0.5896292", "0.57851714", "0.5780678", "0.5773026", "0.57373214", "0.57046056", "0.5691755", "0.5688423", "0.56778526", "0.5660871", "0.5634347", "0.5621055", "0.56115717", "0.5576399", "0.5568366", "0.556691", "0.5565056", "0.5560436", "0.55449533", "0.5500224", "0.5493875", "0.5463791", "0.54600275", "0.544598", "0.5444606", "0.5442374", "0.54416186", "0.5423525", "0.54224586", "0.54173803", "0.5375264", "0.5368531", "0.5323283", "0.53224015", "0.53198373", "0.5318763", "0.53173965", "0.53172386", "0.5304729", "0.53020054", "0.530114", "0.5300454", "0.5297709", "0.5294468", "0.52908057", "0.5287706", "0.5285494", "0.5285135", "0.5278153", "0.5277521", "0.52753824", "0.52691597", "0.52689034", "0.5264122", "0.5241253", "0.52382576", "0.5234605", "0.5224498", "0.5222159", "0.5218712", "0.5215053", "0.520198", "0.520198", "0.5200972", "0.5199208", "0.51960546", "0.5195533", "0.51883644", "0.51856446", "0.518271", "0.5182254", "0.5181707", "0.5177255", "0.5175079", "0.51705074", "0.51656765", "0.516531", "0.51643085", "0.51607674", "0.5159863", "0.5157333", "0.515466", "0.51545656", "0.5150444", "0.5149965", "0.5141119", "0.51403415" ]
0.75822556
0
NewPage creates a new page
func (d *Dir) NewPage(f os.FileInfo) (*Page, error) {
	prs, err := parser.New(f.Name())
	if err != nil || strings.HasPrefix(f.Name(), "_") {
		return nil, fmt.Errorf("Not allowed file format %s\n", f.Name())
	}

	cont, err := ioutil.ReadFile(getPath(d.mdDir, f.Name()))
	if err != nil {
		return nil, err
	}

	title := prs.GetTitle(f.Name())
	html := prs.Parse(cont)

	p := &Page{}
	p.Title = title
	p.Seo = &Seo{
		Title: "",
		Description: "",
		Keywords: "",
	}
	p.Body = template.HTML(html)
	p.Path = getPath(d.htmlDir, getUrl(p.Title) + ".html")
	p.Url = getPath(d.path, getUrl(p.Title) + ".html")
	p.Template = d.template

	return p, nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *SimPDF) NewPage(page models.Pages) {\n\ts.Page = page\n\ts.PDF.AddPageFormat(page.ToPDFOrientation(), gofpdf.SizeType{Wd: page.Width, Ht: page.Height})\n}", "func NewPage(ctx *sweetygo.Context) error {\n\tctx.Set(\"title\", \"New\")\n\tctx.Set(\"editor\", true)\n\treturn ctx.Render(200, \"posts/new\")\n}", "func NewPage(mediaBox, cropBox *types.Rectangle) Page {\n\treturn Page{\n\t\tMediaBox: mediaBox,\n\t\tCropBox: cropBox,\n\t\tFm: FontMap{},\n\t\tIm: ImageMap{},\n\t\tAnnotTabs: map[int]FieldAnnotation{},\n\t\tBuf: new(bytes.Buffer)}\n}", "func NewPage(title, content, burl, url string) *Page {\n\treturn &Page{title, content, burl, url}\n}", "func NewPage(hc *HttpContext, title string, user interface{}, ctx interface{}, data map[string]string) *Page {\n\treturn &Page{\n\t\tTitle: title,\n\t\tUser: user,\n\t\tFlashes: hc.GetFlashes(),\n\t\tContext: ctx,\n\t\tData: data,\n\t\tClientConfig: ClientConfig,\n\t}\n\n}", "func newPage(db *Database, pageHead *pageHead) *page {\n\tp := page{\n\t\tdb: db,\n\t\tindex: db.dbHead.pageCount + 1,\n\t\tpageHead: pageHead,\n\t\tdata: make([]byte, db.dbHead.pageSize-PAGE_HEAD_SIZE),\n\t}\n\tdb.dbHead.pageCount++\n\tdb.writeHead()\n\treturn &p\n}", "func NewPage(title string, author string, authoremail string, posts []Post) Page {\n\treturn Page{Title: title, Author: author, Posts: posts, AuthorEmail: authoremail}\n}", "func NewPage(recs interface{}, pr *PageRequest, total int64) *Page {\n\tmeta := &PageMetadata{}\n\tmeta.PageRequest = pr\n\tmeta.Total = total\n\t// again, check per page\n\tif pr.PerPage <= 0 || pr.PerPage > 100 {\n\t\tpr.PerPage = 100\n\t}\n\n\tmeta.PageCount = int64(math.Ceil(float64(total) / float64(pr.PerPage)))\n\tpage := &Page{\n\t\tPageMetadata: meta,\n\t\tData: recs,\n\t}\n\n\treturn page\n}", "func newPage(pattern string, tmpls []string, getData getDataFn) *page {\n\treturn &page{\n\t\tpattern,\n\t\tgetTemplate(tmpls),\n\t\tgetData,\n\t}\n}", "func NewPage(content *file.File) (p *Page, err error) {\n\tp = &Page{\n\t\tfile: content,\n\t}\n\tp.meta = file.NewFile(content.Name+\".yaml\", content.Dir)\n\terr = p.Update()\n\treturn\n}", "func NewPage(url *Url) *Page {\n\tlogger := logrus.WithField(\"page\", url.String())\n\treturn &Page{Url: url, Logger: logger}\n}", "func NewPage(ID string, h *live.Handler, s *live.Socket, title string) (*page.Component, error) {\n\treturn page.NewComponent(ID, h, s,\n\t\tpage.WithRegister(pageRegister),\n\t\tpage.WithMount(pageMount(title)),\n\t\tpage.WithRender(pageRender),\n\t)\n}", "func CreateArticelePage(w http.ResponseWriter, r *http.Request) {\n\n\tvars := mux.Vars(r)\n\n\tmtitle := vars[\"mtitle\"]\n\n\tmongoDBDialInfo := &mgo.DialInfo{\n\n\t\tAddrs: []string{\"mymongo-controller\"},\n\t\tTimeout: 60 * time.Second,\n\t\tDatabase: \"admin\",\n\t\tUsername: mongodbuser,\n\t\tPassword: mongodbpass,\n\t\tMechanism: \"SCRAM-SHA-1\",\n\t}\n\n\tdbsession, err := mgo.DialWithInfo(mongoDBDialInfo)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer dbsession.Close()\n\n\tif mtitle == \"\" {\n\n\t\tlog.Println(\"no mtitle\")\n\n\t\tarticles := dbhandler.GetAllForStatic(*dbsession, \"kaukotyo.eu\")\n\t\tjson.NewEncoder(w).Encode(articles)\n\n\t} else {\n\n\t\tarticle := dbhandler.GetOneArticle(*dbsession, mtitle)\n\n\t\tjson.NewEncoder(w).Encode(article)\n\n\t}\n}", "func NewPage(c PageConfig) Page {\n\treturn driver.NewPage(c)\n}", "func CreatePage(browserName ...string) core.Page {\n\tcapabilities := core.Use()\n\tif len(browserName) > 0 
{\n\t\tcapabilities.Browser(browserName[0])\n\t}\n\tnewPage, err := driver.Page(capabilities)\n\tcheckFailure(err)\n\treturn newPage\n}", "func createLandingPage(metadata *libgin.RepositoryMetadata, targetfile string) error {\n\ttmpl, err := prepareTemplates(\"DOIInfo\", \"LandingPage\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tfp, err := os.Create(targetfile)\n\tif err != nil {\n\t\tlog.Printf(\"Could not create the landing page file: %s\", err.Error())\n\t\treturn err\n\t}\n\tdefer fp.Close()\n\tif err := tmpl.Execute(fp, metadata); err != nil {\n\t\tlog.Printf(\"Error rendering the landing page: %s\", err.Error())\n\t\treturn err\n\t}\n\treturn nil\n}", "func (hs Handlers) APIPagesCreate(c echo.Context) error {\n\ttype Request struct {\n\t\tPostID string `json:\"post_id\" validate:\"required,min=1\"`\n\t\tIndex int `json:\"index\" validate:\"required\"`\n\t\tSlug string `json:\"slug\" validate:\"required,min=1\"`\n\t\tInNavigation bool `json:\"in_navigation\"`\n\t}\n\ttype Response struct {\n\t\tID string `json:\"id\"`\n\t\tIndex int `json:\"index\"`\n\t\tSlug string `json:\"slug\"`\n\t\tInNavigation bool `json:\"in_navigation\"`\n\t\tCreatedAt time.Time `json:\"created_at\"`\n\t\tUpdatedAt time.Time `json:\"updated_at\"`\n\t}\n\treq := Request{}\n\terr := c.Bind(&req)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = validator.New().Struct(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\t//page, err := pageModel.Create(\n\t//\ths.DB,\n\t//\treq.PostID,\n\t//\treq.Index,\n\t//\treq.Slug,\n\t//\treq.InNavigation,\n\t//)\n\t//res := Response{\n\t//\tID: page.ID,\n\t//\tIndex: page.Index,\n\t//\tSlug: page.Slug,\n\t//\tInNavigation: page.InNavigation,\n\t//\tCreatedAt: page.CreatedAt,\n\t//\tUpdatedAt: page.UpdatedAt,\n\t//}\n\t//return c.JSON(http.StatusOK, &res)\n\treturn nil\n}", "func NewInfoPage(p *BoardPage, listIdx, taskIdx int) tview.Primitive {\n\ttask, err := p.data.GetTask(listIdx, taskIdx)\n\tif err != nil {\n\t\tapp.Stop()\n\t\tlog.Fatal(err)\n\t}\n\tinfo := tview.NewModal().\n\t\tSetText(fmt.Sprintf(\"Task: %v\\n Task Description: %v\", task.ItemName, task.ItemDescription)).\n\t\tSetBackgroundColor(theme.PrimitiveBackgroundColor).\n\t\tAddButtons([]string{\"OK\"}).\n\t\tSetDoneFunc(func(buttonIndex int, buttonLabel string) {\n\t\t\tif buttonLabel == \"OK\" {\n\t\t\t\tcloseInfoPage()\n\t\t\t}\n\t\t})\n\tinfo.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey {\n\t\tswitch event.Key() {\n\t\tcase tcell.KeyEsc:\n\t\t\tcloseInfoPage()\n\t\t}\n\t\tswitch event.Rune() {\n\t\tcase 'q':\n\t\t\tcloseInfoPage()\n\t\t}\n\t\treturn event\n\t})\n\twidth, height := GetSize()\n\treturn GetCenteredModal(info, width/2, height/2)\n}", "func (self templateEngine) genBoardPage(prefix, frontend, newsgroup string, page int, outfile string, db Database) {\n // get the board model\n board := self.obtainBoard(prefix, frontend, newsgroup, db)\n // update the board page\n board = board.Update(page, db)\n if page >= len(board) {\n log.Println(\"board page should not exist\", newsgroup, \"page\", page)\n return\n }\n // render it\n wr, err := OpenFileWriter(outfile)\n if err == nil {\n updateLinkCacheForBoard(board[page])\n board[page].RenderTo(wr)\n wr.Close()\n log.Println(\"wrote file\", outfile)\n } else {\n log.Println(\"error generating board page\", page, \"for\", newsgroup, err)\n }\n // save it\n self.groups[newsgroup] = board\n}", "func AddPage(newPage model.Page) error {\n\t_, err := sess.InsertInto(\"page\").Columns(\"url\", \"title\", \"tags\", 
\"content\").Record(newPage).Exec()\n\treturn err\n}", "func NewPage(in php_serialize.PhpArray) (Page, error) {\n\tentries := make([]EntryRef, 0)\n\n\tfor key, val := range in[\"entries\"].(php_serialize.PhpArray) {\n\t\tentries = append(entries, NewEntryRef(php_serialize.PhpValueString(key), php_serialize.PhpValueString(val)))\n\t}\n\n\treturn Page{\n\t\tTotal: php_serialize.PhpValueInt64(in[\"total\"]),\n\t\tPerpage: php_serialize.PhpValueInt64(in[\"perpage\"]),\n\t\tEntries: entries,\n\t}, nil\n}", "func CreatePage(p Page) (int, error) {\n\tvar id int\n\terr := newDB().DB.QueryRow(\"INSERT INTO PAGES(title,content) values($1,$2) RETURNING id\", p.Title, p.Content).Scan(&id)\n\treturn id, err\n}", "func NewPage(body []byte, css, title string) *PageData {\n\treturn &PageData{Title: title, CSS: css, Body: string(body)}\n}", "func NewPage(limit string) (p Page, err error) {\n\tif limit == \"\" {\n\t\treturn p, err\n\t}\n\n\tlimit = strings.TrimSpace(limit)\n\n\tp.Limit, err = strconv.ParseUint(limit, 10, 64)\n\tif err != nil {\n\t\treturn p, err\n\t}\n\n\tif p.Valid() {\n\t\treturn p, err\n\t}\n\treturn p, errors.New(\"Invalid limit specified\")\n}", "func newAllProcPage() *allProcPage {\n\tpage := &allProcPage{\n\t\tGrid: ui.NewGrid(),\n\t\tProcTable: viz.NewTable(),\n\t}\n\tpage.init()\n\treturn page\n}", "func (f Impl) AddPage() {\n\tf.GoPdf.AddPage()\n}", "func ShowCreatePage(c *gin.Context) {\n\tisLoggedIn, _ := c.Get(\"is_logged_in\")\n\trender(c, gin.H{\n\t\t\"is_logged_in\": isLoggedIn,\n\t\t\"title\": \"Create Message\"}, \"create-message.html\")\n}", "func (p *Pages) Add(pageName string, creatorFunc PageCreator) error {\n\tvar prefixPage = cleanAllSlashes(handlePath(p.prefix, pageName))\n\n\tvar routerPath = cleanAllSlashes(handlePath(pageName))\n\tvar routerPathForMore = cleanAllSlashes(handlePath(p.prefix, pageName, \"*path\"))\n\n\tif p.Has(prefixPage) {\n\t\treturn nerror.New(\"already exists\")\n\t}\n\n\tvar manager = NewPageSessionManager(p.ctx, prefixPage, p.maxIdle, p.idleCheck, creatorFunc, p.theme, p.onNewPage.Emit)\n\tmanager.Start()\n\n\tp.waiter.Add(1)\n\tgo func() {\n\t\tdefer p.waiter.Done()\n\t\tmanager.Wait()\n\n\t\tp.sl.Lock()\n\t\tdelete(p.managers, prefixPage)\n\t\tp.sl.Unlock()\n\t}()\n\n\tp.sl.Lock()\n\tp.managers[prefixPage] = manager\n\tp.sl.Unlock()\n\n\tvar handler = createHandler(prefixPage, manager, p.tr)\n\tp.router.Serve(routerPath, handler)\n\tp.router.Serve(routerPathForMore, handler)\n\tp.onNewPage.Emit(prefixPage, nil)\n\treturn nil\n}", "func CreatePage(\n\txRefTable *model.XRefTable,\n\tparentPageIndRef types.IndirectRef,\n\tp *model.Page,\n\tfonts model.FontMap) (*types.IndirectRef, types.Dict, error) {\n\n\tpageDict := types.Dict(\n\t\tmap[string]types.Object{\n\t\t\t\"Type\": types.Name(\"Page\"),\n\t\t\t\"Parent\": parentPageIndRef,\n\t\t\t\"MediaBox\": p.MediaBox.Array(),\n\t\t\t\"CropBox\": p.CropBox.Array(),\n\t\t},\n\t)\n\n\terr := addPageResources(xRefTable, pageDict, *p, fonts)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tir, err := xRefTable.StreamDictIndRef(p.Buf.Bytes())\n\tif err != nil {\n\t\treturn nil, pageDict, err\n\t}\n\tpageDict.Insert(\"Contents\", *ir)\n\n\tpageDictIndRef, err := xRefTable.IndRefForNewObject(pageDict)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tif len(p.AnnotTabs) == 0 && len(p.Annots) == 0 && len(p.LinkAnnots) == 0 {\n\t\treturn pageDictIndRef, pageDict, nil\n\t}\n\n\tif err := setAnnotationParentsAndFields(xRefTable, p, *pageDictIndRef); err != nil {\n\t\treturn nil, nil, 
err\n\t}\n\n\tarr, err := mergeAnnotations(nil, p.Annots, p.AnnotTabs)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tfor _, la := range p.LinkAnnots {\n\t\td, err := la.RenderDict(xRefTable, *pageDictIndRef)\n\t\tif err != nil {\n\t\t\treturn nil, nil, &json.UnsupportedTypeError{}\n\t\t}\n\t\tir, err := xRefTable.IndRefForNewObject(d)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tarr = append(arr, *ir)\n\t}\n\n\tpageDict[\"Annots\"] = arr\n\n\treturn pageDictIndRef, pageDict, err\n}", "func (h *Handler) AddPage(c echo.Context) error {\n\tm := echo.Map{}\n\tif err := c.Bind(&m); err != nil {\n\t\treturn err\n\t}\n\troute, title := m[\"route\"].(string), m[\"title\"].(string)\n\n\tuserDataMap := utils.GetUserDataFromContext(&c)\n\temail := (*userDataMap)[\"email\"].(string)\n\n\tpage, err := h.pageStore.AddPage(email, route, title)\n\tif err != nil {\n\t\tutils.Logger.Error(err)\n\t\treturn c.JSON(http.StatusInternalServerError, createRes(false, nil, nil, http.StatusText(http.StatusInternalServerError)))\n\t}\n\treturn c.JSON(http.StatusOK, createRes(true, page, nil, \"\"))\n}", "func Page(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tp, err := strconv.ParseUint(ps.ByName(\"page\"), 10, 8)\n\tif err != nil {\n\t\thttp.NotFound(w, r)\n\t}\n\tdata, err := newPageData(uint8(p))\n\tif err != nil {\n\t\thttp.NotFound(w, r)\n\t}\n\terr = t.ExecuteTemplate(w, \"index\", data)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func (lf *ListFactory) CreatePage(title, author string) PageInterface {\n\treturn newListPage(title, author)\n}", "func (env *Env) Create(res http.ResponseWriter, req *http.Request) {\n\tswitch req.Method {\n\tcase \"GET\":\n\t\tenv.Log.V(1, \"beginning handling of GET request for Create endnode.\")\n\t\tp := &webAppGo.Page{}\n\t\tenv.Log.V(1, \"rendering the create template.\")\n\t\tenv.Render(res, \"create\", p)\n\tcase \"POST\":\n\t\tenv.Log.V(1, \"beginning handling of POST request for Create endnode.\")\n\t\ttitle := strings.Title(req.FormValue(\"title\"))\n\t\tif strings.Contains(title, \" \") {\n\t\t\ttitle = strings.Replace(title, \" \", \"_\", -1)\n\t\t}\n\t\tbody := req.FormValue(\"body\")\n\t\tp := &webAppGo.Page{Title: strings.Title(title), Body: []byte(body)}\n\t\terr := env.Cache.SaveToCache(p)\n\t\tif err != nil {\n\t\t\tenv.Log.V(1, \"notifying client that an internal error occured. Error assocaited with Cache.SaveToCache.\")\n\t\t\thttp.Error(res, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\terr = env.DB.SavePage(p)\n\t\tif err != nil {\n\t\t\tenv.Log.V(1, \"notifying client that an internal error occured. 
Error assocaited with DB.SavePage\")\n\t\t\thttp.Error(res, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tenv.Log.V(1, \"page successfully saved, redirecting the client to /view/PageTitle.\")\n\t\thttp.Redirect(res, req, \"/view/\"+title, 302)\n\t\treturn\n\t}\n}", "func NewPageBuffer(aSlice interface{}, desiredPageNo int) *PageBuffer {\n return newPageBuffer(\n sliceValue(aSlice, false),\n desiredPageNo,\n valueHandler{})\n}", "func NewPageHandler(s page.Repository) *PageHandler {\n\treturn &PageHandler{\n\t\trepository: s,\n\t}\n}", "func (s *PagesService) Create(ctx context.Context, page Page) (*Page, *http.Response, error) {\n\tu := \"v1/pages\"\n\treq, err := s.client.NewRequest(\"POST\", u, page)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treq.Header.Set(\"Accept\", \"application/json\")\n\n\tvar newPage *Page\n\tresp, err := s.client.Do(ctx, req, &newPage)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn newPage, resp, nil\n}", "func (d *webData) mainPage(w http.ResponseWriter, r *http.Request) {\n\t//start a web page based on template\n\terr := d.tpl.ExecuteTemplate(w, \"mainCompletePage\", d)\n\tif err != nil {\n\t\tlog.Println(\"mainPage: template execution error = \", err)\n\t}\n}", "func NewPage(url string) *Page {\n\tp := Page{\n\t\tUrl: url,\n\t\tArticles: make([]*Article, 0),\n\t}\n\n\turl = YC_ROOT + url\n\n\thead, _ := http.NewRequest(\"HEAD\", url, nil)\n\n\tif resp, err := client.Do(head); err == nil && len(resp.Cookies()) > 0 {\n\t\tc := resp.Cookies()\n\t\tcfduid = c[0].Raw\n\t} /*else {\n\t\tgoncurses.End()\n\t\tlog.Println(resp)\n\t\tlog.Println(err)\n\t}*/\n\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdoc := doReq(req)\n\n\t//Get all the trs with subtext for children then go back one (for the first row)\n\trows := doc.Find(\".subtext\").ParentsFilteredUntil(\"tr\", \"tbody\").Prev()\n\n\tvar a bool\n\n\tp.NextUrl, a = doc.Find(\"td.title\").Last().Find(\"a\").Attr(\"href\")\n\n\tif !a {\n\t\tgoncurses.End()\n\t\tlog.Println(\"Could not retreive next hackernews page. 
Time to go outside?\")\n\t}\n\n\tfor len(p.NextUrl) > 0 && p.NextUrl[0] == '/' {\n\t\tp.NextUrl = p.NextUrl[1:]\n\t}\n\n\trows.Each(func(i int, row *goquery.Selection) {\n\t\tar := Article{\n\t\t\tRank: len(p.Articles) + i,\n\t\t}\n\n\t\ttitle := row.Find(\".title\").Eq(1)\n\t\tlink := title.Find(\"a\").First()\n\n\t\tar.Title = link.Text()\n\n\t\tif url, exists := link.Attr(\"href\"); exists {\n\t\t\tar.Url = url\n\t\t}\n\n\t\trow = row.Next()\n\n\t\trow.Find(\"span.score\").Each(func(i int, s *goquery.Selection) {\n\t\t\tif karma, err := strconv.Atoi(strings.Split(s.Text(), \" \")[0]); err == nil {\n\t\t\t\tar.Karma = karma\n\t\t\t} else {\n\t\t\t\tlog.Println(\"Error getting karma count:\", err)\n\t\t\t}\n\n\t\t\tif idSt, exists := s.Attr(\"id\"); exists {\n\t\t\t\tif id, err := strconv.Atoi(strings.Split(idSt, \"_\")[1]); err == nil {\n\t\t\t\t\tar.Id = id\n\t\t\t\t} else {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\n\t\tsub := row.Find(\"td.subtext\")\n\t\tt := sub.Text()\n\n\t\tar.Created = parseCreated(t)\n\n\t\tar.User = sub.Find(\"a\").First().Text()\n\n\t\tcomStr := strings.Split(sub.Find(\"a\").Last().Text(), \" \")[0]\n\n\t\tif comNum, err := strconv.Atoi(comStr); err == nil {\n\t\t\tar.NumComments = comNum\n\t\t}\n\n\t\tp.Articles = append(p.Articles, &ar)\n\n\t})\n\n\treturn &p\n}", "func (a *PDFApiService) InsertNewPage(ctx _context.Context, pdfInsertNewPageParameters PdfInsertNewPageParameters) (PdfInsertNewPageResponse, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue PdfInsertNewPageResponse\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/api/pdf/InsertNewPage\"\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json-patch+json\", \"application/json\", \"text/json\", \"application/_*+json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"text/plain\", \"application/json\", \"text/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &pdfInsertNewPageParameters\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: 
localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v PdfInsertNewPageResponse\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func AddTablePage(c *fiber.Ctx) error {\n\t// Firstly checks if the user can actually perform this action\n\tif !databasepack.Allowed(querypack.INFO.Roles, \"creator_\") {\n\t\treturn c.Send([]byte(\"Permission declined!\"))\n\t}\n\t// Firstly check if all the informations are correct.\n\tvar tab databasepack.TableType\n\terr := c.BodyParser(&tab)\n\tif err != nil || tab.Type < 0 || tab.Type > 1 {\n\t\tfmt.Println(err.Error())\n\t\treturn c.Send([]byte(\"Wrong input.\"))\n\t}\n\texists := databasepack.FindTable(tab.Name)\n\tif exists != -1 {\n\t\treturn c.Send([]byte(\"Table with such name already exists!\"))\n\t}\n\t// Next check if the name is proper\n\tif !databasepack.CheckName([]byte(tab.Name)) {\n\t\treturn c.Send([]byte(\"Bad table name!\"))\n\t}\n\tdatabasepack.AddTable(tab)\n\treturn c.Send([]byte(\"Added table: \" + tab.Name))\n}", "func (p *Page) save(d *Dir) error {\n\tp.Sidebar = d.sidebar\n\tp.Items = d.pages\n\tfile, err := os.Create(p.Path)\n\n\tif (err != nil) {\n\t\treturn err\n\t}\n\n\tfmt.Printf(\"Create new page: %s\\n \\tby link:%s\\n\", p.Title, p.Path)\n\n\treturn p.render(file)\n}", "func viewPage(response http.ResponseWriter, request *http.Request) {\n\ttitle := request.URL.Path[len(\"/\"):]\n\tpage, err := loadPage(title)\n\tif err != nil {\n\t\thttp.Error(response, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tfmt.Fprintf(response, \"<div><h1>%s</h1></div><div><p>%s</p></div>\", page.Title, page.Body)\n}", "func CreatePreviewPage(req *http.Request, bp core.Page, dimensions []filter.ModelDimension, fm filter.Model, dst dataset.DatasetDetails, filterOutputID, datasetID, releaseDate, apiRouterVersion string, enableDatasetPreview bool, lang, serviceMessage string, emergencyBannerContent zebedee.EmergencyBanner) model.Preview {\n\tp := model.Preview{\n\t\tPage: bp,\n\t}\n\tp.FeatureFlags.SixteensVersion = sixteensVersion\n\tp.Metadata.Title = \"Preview and Download\"\n\tp.BetaBannerEnabled = true\n\tp.EnableDatasetPreview = enableDatasetPreview\n\tp.Language = lang\n\tp.ServiceMessage = serviceMessage\n\tp.EmergencyBanner = mapEmergencyBanner(emergencyBannerContent)\n\tp.RemoveGalleryBackground = true\n\n\tmapCookiePreferences(req, &p.CookiesPreferencesSet, &p.CookiesPolicy)\n\n\tctx := req.Context()\n\tlog.Info(ctx, \"mapping api responses to preview page model\", log.Data{\"filterOutputID\": filterOutputID, \"datasetID\": datasetID})\n\n\tp.SearchDisabled = false\n\tp.ReleaseDate = releaseDate\n\tp.Data.UnitOfMeasurement = dst.UnitOfMeasure\n\tp.URI = req.URL.Path\n\n\tversionURL, err := url.Parse(fm.Links.Version.HRef)\n\tif err != nil {\n\t\tlog.Warn(ctx, \"unable to parse version url\", 
log.FormatErrors([]error{err}))\n\t}\n\tversionPath := strings.TrimPrefix(versionURL.Path, apiRouterVersion)\n\n\tp.Data.CurrentVersionURL = versionPath\n\n\tp.IsInFilterBreadcrumb = true\n\n\t_, edition, _, err := helpers.ExtractDatasetInfoFromPath(ctx, versionPath)\n\tif err != nil {\n\t\tlog.Warn(ctx, \"unable to extract edition from url\", log.FormatErrors([]error{err}))\n\t}\n\n\tp.Breadcrumb = append(\n\t\tp.Breadcrumb,\n\t\tcore.TaxonomyNode{\n\t\t\tTitle: dst.Title,\n\t\t\tURI: fmt.Sprintf(\"/datasets/%s/editions\", dst.ID),\n\t\t}, core.TaxonomyNode{\n\t\t\tTitle: edition,\n\t\t\tURI: versionPath,\n\t\t}, core.TaxonomyNode{\n\t\t\tTitle: \"Filter options\",\n\t\t\tURI: fmt.Sprintf(\"/filters/%s/dimensions\", fm.Links.FilterBlueprint.ID),\n\t\t}, core.TaxonomyNode{\n\t\t\tTitle: \"Preview\",\n\t\t})\n\n\tp.Data.FilterID = fm.Links.FilterBlueprint.ID\n\tp.Data.FilterOutputID = filterOutputID\n\n\tp.DatasetTitle = dst.Title\n\tp.Data.DatasetID = datasetID\n\tp.DatasetId = datasetID\n\t_, editionFromPath, _, err := helpers.ExtractDatasetInfoFromPath(ctx, versionPath)\n\tif err != nil {\n\t\tlog.Warn(ctx, \"unable to extract edition from url\", log.FormatErrors([]error{err}))\n\t}\n\tp.Data.Edition = editionFromPath\n\n\tfor ext, d := range fm.Downloads {\n\t\tp.Data.Downloads = append(p.Data.Downloads, model.Download{\n\t\t\tExtension: ext,\n\t\t\tSize: d.Size,\n\t\t\tURI: d.URL,\n\t\t\tSkipped: d.Skipped,\n\t\t})\n\t}\n\n\tfor i := range dimensions {\n\t\tp.Data.Dimensions = append(p.Data.Dimensions, model.PreviewDimension{\n\t\t\tName: dimensions[i].Name,\n\t\t\tValues: dimensions[i].Values,\n\t\t})\n\t}\n\tif enableDatasetPreview && p.Data.Dimensions == nil {\n\t\tp.NoDimensionData = true\n\t}\n\n\treturn p\n}", "func NewPageContext() (pc PageContext, err error) {\n\t// any default initialization common to all pages goes here\n\tpc = PageContext{Static: STATIC_URL}\n\treturn\n}", "func CreatePageTable() {\n\t_, _ = sess.Exec(\"create table page(id INTEGER PRIMARY KEY AUTOINCREMENT, url TEXT, title TEXT,tags TEXT,content TEXT);\")\n}", "func AddPostPage(w http.ResponseWriter, r *http.Request) {\n\tif _, err := cmanager.SessionExist(r); err != nil {\n\t\thttp.Redirect(w, r, \"/login\", http.StatusSeeOther)\n\t\treturn\n\t}\n\tthreads := threadsmanager.GetThreads()\n\tuser := usermanager.GetUser(r)\n\tpostCreationStruct := addpoststruct{User: user, Threads: threads}\n\terr := tools.Templates.ExecuteTemplate(w, \"addpost.html\", postCreationStruct)\n\tif err != nil {\n\t\tlog.Fatal(\" /// \" + err.Error())\n\t}\n}", "func NewPageStack() *PageStack {\n\treturn &PageStack{\n\t\tPages: ui.NewPages(),\n\t}\n}", "func (l *Container) AddPage(lines []*line.Line, pagination *Pagination, r *colly.Response) {\n\tmux.Lock()\n\tdefer mux.Unlock()\n\n\tcurrentPage := 1\n\tif pagination != nil {\n\t\tcurrentPage = pagination.Current\n\t}\n\n\tl.Data[currentPage] = lines\n}", "func (q *QuestionnaireT) AddPage() *pageT {\n\tcntr := ctr.Increment()\n\tp := &pageT{\n\t\tLabel: trl.S{\"en\": fmt.Sprintf(\"PageLabel_%v\", cntr), \"de\": fmt.Sprintf(\"Seitentitel_%v\", cntr)},\n\t\tDesc: trl.S{\"en\": \"\", \"de\": \"\"},\n\t}\n\tq.Pages = append(q.Pages, p)\n\tret := q.Pages[len(q.Pages)-1]\n\treturn ret\n}", "func StreamPage(qw *quicktemplate.Writer, p BasePage) {\n\t//line templates/basepage.qtpl:13\n\tqw.N().S(`\n<html>\n\t<head>\n\t\t<title>`)\n\t//line templates/basepage.qtpl:16\n\tp.StreamTitle(qw)\n\t//line templates/basepage.qtpl:16\n\tqw.N().S(`</title>\n\t</head>\n\t<body>\n\t\t<div>\n\t\t\t<a 
href=\"/\">return to main page</a>\n\t\t</div>\n\t\t`)\n\t//line templates/basepage.qtpl:22\n\tp.StreamBody(qw)\n\t//line templates/basepage.qtpl:22\n\tqw.N().S(`\n\t</body>\n</html>\n`)\n//line templates/basepage.qtpl:25\n}", "func (gui *Gui) nextPage() {\n\tcurrentPageName, _ := gui.pages.GetFrontPage()\n\tif currentPageName == \"help\" {\n\t\treturn\n\t}\n\n\tnextIdx := getIndex(gui.views, gui.currentLogMainView) + 1\n\tif nextIdx == len(gui.views) {\n\t\tnextIdx = 0\n\t}\n\n\tgui.showPage(nextIdx)\n}", "func (app * application) createSnippetForm(w http.ResponseWriter, r *http.Request){\n\tapp.render(w, r, \"create.page.tmpl\",nil)\n}", "func ExamplePdfMaroto_AddPage() {\n\tm := pdf.NewMaroto(consts.Portrait, consts.A4)\n\n\t// Add rows, cols and components\n\tm.AddPage()\n\n\t// Add rows, col and components in a new page\n\t// Do more things and save...\n}", "func makeAdminPage(title, subtitle, page string) AdminPage {\n\treturn AdminPage{\n\t\tTitle: title,\n\t\tSubtitle: subtitle,\n\t\tPage: page,\n\t}\n}", "func (graph *Graph) CreateAllPages(pageDocs []goquery.Document) {\n\tfor idx, pageDoc := range pageDocs {\n\t\tpage := \"index\"\n\t\tif idx != 0 {\n\t\t\tpage = Helper.CreateRandomString(10)\n\t\t}\n\t\tpageURL := \"/\" + page + \".html\"\n\t\tgraph.Nodes = append(graph.Nodes, Node{URL: pageURL, doc: pageDoc})\n\t}\n}", "func mainPage(w http.ResponseWriter, r *http.Request) {\n\n\t// Pick an image if there are images to pick from (otherwise 404)\n\tfile := \"assets/404.jpg\"\n\tif len(images) > 0 {\n\t\tfile = pickRandom()\n\t} else {\n\t\t// TODO use a different image to say there are no images\n\t}\n\n\t// Read the code in page.html\n\thtml, err := ioutil.ReadFile(\"page.html\")\n\terrFail(err) // it's bad if you can't read the GUI's code\n\n\t// Replace variables in the HTML with current values\n\thtml = bytes.Replace(html, []byte(\"RANDOM\"), []byte(file), 1)\n\thtml = bytes.Replace(html, []byte(\"FILENAME\"), []byte(file), 1)\n\thtml = bytes.Replace(html, []byte(\"FOLDER\"), []byte(folder), 1)\n\n\t// Serve the finished page up\n\tw.Write(html)\n}", "func MakePage(wr io.Writer, tmpl *template.Template, keyValues map[string]string) error {\n\tdata, err := ResolveData(keyValues)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Can't resolve data source %s\", err)\n\t}\n\treturn tmpl.Execute(wr, data)\n}", "func (c *PagesService) Create(ctx context.Context, newPage *Page) (*Page, *Response, error) {\n\tvar created Page\n\tresp, err := c.Client.Create(ctx, \"pages\", newPage, &created)\n\n\tcreated.setService(c)\n\n\treturn &created, resp, err\n}", "func newPerProcPage() *perProcPage {\n\tpage := &perProcPage{\n\t\tGrid: ui.NewGrid(),\n\t\tCPUChart: widgets.NewGauge(),\n\t\tMemChart: widgets.NewGauge(),\n\t\tPIDTable: widgets.NewTable(),\n\t\tChildProcsTable: viz.NewTable(),\n\t\tCTXSwitchesChart: viz.NewBarChart(),\n\t\tPageFaultsChart: viz.NewBarChart(),\n\t\tMemStatsChart: viz.NewBarChart(),\n\t}\n\tpage.init()\n\treturn page\n}", "func (c Page) Page() revel.Result {\n\n\tc.RenderArgs[\"Site\"] = site.Site\n\n\t// Create PageData\n\tpdata := site.LoadPage(c.Params.Route.Get(\"section\"), c.Params.Route.Get(\"page\"))\n\tc.RenderArgs[\"Page\"] = pdata\n\n\tif pdata.Error != nil {\n\t\treturn c.NotFound(\"missing secton\")\n\t}\n\n\tc.RenderArgs[\"Section\"] = site.Site.Sections[pdata.Section]\n\n\treturn c.Render()\n\n}", "func NewPage(pathToDir string) (*Page, error) {\n\tallFiles, err := ioutil.ReadDir(pathToDir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar mdFile, 
modtime string\n\tfor _, f := range allFiles {\n\t\tif path.Ext(f.Name()) == \".md\" {\n\t\t\tmdFile = path.Join(pathToDir, f.Name())\n\t\t\tmodtime = DateFormat(f.ModTime())\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif mdFile == \"\" {\n\t\treturn nil, ErrMarkdownMissing\n\t}\n\n\tbuf, err := ioutil.ReadFile(mdFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpage := &Page{\n\t\tID: path.Base(pathToDir),\n\t\tPublished: modtime,\n\t}\n\n\ttitle := rePageTitle.FindSubmatch(buf)\n\tif title == nil {\n\t\treturn nil, ErrPageMissingTitle\n\t}\n\tpage.Title = string(title[1])\n\t// remove the blog's title from the markdown\n\tbuf = bytes.Replace(buf, append(title[0], []byte(\"\\n\\n\")...), nil, 1)\n\n\tdesc := rePageDesc.FindSubmatch(buf)\n\tif desc == nil {\n\t\treturn nil, ErrPageMissingDesc\n\t}\n\tpage.Desc = string(desc[1])\n\t// remove the blog's description from the markdown\n\tbuf = bytes.Replace(buf, append(desc[0], []byte(\"\\n\\n\")...), nil, 1)\n\n\timages := rePageImages.FindAllSubmatch(buf, -1)\n\tif images != nil {\n\t\tpage.Images = make(map[string]*Image, len(images))\n\n\t\tfor i := range images {\n\t\t\torigName := string(images[i][1])\n\t\t\t// skip any \"hot linked\" images\n\t\t\tif strings.HasPrefix(origName, \"http\") {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif _, exists := page.Images[origName]; exists == false {\n\t\t\t\timg, err := NewImage(path.Join(pathToDir, origName))\n\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tpage.Images[origName] = img\n\t\t\t\tbuf = bytes.Replace(buf, []byte(origName), []byte(img.URL()), -1)\n\t\t\t}\n\t\t}\n\t}\n\n\ttags := rePageTags.FindAllSubmatch(buf, -1)\n\tif tags == nil {\n\t\treturn nil, ErrPageMissingTags\n\t}\n\n\tfor i := range tags {\n\t\tt := bytes.Split(tags[i][1], []byte(\",\"))\n\t\tfor _, tag := range t {\n\t\t\tpage.Tags = append(page.Tags, strings.ToLower(string(tag)))\n\t\t}\n\t}\n\n\tpage.buffer = buf\n\n\treturn page, nil\n}", "func (client *ClientImpl) AddPage(ctx context.Context, args AddPageArgs) (*Page, error) {\n\tif args.Page == nil {\n\t\treturn nil, &azuredevops.ArgumentNilError{ArgumentName: \"args.Page\"}\n\t}\n\trouteValues := make(map[string]string)\n\tif args.ProcessId == nil {\n\t\treturn nil, &azuredevops.ArgumentNilError{ArgumentName: \"args.ProcessId\"}\n\t}\n\trouteValues[\"processId\"] = (*args.ProcessId).String()\n\tif args.WitRefName == nil || *args.WitRefName == \"\" {\n\t\treturn nil, &azuredevops.ArgumentNilOrEmptyError{ArgumentName: \"args.WitRefName\"}\n\t}\n\trouteValues[\"witRefName\"] = *args.WitRefName\n\n\tbody, marshalErr := json.Marshal(*args.Page)\n\tif marshalErr != nil {\n\t\treturn nil, marshalErr\n\t}\n\tlocationId, _ := uuid.Parse(\"1cc7b29f-6697-4d9d-b0a1-2650d3e1d584\")\n\tresp, err := client.Client.Send(ctx, http.MethodPost, locationId, \"6.0-preview.1\", routeValues, nil, bytes.NewReader(body), \"application/json\", \"application/json\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar responseValue Page\n\terr = client.Client.UnmarshalBody(resp, &responseValue)\n\treturn &responseValue, err\n}", "func saveHandler(w http.ResponseWriter, r *http.Request) {\r\n title := r.URL.Path[len(\"/save/\"):]\r\n body := r.FormValue(\"body\")\r\n p := &Page{Title: title, Body: []byte(body)}\r\n p.save()\r\n http.Redirect(w, r, \"/view/\"+title, http.StatusFound)\r\n}", "func mypage(w http.ResponseWriter, req *http.Request) {\n\n\t// var b []Board\n\n\t// if !alreadyLoggedIn(w, req) {\n\t// \thttp.Redirect(w, req, \"/\", http.StatusSeeOther)\n\t// \treturn\n\t// 
}\n\tif !alreadyLoggedIn(w, req) {\n\t\thttp.Redirect(w, req, \"/\", http.StatusSeeOther)\n\t\treturn\n\t}\n\tu := getUser(w, req)\n\ttpl.ExecuteTemplate(w, \"mypage.gohtml\", u) //! html로 바꾸는법~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n}", "func NewCreateRestorePage(l *load.Load) *CreateRestore {\n\tpg := &CreateRestore{\n\t\tLoad: l,\n\t\tkeyEvent: l.Receiver.KeyEvents,\n\n\t\terrLabel: l.Theme.Body1(\"\"),\n\t\tspendingPassword: l.Theme.EditorPassword(new(widget.Editor), \"Spending password\"),\n\t\twalletName: l.Theme.Editor(new(widget.Editor), \"Wallet name\"),\n\t\tmatchSpendingPassword: l.Theme.EditorPassword(new(widget.Editor), \"Confirm spending password\"),\n\t\tsuggestionLimit: 3,\n\t\tcreateModal: l.Theme.Modal(),\n\t\twarningModal: l.Theme.Modal(),\n\t\tmodalTitleLabel: l.Theme.H6(\"\"),\n\t\tpasswordStrength: l.Theme.ProgressBar(0),\n\t\topenPopupIndex: -1,\n\t\trestoreContainer: layout.List{\n\t\t\tAxis: layout.Vertical,\n\t\t\tAlignment: layout.Middle,\n\t\t},\n\t}\n\n\tif pg.WL.MultiWallet.LoadedWalletsCount() == 0 {\n\t\tpg.walletName.Editor.SetText(\"mywallet\")\n\t}\n\n\tpg.optionsMenuCard = decredmaterial.Card{Color: pg.Theme.Color.Surface}\n\tpg.optionsMenuCard.Radius = decredmaterial.Radius(8)\n\n\tpg.restoreWalletBtn = l.Theme.Button(\"Restore\")\n\n\tpg.closePageBtn = l.Theme.IconButton(decredmaterial.MustIcon(widget.NewIcon(icons.NavigationClose)))\n\tpg.closePageBtn.Background = color.NRGBA{}\n\tpg.closePageBtn.Color = l.Theme.Color.Hint\n\n\tpg.resetSeedFields = l.Theme.OutlineButton(\"Clear all\")\n\n\tpg.alertIcon = pg.Icons.AlertGray\n\n\tpg.restoreWalletBtn.Background = l.Theme.Color.InactiveGray\n\tpg.restoreWalletBtn.TextSize = values.TextSize16\n\tpg.errLabel.Color = pg.Theme.Color.Danger\n\n\tpg.passwordStrength.Color = pg.Theme.Color.LightGray\n\n\tfor i := 0; i <= numberOfSeeds; i++ {\n\t\twidgetEditor := new(widget.Editor)\n\t\twidgetEditor.SingleLine, widgetEditor.Submit = true, true\n\t\tpg.seedEditors.editors = append(pg.seedEditors.editors, l.Theme.RestoreEditor(widgetEditor, \"\", fmt.Sprintf(\"%d\", i+1)))\n\t}\n\tpg.seedEditors.focusIndex = -1\n\n\t// init suggestion buttons\n\tpg.initSeedMenu()\n\n\tpg.seedList = &layout.List{Axis: layout.Vertical}\n\tpg.spendingPassword.Editor.SingleLine, pg.matchSpendingPassword.Editor.SingleLine = true, true\n\tpg.walletName.Editor.SingleLine = true\n\n\tpg.allSuggestions = dcrlibwallet.PGPWordList()\n\n\treturn pg\n}", "func (q *QuestionnaireT) FindNewPage(sess *sessx.SessT) {\n\n\tprevPage := q.PrevPage()\n\tcurrPage := prevPage // Default assumption: we are still on prev page - unless there is some modification:\n\tsubmit := sess.EffectiveStr(\"submitBtn\")\n\tif submit == \"prev\" {\n\t\tcurrPage = q.Prev()\n\t} else if submit == \"next\" {\n\t\tcurrPage = q.Next()\n\t} else {\n\t\t// Apart from \"prev\" and \"next\", submitBtn can also hold an explicit destination page\n\t\texplicit, ok, err := sess.EffectiveInt(\"submitBtn\")\n\t\tif err != nil {\n\t\t\t// invalid page value, just dont use it\n\t\t}\n\t\tif ok && err == nil && explicit > -1 {\n\t\t\tlog.Printf(\"curPage set explicitly by 'submitBtn' to %v\", explicit)\n\t\t\tcurrPage = explicit\n\t\t}\n\t}\n\t// The progress bar uses \"page\" to submit an explicit destination page.\n\t// There are no conflicts of overriding submitBtn and page\n\t// since submitBtn has only a value if actually pressed.\n\texplicit, ok, err := sess.EffectiveInt(\"page\")\n\tif err != nil {\n\t\t// invalid page value, just dont use it\n\t}\n\tif ok && 
err == nil && explicit > -1 {\n\t\tlog.Printf(\"curPage set explicitly by param 'page' to %v\", explicit)\n\t\tcurrPage = explicit\n\t}\n\tq.CurrPage = currPage // Put current page into questionnaire\n\tlog.Printf(\"submitBtn was '%v' - new currPage is %v\", submit, currPage)\n\n}", "func NewPageRequest(page, perPage int64, spec *Spec) *PageRequest {\n\tif page < 1 {\n\t\tpage = 1\n\t}\n\n\t// set limit to per page var\n\tif perPage <= 0 || perPage > 100 {\n\t\tperPage = 100\n\t}\n\n\treturn &PageRequest{\n\t\tPage: page,\n\t\tPerPage: perPage,\n\t\tSpec: spec,\n\t}\n}", "func saveHandler(w http.ResponseWriter, r *http.Request) {\n\ttitle, err := getTitle(w, r)\n\tif err != nil {\n\t\treturn\n\t}\n\tbody := r.FormValue(\"body\")\t// Get the page content. It is of type string - we must convert it to []byte before it will fit into the Page struct.ß\n\tp := &Page{Title: title, Body: []byte(body)}\n\terr = p.save()\t// Write the data to a file\n\t// An error that occurs during p.save() will be reported to the user\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\thttp.Redirect(w, r, \"/view/\"+title, http.StatusFound)\n}", "func PostPage(w http.ResponseWriter, r *http.Request) {\n\tresponse := services.CreatePage(r)\n\n\trender.Status(r, response.Code)\n\trender.JSON(w, r, response)\n}", "func putPage(c *gin.Context) {\n\n\tlog.Print(\"putPage\")\n\tp := new(WikiPage)\n\n\terr := c.BindJSON(p)\n\tif err != nil {\n\t\tfmt.Printf(\"something happened on c.BindJSON \\n\")\n\t\tfmt.Println(err)\n\t\tc.AbortWithError(400, err)\n\t\treturn\n\t}\n\n\tputFilestoreDoc(c.Param(\"id\"), p)\n\n\tgetPage(c)\n}", "func loadPage(title string) (*PageTablature, error) {\n filename := \"tablatures/\"+ title + \".txt\"\n body, err := ioutil.ReadFile(filename)\n if err != nil {\n return nil, err\n }\n return &PageTablature{Titre: title, Tab: body}, nil\n}", "func (h *MovieHandler) new(w http.ResponseWriter, r *http.Request) {\n\t// Render a HTML response and set status code.\n\trender.HTML(w, http.StatusOK, \"movie/new.html\", nil)\n}", "func (r *Repository) CreatePage(p *page.Page, authorID int) (int, error) {\n log.Println(\"creating row in `pages` table...\")\n psqlStmt := `\n INSERT INTO pages (title, body, author_id, version)\n VALUES ($1, $2, $3, $4)\n RETURNING id`\n pageID := 0\n err := r.DB.QueryRow(psqlStmt, p.Title, p.Body, authorID,\n user.CurrentVersion).Scan(&pageID)\n if err != nil {\n log.Println(\"failed to store page\")\n return 0, err\n }\n return pageID, nil\n}", "func newThread(writer http.ResponseWriter, request *http.Requet) {\n\t_, err := session(writer, request)\n\tif err != nil {\n\t\thttp.Redirect(writer, request, \"/login\", 302)\n\t} else {\n\t\tgenerateHTML(writer, nil, \"layout\", \"private.navbar\", \"new.thread\")\n\t}\n}", "func (q *QuestionnaireT) PageHTML(pageIdx int) (string, error) {\n\n\tif q.CurrPage > len(q.Pages)-1 || q.CurrPage < 0 {\n\t\ts := fmt.Sprintf(\"You requested page %v out of %v. 
Page does not exist\", pageIdx, len(q.Pages)-1)\n\t\tlog.Print(s)\n\t\treturn s, fmt.Errorf(s)\n\t}\n\n\tpage := q.Pages[pageIdx]\n\n\tkv := q.DynamicPageValues()\n\terr := q.DynamicPages()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"dyn page creation in PageHTML() q: %w\", err)\n\t\treturn err.Error(), err\n\t}\n\tq.DynamicPagesApplyValues(kv)\n\n\tfound := false\n\tfor _, lc := range q.LangCodes {\n\t\tif q.LangCode == lc {\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !found {\n\t\ts := fmt.Sprintf(\"Language code '%v' is not supported in %v\", q.LangCode, q.LangCodes)\n\t\tlog.Print(s)\n\t\treturn s, fmt.Errorf(s)\n\t}\n\n\t// adding a group containing the previous/next buttons\n\tif !page.NoNavigation {\n\n\t\tvar footer *groupT\n\n\t\tif len(page.Groups) == 0 { // stupid edge case\n\t\t\tfooter = page.AddGroup()\n\t\t\tfooter.ID = \"footer\"\n\t\t\tfooter.BottomVSpacers = 0\n\t\t}\n\t\tif page.Groups[len(page.Groups)-1].ID == \"footer\" {\n\t\t\tfooter = page.Groups[len(page.Groups)-1]\n\t\t\tfooter.Inputs = nil\n\t\t} else {\n\t\t\tfooter = page.AddGroup()\n\t\t\tfooter.ID = \"footer\"\n\t\t\tfooter.BottomVSpacers = 0\n\t\t}\n\t\tfooter.Cols = 2\n\n\t\t// lblNext := cfg.Get().Mp[\"page\"]\n\t\t// lblNext := cfg.Get().MpSite[q.Survey.Type][\"continue_to_page_x\"]\n\t\tlblNext := q.SiteSpecificTrl(\"continue_to_page_x\")\n\t\tcloneNext := lblNext.Pad(2)\n\t\tcloneNext = cloneNext.Fill(q.NextNaviNum())\n\n\t\t// lblPrev := cfg.Get().Mp[\"previous\"]\n\t\t// lblPrev := cfg.Get().MpSite[q.Survey.Type][\"back_to_page_x\"]\n\t\tlblPrev := q.SiteSpecificTrl(\"back_to_page_x\")\n\t\tclonePrev := lblPrev.Pad(1)\n\t\tclonePrev = clonePrev.Fill(q.PrevNaviNum())\n\n\t\tif q.HasNext() {\n\t\t\tinp := footer.AddInput()\n\t\t\tinp.Type = \"button\"\n\t\t\tinp.Name = \"submitBtn\"\n\t\t\tinp.Response = \"next\"\n\t\t\tinp.Label = cloneNext\n\t\t\tinp.AccessKey = \"n\"\n\t\t\tinp.ColSpanControl = 1\n\n\t\t\tinp.Style = css.NewStylesResponsive(inp.Style)\n\t\t\tinp.Style.Desktop.StyleGridItem.Order = 2\n\n\t\t\tinp.StyleCtl = css.ItemEndMA(inp.StyleCtl)\n\t\t\tinp.StyleCtl.Desktop.StyleBox.Position = \"relative\"\n\t\t\tinp.StyleCtl.Desktop.StyleBox.Left = \"3rem\"\n\t\t\tinp.StyleCtl.Mobile.StyleBox.Left = \"0\"\n\t\t} else {\n\t\t\tinp := footer.addEmptyTextblock()\n\t\t\tinp.Style = css.NewStylesResponsive(inp.Style)\n\t\t\tinp.Style.Desktop.StyleGridItem.Order = 2\n\t\t}\n\n\t\tif q.HasPrev() {\n\t\t\tinp := footer.AddInput()\n\t\t\tinp.Type = \"button\"\n\t\t\tinp.Name = \"submitBtn\"\n\t\t\tinp.Response = \"prev\"\n\t\t\tinp.Label = clonePrev\n\t\t\tinp.AccessKey = \"p\"\n\t\t\tinp.ColSpanControl = 1\n\n\t\t\tinp.Style = css.NewStylesResponsive(inp.Style)\n\t\t\tinp.Style.Desktop.StyleGridItem.AlignSelf = \"end\" // smaller font-size\n\n\t\t\tinp.StyleCtl = css.NewStylesResponsive(inp.StyleCtl)\n\t\t\t// inp.StyleCtl = css.ItemEndMA(inp.StyleCtl)\n\t\t\tinp.StyleCtl.Desktop.StyleBox.Position = \"relative\"\n\t\t\tinp.StyleCtl.Desktop.StyleBox.Left = \"-2.5rem\"\n\t\t\tinp.StyleCtl.Mobile.StyleBox.Left = \"0\"\n\t\t\tinp.StyleCtl.Mobile.StyleText.FontSize = 85\n\n\t\t} else {\n\t\t\tfooter.addEmptyTextblock()\n\t\t}\n\t}\n\n\tw := &strings.Builder{}\n\n\t//\n\t//\n\tif q.PostponeNavigationButtons > 0 {\n\n\t\ts := `\n\t\t<style>\n\t\t\tbutton[type=\"submit\"], \n\t\t\tbutton[accesskey=\"n\"] \n\t\t\t{\n\t\t\t\tanimation: %vms ease-in-out 1ms 1 nameAppear;\n\t\t\t}\n\t\t</style>\n\t\t`\n\t\tfmt.Fprintf(w, s, 1000*q.PostponeNavigationButtons)\n\n\t}\n\n\tpage.WidthDefault()\n\tpageClass := 
fmt.Sprintf(\"pg%02v\", pageIdx)\n\tfmt.Fprint(w, css.StyleTag(page.Style.CSS(pageClass)))\n\n\t// i.e. smaller - for i.e. radios more closely together\n\twidth := fmt.Sprintf(\"<div class='%v' >\\n\", pageClass)\n\tfmt.Fprint(w, width)\n\n\tif q.HasErrors {\n\t\tfmt.Fprintf(w,\n\t\t\t`<p class=\"error\" id=\"page-error\" >%v</p>`,\n\t\t\tcfg.Get().Mp[\"correct_errors\"].Tr(q.LangCode),\n\t\t)\n\t}\n\n\thasHeader := false\n\n\tif page.Section != nil {\n\t\tfmt.Fprintf(w, \"<span class='go-quest-page-section' >%v</span>\", page.Section.Tr(q.LangCode))\n\t\tif page.Label.Tr(q.LangCode) != \"\" {\n\t\t\tfmt.Fprint(w, \"<span class='go-quest-page-desc'> &nbsp; - &nbsp; </span>\")\n\t\t}\n\t\thasHeader = true\n\t}\n\tif page.Label.Tr(q.LangCode) != \"\" {\n\t\tfmt.Fprintf(w, \"<span class='go-quest-page-header' >%v</span>\", page.Label.Tr(q.LangCode))\n\t\thasHeader = true\n\t}\n\tif page.Desc.Tr(q.LangCode) != \"\" {\n\t\tfmt.Fprint(w, vspacer0)\n\t\tfmt.Fprintf(w, \"<p class='go-quest-page-desc'>%v</p>\", page.Desc.Tr(q.LangCode))\n\t\thasHeader = true\n\t}\n\n\tif hasHeader {\n\t\tfmt.Fprint(w, vspacer16)\n\t}\n\n\tgrpOrder := q.RandomizeOrder(pageIdx)\n\n\tpage.ConsolidateRadioErrors(grpOrder)\n\n\tpdsSpecialDisableColumns(q, page, pageIdx, 0)\n\n\tcompositCntr := -1 // group counter - per page\n\tnonCompositCntr := -1 // group counter - per page\n\n\tchildGroups := 0 // see groupT.ChildGroups\n\n\t// nextGroup:\n\tfor loopIdx, grpIdx := range grpOrder {\n\n\t\tif _, ok, _ := q.HasComposit(pageIdx, grpIdx); ok {\n\t\t\tcompositCntr++\n\t\t\tcompFuncNameWithParamSet := page.Groups[grpIdx].Inputs[0].DynamicFunc\n\t\t\tcF, seqIdx, paramSetIdx := parseComposite(compFuncNameWithParamSet)\n\t\t\tgrpHTML, _, err := cF(q, seqIdx, paramSetIdx, false) // QuestionnaireT must comply to qstif.Q\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(w, \"composite func error %v \\n\", err)\n\t\t\t} else {\n\t\t\t\t// grpHTML also contains HTML and CSS stuff - which could be hyphenized too\n\t\t\t\tgrpHTML = trl.HyphenizeText(grpHTML)\n\t\t\t\tfmt.Fprint(w, grpHTML+\"\\n\")\n\t\t\t}\n\t\t} else {\n\n\t\t\tgrpHTML := q.GroupHTMLGridBased(pageIdx, grpIdx)\n\n\t\t\tif page.Groups[grpIdx].ChildGroups > 0 {\n\t\t\t\t// => this is a master group\n\t\t\t\tchildGroups = page.Groups[grpIdx].ChildGroups\n\t\t\t\tln := len(grpHTML) - len(\"</div>\\n\")\n\t\t\t\tgrpHTML = grpHTML[:ln]\n\t\t\t\tif logChildGroups {\n\t\t\t\t\tlog.Printf(\"page%v - group%v has childGroups %v\", pageIdx, grpIdx, childGroups)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// dynamic numbering - based on group sequence per page after shuffling\n\t\t\tif strings.Contains(grpHTML, \"[groupID]\") {\n\t\t\t\tnonCompositCntr++\n\t\t\t\tgrpHTML = strings.Replace(grpHTML, \"[groupID]\", fmt.Sprintf(\"%v\", nonCompositCntr+1), -1)\n\t\t\t}\n\n\t\t\t// this is ugly - and its not localized by language\n\t\t\t// introduced for PDS questionnaire\n\t\t\tif strings.Contains(grpHTML, \"[quarter-1]\") {\n\t\t\t\tgrpHTML = strings.ReplaceAll(grpHTML, \"[quarter-1]\", q.Survey.Quarter(-1))\n\t\t\t}\n\t\t\tif strings.Contains(grpHTML, \"[quarter-0]\") {\n\t\t\t\tgrpHTML = strings.ReplaceAll(grpHTML, \"[quarter-0]\", q.Survey.Quarter(0))\n\t\t\t}\n\t\t\tif strings.Contains(grpHTML, \"[quarter-p1]\") {\n\t\t\t\tgrpHTML = strings.ReplaceAll(grpHTML, \"[quarter-p1]\", q.Survey.Quarter(1))\n\t\t\t}\n\n\t\t\t// dynamic question numbering - based on NavigationCondition, IsInNavigation()\n\t\t\t// todo\n\n\t\t\tfmt.Fprint(w, grpHTML+\"\\n\")\n\n\t\t}\n\n\t\trenderBottomSpacers := true\n\t\tif 
childGroups > 0 && page.Groups[grpIdx].BottomVSpacers > 0 {\n\t\t\tlog.Printf(`page %v group %v is a master or child group - BottomVSpacers derail the logic - \n\t\t\t\tuse margin-bottom on the master group`, pageIdx, grpIdx)\n\t\t\trenderBottomSpacers = false\n\t\t}\n\n\t\t// vertical distance at the end of groups\n\t\tif renderBottomSpacers {\n\t\t\tif loopIdx < len(page.Groups)-1 {\n\t\t\t\tfor i2 := 0; i2 < page.Groups[grpIdx].BottomVSpacers; i2++ {\n\t\t\t\t\tfmt.Fprint(w, vspacer16)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfmt.Fprint(w, vspacer16)\n\t\t\t}\n\t\t}\n\n\t\t//\n\t\t//\n\t\tif childGroups > 0 && page.Groups[grpIdx].ChildGroups == 0 {\n\t\t\tchildGroups--\n\t\t\t// log.Printf(\"page%v - group%v childGroups %v - of master group\", pageIdx, grpIdx, childGroups)\n\t\t\tif childGroups > 0 {\n\t\t\t\t// nothing todo\n\t\t\t} else if childGroups == 0 {\n\t\t\t\tfmt.Fprintf(w, \"</div>\\n <!-- master group closed gr%v -->\\n\", grpIdx)\n\t\t\t\tif logChildGroups {\n\t\t\t\t\tlog.Printf(\"page%v - group%v - previous master group closed\", pageIdx, grpIdx)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\n\tfmt.Fprintf(w, \"</div> <!-- /%v -->\\n\\n\", pageClass)\n\n\t//\n\t//\n\tif page.ValidationFuncName != \"\" {\n\n\t\tfuncNames := strings.Split(page.ValidationFuncName, \",\")\n\t\tfor _, funcName := range funcNames {\n\t\t\tq.RenderJS(\n\t\t\t\tw,\n\t\t\t\tfuncName,\n\t\t\t\tmap[string]trl.S{\"msg\": page.ValidationFuncMsg},\n\t\t\t\tmap[string]string{},\n\t\t\t)\n\t\t}\n\n\t}\n\n\tret := w.String()\n\n\t// inject user data into HTML text\n\t// i.e. [attr-country] => Latvia\n\tfor k, v := range q.Attrs {\n\t\tk1 := fmt.Sprintf(\"[attr-%v]\", strings.ToLower(k))\n\t\tret = strings.Replace(ret, k1, v, -1)\n\t}\n\n\tif strings.Contains(ret, \"(MISSING)\") {\n\t\tlog.Printf(\"PageHTML() returns (MISSING). Reason: Printf(w, fmt.Sprintf('xxx ... %% ... 
')) - remove suffix 'f' from outer call.\")\n\t\t// ret = strings.ReplaceAll(ret, \"(MISSING)\", \"\")\n\t}\n\n\treturn ret, nil\n}", "func handlerView(w http.ResponseWriter, r *http.Request, title string) {\r\n\tp2, err := loadPage(title)\r\n\tif err != nil {\r\n\t\t//thiswill redirect the cliebt to the edit page so the content may be created\r\n\t\t//the http.redirect fucntion adds an HTTP status code\r\n\t\t//of fttp.statusFound(302) and a location header to the http response\r\n\t\thttp.Redirect(w, r, \"/edit/\"+title, http.StatusFound)\r\n\t\tfmt.Println(err.Error())\r\n\t\tos.Exit(1)\r\n\t}\r\n\tfetchHTML(w, \"view\", p2)\r\n}", "func handlerSave(w http.ResponseWriter, r *http.Request, title string) {\r\n\tbody := r.FormValue(\"body\")\r\n\tp := &Page{Title: title, Body: []byte(body)}\r\n\terr := p.save()\r\n\tif err != nil {\r\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\r\n\t\tos.Exit(1)\r\n\t}\r\n\thttp.Redirect(w, r, \"/view/\"+p.Title, http.StatusFound)\r\n}", "func StreamPageTemplate(qw422016 *qt422016.Writer, p Page) {\n\t//line base.qtpl:13\n\tqw422016.N().S(`\n<html>\n\t<head>\n\t\t<title>`)\n\t//line base.qtpl:16\n\tp.StreamTitle(qw422016)\n\t//line base.qtpl:16\n\tqw422016.N().S(`</title>\n\t</head>\n\t<body>\n\t\t<div>\n\t\t\t<a href=\"/\">return to main page</a>\n\t\t</div>\n\t\t`)\n\t//line base.qtpl:22\n\tp.StreamBody(qw422016)\n\t//line base.qtpl:22\n\tqw422016.N().S(`\n\t</body>\n</html>\n`)\n//line base.qtpl:25\n}", "func (account *Account) CreatePage(page *Page, returnContent bool) (*Page, error) {\n\targs := http.AcquireArgs()\n\n\t// Access token of the Telegraph account.\n\targs.Add(\"access_token\", account.AccessToken) // required\n\n\t// Page title.\n\targs.Add(\"title\", page.Title) // required\n\n\tif page.AuthorName != \"\" {\n\t\t// Author name, displayed below the article's title.\n\t\targs.Add(\"author_name\", page.AuthorName)\n\t}\n\n\tif page.AuthorURL != \"\" {\n\t\t// Profile link, opened when users click on the author's name below the title. 
Can be any\n\t\t// link, not necessarily to a Telegram profile or channel.\n\t\targs.Add(\"author_url\", page.AuthorURL)\n\t}\n\n\t// If true, a content field will be returned in the Page object.\n\targs.Add(\"return_content\", strconv.FormatBool(returnContent))\n\n\tcontent, err := json.Marshal(page.Content)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Content of the page.\n\targs.Add(\"content\", string(content)) // required\n\n\tbody, err := request(\"createPage\", \"\", args)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar resp Page\n\terr = json.Unmarshal(*body.Result, &resp)\n\n\treturn &resp, err\n}", "func saveHandler (w http.ResponseWriter, r *http.Request) {\n\n\t//on sauvegarde dans un fichier\n\tp := &PageTablature{Titre:r.FormValue(\"titre\"), Tab:[]byte(r.FormValue(\"sandbox\"))}\n\t\n\terr := p.save()\n\t\n\tif err != nil {\n\t\t//redirection vers page de création\n\t\tcreateHandler(w, r)\n\t}\n\n\ttabsHandler(w, r)\n}", "func BlankPage() *Page {\n\treturn &Page{title: \"Empty page\", body: []byte(\"Write some content\")}\n}", "func HandleCreate(w http.ResponseWriter, r *http.Request) error {\n\n\tpage := pages.New()\n\n\t// Check the authenticity token\n\terr := session.CheckAuthenticity(w, r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Authorise\n\tuser := session.CurrentUser(w, r)\n\terr = can.Create(page, user)\n\tif err != nil {\n\t\treturn server.NotAuthorizedError(err)\n\t}\n\n\t// Setup context\n\tparams, err := mux.Params(r)\n\tif err != nil {\n\t\treturn server.InternalError(err)\n\t}\n\n\t// Validate the params, removing any we don't accept\n\tpageParams := page.ValidateParams(params.Map(), pages.AllowedParams())\n\n\tid, err := page.Create(pageParams)\n\tif err != nil {\n\t\treturn server.InternalError(err)\n\t}\n\n\t// Redirect to the new page\n\tpage, err = pages.Find(id)\n\tif err != nil {\n\t\treturn server.InternalError(err)\n\t}\n\n\treturn server.Redirect(w, r, page.IndexURL())\n}", "func loadPage(title string) (*Page, error){\r\n\tfilename := title + \".txt\"\r\n\tbody, err := ioutil.ReadFile(filename)\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\treturn &Page{Title: title, Body: body}, nil\r\n}", "func saveHandler(w http.ResponseWriter, r *http.Request, title string) {\n\tbody := r.FormValue(\"body\")\n\tp := &Page{Title: title, Body: []byte(body)}\n\terr := p.save()\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\thttp.Redirect(w, r, \"/view/\"+title, http.StatusFound)\n}", "func loadPage(file string) (*Page, error) {\n\tfilename := filepath.Join(pagesDir, file+\".txt\")\n\tbody, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tingredients, instructions := parseRecipe(body)\n\n\tp := &Page{\n\t\tTitle: convertFilenameToTitle(file),\n\t\tFilename: filepath.Base(file),\n\t\tIngredients: template.HTML(ingredients),\n\t\tInstructions: template.HTML(instructions)}\n\n\treturn p, nil\n}", "func (gui *Gui) showPage(idx int) {\n\tgui.currentLogMainView = gui.views[idx]\n\tgui.currentLogMainView.Select()\n\tgui.pages.SwitchToPage(strconv.Itoa(gui.currentLogMainView.id))\n\tgui.navBar.SelectPage(strconv.Itoa(gui.currentLogMainView.id))\n}", "func New(w http.ResponseWriter, r *http.Request) {\r\n\ttmpl.ExecuteTemplate(w, \"New\", nil)\r\n}", "func New(w http.ResponseWriter, r *http.Request) {\r\n\ttmpl.ExecuteTemplate(w, \"New\", nil)\r\n}", "func Page(w http.ResponseWriter, r *http.Request) {\n\tuuid, loggedIn := context.GetOk(r, 
\"uuid\")\n\n\tvariables := models.TemplateVariables{\n\t\tLoggedIn: loggedIn,\n\t}\n\n\tif loggedIn {\n\t\tself, err := db.GetUserFromUUID(uuid.(string))\n\t\tif err != nil {\n\t\t\thelpers.ThrowErr(w, r, \"Getting user from DB error\", err)\n\t\t\treturn\n\t\t}\n\n\t\tcsrfSecret, err := r.Cookie(\"csrfSecret\")\n\t\tif err != nil {\n\t\t\thelpers.ThrowErr(w, r, \"Getting CSRF Secret cookie error\", err)\n\t\t\treturn\n\t\t}\n\n\t\tvariables.Self = self\n\t\tvariables.CsrfSecret = csrfSecret.Value\n\t}\n\n\tvars := mux.Vars(r)\n\n\tpost, err := db.GetPost(vars[\"uuid\"])\n\tif err != nil {\n\t\thelpers.ThrowErr(w, r, \"Getting post from DB error\", err)\n\t\treturn\n\t}\n\n\tvariables.Post = post\n\n\tvar t *template.Template\n\n\tif post.Creation == 0 {\n\t\tt, err = template.ParseFiles(\"templates/post/not-found.html\", \"templates/nested.html\") // Parse the HTML pages.\n\t\tif err != nil {\n\t\t\thelpers.ThrowErr(w, r, \"Template parsing error\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tt, err = template.ParseFiles(\"templates/post/page.html\", \"templates/nested.html\") // Parse the HTML pages.\n\t\tif err != nil {\n\t\t\thelpers.ThrowErr(w, r, \"Template parsing error\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif vote, ok := variables.Post.Votes[variables.Self.UUID]; ok {\n\t\tif vote {\n\t\t\tvariables.Post.Vote = 1\n\t\t} else {\n\t\t\tvariables.Post.Vote = 2\n\t\t}\n\t}\n\n\terr = t.Execute(w, variables)\n\tif err != nil {\n\t\thelpers.ThrowErr(w, r, \"Template execution error\", err)\n\t}\n}", "func loadPage(title string) (*Page, error) {\n\tfilename := \"./pages/\" + title + \".txt\"\n\tbody, err := ioutil.ReadFile(filename)\n\tlist := fetchPageList()\n\tif err != nil {\n\t\tlog.Println(\"loadPage() error: \", err)\n\t\treturn nil, err\n\t}\n\treturn &Page{Title: title, Body: body, List: list}, nil\n}", "func main() {\n\tp1 := &gowiki.Page{Title: \"TestPage\", Body: []byte(\"This is a sample Page.\")}\n\tp1.Save()\n\tp2, _ := gowiki.LoadPage(\"TestPage\")\n\tfmt.Println(string(p2.Body))\n}", "func viewHandler(w http.ResponseWriter, r *http.Request) {\n\ttitle := r.URL.Path[len(\"/view/\"):]\n\tp, err := loadPage(title)\n\tif err != nil {\n\t\thttp.Redirect(w, r, \"/edit/\"+title, http.StatusFound)// if there does not exist page:title, make a new page.\n\t\treturn\t\t\n\t}\n\trenderTemplate(w, \"view\", p)\n\t// t, _ := template.ParseFiles(\"view.html\")// return *template.Template, error\n\t// t.Execute(w,p)\n\t// fmt.Fprintf(w, \"<h1>%s</h1><div>%s</div>\", p.Title, p.Body)\n}", "func (p *provider) page(initialURL, token string, newObj func() interface{}, processObj func(interface{}) error) error {\n\t// track urls we've fetched to avoid cycles\n\turl := initialURL\n\tfetchedURLs := sets.NewString(url)\n\tfor {\n\t\t// fetch and process\n\t\tobj := newObj()\n\t\tlinks, err := p.getJSON(url, token, obj)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := processObj(obj); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// see if we need to page\n\t\t// https://developer.github.com/v3/#link-header\n\t\turl = links[\"next\"]\n\t\tif len(url) == 0 {\n\t\t\t// no next URL, we're done paging\n\t\t\tbreak\n\t\t}\n\t\tif fetchedURLs.Has(url) {\n\t\t\t// break to avoid a loop\n\t\t\tbreak\n\t\t}\n\t\t// remember to avoid a loop\n\t\tfetchedURLs.Insert(url)\n\t}\n\treturn nil\n}", "func AddGroupPage(c *fiber.Ctx) error {\n\t// First check if id is an integer\n\tid, err := strconv.Atoi(c.Params(\"id\"))\n\tif err != nil {\n\t\treturn c.Send([]byte(\"Value not an 
integer!\"))\n\t}\n\t// Next checks if the user can edit table licenser\n\tif !databasepack.Allowed(querypack.INFO.Roles, \"licenser_\") {\n\t\treturn c.Send([]byte(\"Permission declined!\"))\n\t}\n\treturn c.Send([]byte(databasepack.AddGroup(id)))\n}", "func loadPage(title string) (*Page, error) {\n\tfilename := title + \".txt\"\n\tbody, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Page{Title: title, Body: body}, nil\n}", "func Index(w http.ResponseWriter, r *http.Request) {\n\ttmpl.ExecuteTemplate(w, \"New\", nil)\n}", "func Page(c *fiber.Ctx) {\n\tShopID := c.Params(\"shop_id\")\n\tuserID := userIDF(c.Get(\"token\"))\n\tvar PageFromSQL PageSQL\n\tvar PagePointer PagesPointer\n\tvar response PageInformation\n\n\tErrorPage := sq.Select(\n\t\t\"active\",\n\t\t\"template_type\",\n\t\t\"style_sheets\",\n\t\t\"active_days\",\n\t\t\"images_days\",\n\t\t\"offers_active\",\n\t\t\"accept_card_active\",\n\t\t\"subdomain\",\n\t\t\"domain\",\n\t\t\"shop.shop_id\",\n\t\t\"pages_id\",\n\t\t\"type_charge\",\n\t\t\"shop_name\",\n\t\t\"description\",\n\t\t\"cover_image\",\n\t\t\"logo\",\n\t\t\"shop.user_id\",\n\t).\n\t\tFrom(\"pages\").\n\t\tLeftJoin(\"plans_pay on pages.shop_id = plans_pay.shop_id\").\n\t\tLeftJoin(\"shop on pages.shop_id = shop.shop_id\").\n\t\tWhere(\"pages.shop_id = ? AND shop.user_id = ?\", ShopID, userID).\n\t\tRunWith(database).\n\t\tQueryRow().\n\t\tScan(\n\t\t\t&PageFromSQL.Active,\n\t\t\t&PageFromSQL.TemplateType,\n\t\t\t&PageFromSQL.StyleSheets,\n\t\t\t&PageFromSQL.ActiveDays,\n\t\t\t&PageFromSQL.ImagesDays,\n\t\t\t&PageFromSQL.OffersActive,\n\t\t\t&PageFromSQL.AcceptCardActive,\n\t\t\t&PageFromSQL.Subdomain,\n\t\t\t&PageFromSQL.Domain,\n\t\t\t&PageFromSQL.ShopID,\n\t\t\t&PageFromSQL.PagesID,\n\t\t\t&PageFromSQL.TypeCharge,\n\t\t\t&PageFromSQL.ShopName,\n\t\t\t&PageFromSQL.Description,\n\t\t\t&PageFromSQL.CoverImage,\n\t\t\t&PageFromSQL.Logo,\n\t\t\t&PageFromSQL.UserID,\n\t\t)\n\n\tif ErrorPage != nil {\n\t\tfmt.Println(ErrorPage, \"Error get page\")\n\t\tc.JSON(ErrorResponse{MESSAGE: \"Problem with get page\"})\n\t\tc.SendStatus(400)\n\t\treturn\n\t}\n\n\tActive, _ := strconv.Atoi(PageFromSQL.Active.String)\n\tPagePointer.Active = Active\n\tTemplateType, _ := strconv.Atoi(PageFromSQL.TemplateType.String)\n\tPagePointer.TemplateType = TemplateType\n\tStyleSheets, _ := strconv.Atoi(PageFromSQL.StyleSheets.String)\n\tPagePointer.StyleSheets = StyleSheets\n\tActiveDays, _ := strconv.Atoi(PageFromSQL.ActiveDays.String)\n\tPagePointer.ActiveDays = ActiveDays\n\tImagesDays, _ := strconv.Atoi(PageFromSQL.ImagesDays.String)\n\tPagePointer.ImagesDays = ImagesDays\n\tOffersActive, _ := strconv.Atoi(PageFromSQL.OffersActive.String)\n\tPagePointer.OffersActive = OffersActive\n\tAcceptCardActive, _ := strconv.Atoi(PageFromSQL.AcceptCardActive.String)\n\tPagePointer.AcceptCardActive = AcceptCardActive\n\tPagePointer.Subdomain = &PageFromSQL.Subdomain.String\n\tPagePointer.Domain = &PageFromSQL.Domain.String\n\tPagePointer.ShopID = &PageFromSQL.ShopID.String\n\tPagePointer.PagesID = &PageFromSQL.PagesID.String\n\tPagePointer.TypeCharge = &PageFromSQL.TypeCharge.String\n\tPagePointer.ShopName = &PageFromSQL.ShopName.String\n\tPagePointer.Description = &PageFromSQL.Description.String\n\tPagePointer.CoverImage = &PageFromSQL.CoverImage.String\n\tPagePointer.Logo = &PageFromSQL.Logo.String\n\tPagePointer.UserID = &PageFromSQL.UserID.String\n\n\tresponse.Page = PagePointer\n\tUserID := *PagePointer.UserID\n\tif UserID == userID 
{\n\t\tc.JSON(response)\n\t} else {\n\t\tc.JSON(ErrorResponse{MESSAGE: \"Not is owner or active shop\"})\n\t}\n}", "func NewListPage(getNextPage func(context.Context, List) (List, error)) ListPage {\n\treturn ListPage{fn: getNextPage}\n}", "func saveHandler(w http.ResponseWriter, r *http.Request, title string) {\n\tbody := r.FormValue(\"body\")\n\tp := &Page{Title: title, Body: []byte(body)}\n\terr := p.save()\n\tif err != nil {\n\t\tlog.Println(\"saveHandler() error: \", err)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\thttp.Redirect(w, r, \"/view/\"+title, http.StatusFound)\n}", "func editHandler(w http.ResponseWriter, r *http.Request) {\r\n title := r.URL.Path[len(\"/edit/\"):]\r\n p, err := loadPage(title)\r\n if err != nil {\r\n p = &Page{Title: title}\r\n }\r\n renderTemplate(w, \"edit\", p)\r\n}" ]
[ "0.7086286", "0.69852895", "0.6925897", "0.67609227", "0.66914123", "0.65977377", "0.65409267", "0.6484053", "0.6477484", "0.6449281", "0.63723683", "0.63473636", "0.6308704", "0.62883514", "0.60617995", "0.60510904", "0.5933118", "0.5927139", "0.5916846", "0.58892477", "0.5887049", "0.58674026", "0.5789682", "0.5775522", "0.5720787", "0.5705028", "0.56760496", "0.56526875", "0.5640932", "0.5600588", "0.5595735", "0.5578068", "0.5563687", "0.5534268", "0.55176395", "0.5500714", "0.549703", "0.5484167", "0.5474354", "0.5467304", "0.54634315", "0.5457738", "0.5455294", "0.5452823", "0.54497623", "0.5440376", "0.54196775", "0.54195416", "0.54145664", "0.5381459", "0.53808033", "0.5354544", "0.5335837", "0.53318244", "0.5327773", "0.5316785", "0.53114474", "0.53092146", "0.53036726", "0.52924347", "0.5285417", "0.5285022", "0.5259779", "0.52536947", "0.52477294", "0.5246323", "0.5236925", "0.5222028", "0.522085", "0.5220743", "0.5216911", "0.5213262", "0.5211912", "0.52106357", "0.52001214", "0.5189263", "0.517969", "0.51722366", "0.51657665", "0.51576144", "0.5156869", "0.51482123", "0.51447153", "0.5136762", "0.5132765", "0.51316714", "0.5127129", "0.5127129", "0.51207125", "0.51187956", "0.51107806", "0.51100427", "0.51097023", "0.50982624", "0.50951445", "0.5088154", "0.5068974", "0.50634456", "0.50593424", "0.505623" ]
0.6275084
14
getUrl returns generated url
func getUrl(title string) string {
	url := title
	if title == "README" {
		url = "index"
	}
	return url
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (t *Tracker) getURL(r *request) string {\n\treturn baseURL + t.serviceID + r.String()\n}", "func (s Store) GetURL(id string) string {\n\treturn s.get(id)\n}", "func (d dvd) GetURL(req *http.Request, s *site) *url.URL {\n\treturn makeURL(d, req, s, []string{\"name\", d.Name})\n}", "func (session *Session) getUrl(path string, params Params) string {\n\tbuf := &bytes.Buffer{}\n\tbuf.WriteString(path)\n\n\tif params != nil {\n\t\tbuf.WriteRune('?')\n\t\tparams.Encode(buf)\n\t}\n\n\treturn buf.String()\n}", "func (c Client) getURL(url string) string {\n\treturn fmt.Sprintf(\"%s%s\", c.baseAPI, url)\n}", "func GetUrl(c *gin.Context) {\n\tsurl := c.Param(\"surl\")\n\tfmt.Println(surl)\n\tkey, err := strconv.ParseUint(surl, 36, 0)\n\tif err != nil {\n\t\tfmt.Println(\"\\033[;31murl not validated\\033[;0m\")\n\t}\n\tstr := storage.RedisPool.GetShortUrlFromRedis(key)\n\tfmt.Println(str)\n\tif str == \"\" {\n\t\tc.JSON(500, gin.H{\n\t\t\t\"surl\": \"\",\n\t\t\t\"msg\": \"failed\",\n\t\t})\n\t}\n\tc.Redirect(302, str)\n}", "func getUrl(config map[string]string, path string) (string) {\n url := fmt.Sprint(\"https://\", config[\"CONTROL_SERVICE\"], \":\", config[\"CONTROL_PORT\"], path)\n return url\n}", "func (opa *client) getURL(path string) string {\n\treturn fmt.Sprintf(\"%s/%s\", opa.Host, path)\n}", "func (m *WorkforceIntegration) GetUrl()(*string) {\n return m.url\n}", "func (w *Worker) Url(path string, params url.Values) string {\n\t// random choice one host for load balance\n\ti := rand.Intn(len(w.ctx.addrs))\n\taddr := w.ctx.addrs[i]\n\ts := fmt.Sprintf(\"http://%s%s\", addr, path)\n\tif params == nil {\n\t\treturn s\n\t}\n\tp := params.Encode()\n\treturn fmt.Sprintf(\"%s?%s\", s, p)\n}", "func (session *Session) getUrl(path string, params Params) string {\n\tbuf := &bytes.Buffer{}\n\tbuf.WriteString(path)\n\n\tif params != nil {\n\t\tbuf.WriteRune('?')\n\n\t\tparams.EncodeUrlParam(buf)\n\t}\n\n\treturn buf.String()\n}", "func (n *Repository) generateURL(ar *Artifact, endOfFile string) string {\n\tg := strings.Replace(ar.GroupID, dot, slash, allReplacement)\n\tnameOfFile := fmt.Sprint(slash, ar.ArtifactID, dash, ar.Version, dot, endOfFile)\n\treturn fmt.Sprint(n.url, slash, g, slash, ar.ArtifactID, slash, ar.Version, nameOfFile)\n}", "func URLGen(uri string) string {\n\tconfig, err := generalConfigLoad()\n\tif err != nil {\n\t\tfmt.Println(\"URLGen:\", err)\n\t\tos.Exit(1)\n\t}\n\turl := config.Scheme + \"://\" + config.Dstip + uri\n\n\treturn url\n}", "func (env Environment) generateURL(version string, path string) string {\n\treturn string(env) + version + path\n}", "func GenURL(cdi *container.Registry, resource string, v ...interface{}) string {\n\n\tif cdi == nil {\n\t\tif len(v) == 0 {\n\t\t\treturn resource\n\t\t}\n\t\treturn fmt.Sprintf(resource, v...)\n\t}\n\n\turLer := GetURLGen(cdi)\n\tif urLer == nil {\n\t\tif len(v) == 0 {\n\t\t\treturn resource\n\t\t}\n\t\treturn fmt.Sprintf(resource, v...)\n\t}\n\n\treturn urLer.URL(resource, v...)\n}", "func GetURL(authUser AuthUser) (string, string) {\n\tstate := RandomString(30)\n\n\t//.AuthCodeURL generates the url the user visits to authorize access to our app:\n\treturn authUser.config.AuthCodeURL(state), state\n}", "func (_this *Report) Url() string {\n\tvar ret string\n\tvalue := _this.Value_JS.Get(\"url\")\n\tret = (value).String()\n\treturn ret\n}", "func (d dvdStockists) GetURL(req *http.Request, s *site) *url.URL {\n\treturn makeURL(d, req, s, []string{\"name\", d.DVD.Name})\n}", "func getURL(u *UseCase, ctx context.Context, 
shortCode string) (*database.URL, error) {\n\n\tgetPopularURL, err := u.RedisRepo.Get(ctx, shortCode)\n\tif err != nil {\n\t\treturn nil, errors.New(ErrorGeneric)\n\t}\n\n\tif getPopularURL != nil {\n\t\tresponse := new(database.URL)\n\t\terr = json.Unmarshal([]byte(*getPopularURL), response)\n\t\tif err != nil {\n\t\t\treturn nil, errors.New(ErrorGeneric)\n\t\t}\n\t\treturn response, nil\n\t}\n\n\tresponse, err := u.DatabaseRepo.GetURL(shortCode)\n\tif err != nil {\n\t\treturn nil, errors.New(ErrorGeneric)\n\t}\n\n\tif response == nil {\n\t\treturn nil, errors.New(ErrorRecordNotFound)\n\t}\n\n\treturn response, nil\n\n}", "func (pf *PostFile) GetUrl() string {\n\tif pf.Url != \"\" {\n\t\treturn pf.Url\n\t}\n\n\tlog.Printf(\"WARN: post.file.url not set\\n\")\n\n\tif pf.MD5 == \"\" {\n\t\treturn \"\"\n\t}\n\n\turl := \"https://static1.e621.net/data/\"\n\n\tmd5 := pf.MD5\n\n\turl = url + md5[0:2] + \"/\" + md5[2:4] + \"/\" + md5 + \".\" + pf.Ext\n\treturn url\n}", "func getUrl(basicUrl string, apiKey string, ticker string) string {\n\turlWithTicker := strings.Replace(basicUrl, \"__SYMBOL__\", ticker, 1)\n\turlWithKey := strings.Replace(urlWithTicker, \"__APIKEY__\", apiKey, 1)\n\n\treturn urlWithKey\n}", "func (o BuildSpecSourceOutput) Url() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BuildSpecSource) string { return v.Url }).(pulumi.StringOutput)\n}", "func (h *Harness) URL() string {\n\treturn h.backend.URL()\n}", "func (doc Document) URL() string {\n\treturn fmt.Sprintf(\"https://www.wiso-net.de/document/%s\", doc.SourceAndID())\n}", "func GetURL(index int) (url string, err error) {\n\tclient := getClient()\n\turl, err = client.Get(\"url\" + string(index)).Result()\n\treturn\n}", "func (g *GitLab) URL() string {\n\treturn g.url\n}", "func (b *BaseProvider) GetURL(u string) (string, []byte, error) {\n\tglog.Infof(\"Getting URL %s\", u)\n\tresp, err := b.Client.Get(u)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\n\tfilename := \"\"\n\tcontHeader := resp.Header.Get(\"Content-Disposition\")\n\tres := strings.Split(contHeader, \"; \")\n\tfor _, res := range res {\n\t\tif strings.HasPrefix(res, \"filename=\") {\n\t\t\tfilename = strings.Split(res, \"=\")[1]\n\t\t}\n\t}\n\tfilename = strings.Trim(filename, \"\\\"\")\n\n\tdefer resp.Body.Close()\n\tcontent, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn filename, content, err\n\t}\n\treturn filename, content, nil\n}", "func GenerateURL(base, fileName, value string) (string, error) {\n\tu, err := url.Parse(base)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tq := u.Query()\n\tq.Set(\"filename\", fileName)\n\tq.Set(\"value\", value)\n\tu.RawQuery = q.Encode()\n\n\treturn u.String(), nil\n}", "func (s Shop) GetURL(baseURL string) (result string) {\n\tfullURL, err := url.Parse(baseURL)\n\tif err != nil {\n\t\tlog.Println(\"[Model/Shop] Error parsing baseURL: \", err.Error())\n\t\treturn\n\t}\n\tfullURL.Path += fmt.Sprintf(\"/%v/%s\", s.ID, strings.Replace(s.Name, \" \", \"-\", -1))\n\tresult = fullURL.String()\n\treturn\n}", "func (p *FileInf) getURL() (*url.URL, error) {\n\tu, err := url.Parse(driveapiurl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tu.Path = path.Join(u.Path, p.PermissionInfo.FileID)\n\tu.Path = path.Join(u.Path, \"permissions\")\n\treturn u, nil\n}", "func (m *LogicAppTriggerEndpointConfiguration) GetUrl()(*string) {\n val, err := m.GetBackingStore().Get(\"url\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}", "func (dash *Dashboard) 
GenerateUrl() string {\n\treturn GetDashboardUrl(dash.Uid, dash.Slug)\n}", "func GetURL(kind string, token string) string {\n\treturn fmt.Sprintf(\"%s%s?token=%s\", \"https://api.clubhouse.io/api/v2/\", kind, token)\n}", "func (h *handler) GetURL(response http.ResponseWriter, request *http.Request) {\n\tvars := mux.Vars(request)\n\tpath := vars[\"path\"]\n\n\tlog.Infof(\"Request to obtain URL associated with path '%s'\", path)\n\n\tif dest, ok := h.store.Get(path); ok {\n\t\tencoder := json.NewEncoder(response)\n\t\tencoder.Encode(urlMapping{path, dest})\n\t\treturn\n\t}\n\n\thttp.NotFound(response, request)\n}", "func GetUrl(c Category) string {\n\treturn URL + c.Type\n}", "func (r *ImageReprGQL) URL(ctx context.Context) *string {\n\tres := \"\"\n\treturn &res\n}", "func (r *EnableNotificationRequest) GenerateUrl() (string, url.Values, error) {\n\n\tbaseUrl, _, err := r.Identifier.GenerateUrl()\n\n\tif err != nil {\n\t\treturn \"\" , nil, err\n\t}\n\n\tif r.Identifier.RuleID == \"\" {\n\t\treturn \"\", nil, errors.New(\"Rule ID should be provided for enable action\")\n\t}\n\n\tbaseUrl += \"/\" + r.Identifier.RuleID + \"/enable\"\n\n\treturn baseUrl, nil, nil\n}", "func (c client) URL() string {\n\treturn c.url\n}", "func (conf FileConfiguration) URL() string {\n\treturn \"\"\n}", "func (cc *CloneCommand) URL() string {\n\treturn fmt.Sprintf(\"%s%s.git\", cc.BaseURL(), cc.GistID)\n}", "func (r *Response) URL() string {\n\treturn r.host + \"/convert/p3r68-cdx67/\" + r.Filename\n}", "func (q *QRCode) GetURL() string {\n\treturn os.Getenv(\"QRCODE_BASE_URL\") + q.ID\n}", "func (f *Feed) URL() string { return f.url }", "func gcUrl(path string) string {\n\treturn fmt.Sprintf(\"%s%s\", baseUrl, path)\n}", "func (c *client) URL() string {\n\treturn c.rawurl\n}", "func (o *LDAPIdentityProvider) URL() string {\n\tif o != nil && o.bitmap_&2 != 0 {\n\t\treturn o.url\n\t}\n\treturn \"\"\n}", "func (m *TelecomExpenseManagementPartner) GetUrl()(*string) {\n val, err := m.GetBackingStore().Get(\"url\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}", "func GenerateUrl() string {\n\treturn FbConfig.AuthCodeURL(\"state\")\n}", "func (d *TestDex) URL() string {\n\treturn d.webServer.URL\n}", "func GetUrl(ctx *pulumi.Context) string {\n\treturn config.Get(ctx, \"grafana:url\")\n}", "func (o *Object) url() string {\n\treturn o.fs.url(o.remote)\n}", "func (p *Base) URL() string {\n\treturn \"https://thebase.in\"\n}", "func (i *instance) url() string {\n\treturn i.apiURL\n}", "func (c *CaptchaGID) URL() string {\n\tif *c == \"\" || *c == \"-1\" {\n\t\treturn \"\"\n\t}\n\treturn APIEndpoints.CommunityBase.String() + \"/public/captcha.php?gid=\" + c.String()\n}", "func (tw tweets) GetURL(req *http.Request, s *site) *url.URL {\n\treturn makeURL(tw, req, s, nil)\n}", "func getURL(host string, port int, ssl bool) string {\n\tif ssl {\n\t\treturn fmt.Sprintf(\"https://%s:%v\", host, port)\n\t} else {\n\t\treturn fmt.Sprintf(\"http://%s:%v\", host, port)\n\t}\n}", "func (d *Driver) GetURL() (string, error) {\n\treturn \"\", nil\n}", "func getURL(company string) (URL string, err error) {\n\tif val, found := stocksURL[strings.ToLower(company)]; found {\n\t\tURL = baseURL + \"/\" + val.Company + \"/\" + val.Symbol + \"/daily\"\n\t\treturn\n\t}\n\treturn \"\", fmt.Errorf(\"Company Not Found\")\n}", "func (s *Service) generateUrl(apiMethod string, arguments map[string]string) string {\n\tvalues := url.Values{}\n\tfor key, value := range arguments {\n\t\tvalues.Set(key, 
value)\n\t}\n\tvalues.Set(\"api_key\", s.ApiKey)\n\ttimestamp := fmt.Sprintf(\"%d\", time.Now().Unix())\n\tvalues.Set(\"ts\", timestamp)\n\thash := sha1.New()\n\tio.WriteString(hash, s.SharedSecret+timestamp)\n\tvalues.Set(\"hash\", fmt.Sprintf(\"%x\", hash.Sum(nil)))\n\treturn apiUrl + \"/\" + apiMethod + \"?\" + values.Encode()\n}", "func (o ApiLicenseOutput) Url() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApiLicense) *string { return v.Url }).(pulumi.StringPtrOutput)\n}", "func (i Internet) URL() string {\n\turl := i.Faker.RandomStringElement(urlFormats)\n\n\t// {{domain}}\n\turl = strings.Replace(url, \"{{domain}}\", i.Domain(), 1)\n\n\t// {{slug}}\n\turl = strings.Replace(url, \"{{slug}}\", i.Slug(), 1)\n\n\treturn url\n}", "func (se *SharedElement) URL() string {\n\treturn \"#\" + se.ID()\n}", "func (se *SharedElement) URL() string {\n\treturn \"#\" + se.ID()\n}", "func (t *Toon) URL() string {\n\treturn fmt.Sprintf(\"%s/%d/%d/%d\", \"https://starcraft2.com/en-us/en/profile\", t.RegionID(), t.RealmID(), t.ID())\n}", "func getFinalUrl(rawurl string, client *http.Client) (string, error) {\n\turl, err := parseUrl(rawurl)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treq.Header.Set(\"User-Agent\", \"Mozilla\")\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\treturn fmt.Sprint(resp.Request.URL), nil\n}", "func (e *Endpoint) URL() string {\n\treturn e.url\n}", "func (r *Rietveld) Url(issueID int64) string {\n\tif issueID == 0 {\n\t\treturn r.url\n\t}\n\treturn fmt.Sprintf(\"%s/%d\", r.url, issueID)\n}", "func (e *EntityChat) GetURL() string {\n\treturn fmt.Sprintf(\n\t\t\"%s%s%d/%s\",\n\t\tglobalURLPrefix,\n\t\tclientPrefix,\n\t\te.Chat,\n\t\te.Token,\n\t)\n}", "func (u exampleRemoteData) GetURL() string {\n\treturn \"https://randomuser.me/api/\"\n}", "func (o *CreateEventPayloadActions) GetUrl() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Url\n}", "func (o BackendProxyOutput) Url() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BackendProxy) string { return v.Url }).(pulumi.StringOutput)\n}", "func (s *UAA) URL() string {\n\treturn s.server.URL\n}", "func (b *Brand) GetURL() string {\n\tslug := utility.Parameterize(b.Title[:utility.Min(30, len(b.Title))])\n\tif len(slug) == 0 {\n\t\tslug = \"empty\"\n\t}\n\treturn fmt.Sprintf(\"/brands/%d/%s\", b.ID, slug)\n}", "func (driver *DBClient) GenerateShortURL(w http.ResponseWriter, r *http.Request) {\n\tvar id int\n\tvar record Record\n\tpostBody, _ := ioutil.ReadAll(r.Body)\n\tjson.Unmarshal(postBody, &record)\n\terr := driver.db.QueryRow(\"INSERT INTO web_url(url) VALUES($1) RETURNING ide, record.URL\").Scan(&id)\n\n\tresponseMap := map[string]interface{}{\"encoded_string\": base62.Encode(id)}\n\n\tif err != nil {\n\t\tw.Write([]byte(err.Error()))\n\t} else {\n\t\tresponse, _ := json.Marshal(responseMap)\n\t\tw.Write(response)\n\t}\n}", "func (c Comic) URL() string {\n\treturn \"https://xkcd.com/\" + c.ID()\n}", "func (ep *Endpoint) URL() (string, error) {\n\tbaseURL, err := deploys.GetDownloadURL(ep.Version)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn baseURL + \"/\" + ep.Resource + \".json\", nil\n}", "func (o WebhookOutput) Url() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Webhook) pulumi.StringOutput { return v.Url }).(pulumi.StringOutput)\n}", "func (l *PlexLogger) getURL() url.URL {\n\tif l.plexURL == nil {\n\t\tu, _ := 
url.Parse(\"http://127.0.0.1:32400/log\")\n\t\treturn *u\n\t}\n\treturn *l.plexURL\n}", "func (o ApplicationSpecRolloutplanRolloutbatchesBatchrolloutwebhooksOutput) Url() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ApplicationSpecRolloutplanRolloutbatchesBatchrolloutwebhooks) string { return v.Url }).(pulumi.StringOutput)\n}", "func (g *Gravatar) GetURL(email string) string {\n\thashedEmail := emailToHash(email)\n\tqueryString := g.encodeParameters()\n\n\tgravURL := g.gravatarURL + hashedEmail + queryString\n\n\treturn gravURL\n}", "func (dm *Datamuse) URL() string {\n\treturn dm.apiURL.String()\n}", "func GetURL(rel string) string {\n\treturn StringConcat(StringConcat(BaseURL, \"/\"), rel)\n}", "func (c *Campaigner) GenerateURL(url string) string {\n\tif strings.HasPrefix(url, \"/\") {\n\t\turl = strings.Replace(url, \"/\", \"\", 1)\n\t}\n\n\turl = fmt.Sprintf(\"%s/%s\", c.BaseURL, url)\n\n\treturn url\n}", "func (dash *Dashboard) GetUrl() string {\n\treturn GetDashboardFolderUrl(dash.IsFolder, dash.Uid, dash.Slug)\n}", "func (o ApplicationSpecRolloutplanRolloutwebhooksOutput) Url() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ApplicationSpecRolloutplanRolloutwebhooks) string { return v.Url }).(pulumi.StringOutput)\n}", "func (c *Client) URL() string {\n\treturn c.url\n}", "func (o CatalogOutput) Url() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Catalog) pulumi.StringOutput { return v.Url }).(pulumi.StringOutput)\n}", "func (it *Item) URL() string { return it.url }", "func (i *Image) URL() string {\n\treturn fmt.Sprintf(\"projects/%s/global/images/%s\", i.Project, i.Name)\n}", "func getCloneURL(r repo, secret string) string {\n\tif !r.Private {\n\t\treturn r.URL\n\t}\n\tu, err := url.Parse(r.URL)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\tu.User = url.User(secret)\n\treturn u.String()\n}", "func (config *TOTPConfig) generateURL(label string) string {\n\tsecret := config.ExportKey()\n\tu := url.URL{}\n\tv := url.Values{}\n\tu.Scheme = \"otpauth\"\n\tu.Host = \"totp\"\n\tu.Path = label\n\tv.Add(\"secret\", secret)\n\tif config.Size != totpDefaultDigits {\n\t\tv.Add(\"digits\", fmt.Sprintf(\"%d\", config.Size))\n\t}\n\n\t// If other hash algorithms become supported in Google\n\t// Authenticator, enable these.\n\t// switch {\n\t// case config.Algo == crypto.SHA256:\n\t// \tv.Add(\"algorithm\", \"SHA256\")\n\t// case config.Algo == crypto.SHA512:\n\t// \tv.Add(\"algorithm\", \"SHA512\")\n\t// }\n\n\tif config.Provider != \"\" {\n\t\tv.Add(\"provider\", config.Provider)\n\t}\n\n\tu.RawQuery = v.Encode()\n\treturn u.String()\n}", "func (s *SlicingDice) getFullUrl(path string) string {\n\tif len(sd_base) != 0 {\n\t\treturn sd_base + path\n\t} \n\treturn \"https://api.slicingdice.com/v1\" + path\n}", "func (o BuildRunStatusBuildSpecSourceOutput) Url() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BuildRunStatusBuildSpecSource) string { return v.Url }).(pulumi.StringOutput)\n}", "func getURL(tl string) string {\n\treturn fmt.Sprintf(\"https://translate.google.com/translate_a/single?client=at&dt=t&dt=ld&dt=qca&dt=rm&dt=bd&dj=1&hl=%s&ie=UTF-8&oe=UTF-8&inputm=2&otf=2&iid=1dd3b944-fa62-4b55-b330-74909a99969e\", tl)\n}", "func (sp *SessionProxy) URL() string { return sp.GetSession().URL() }", "func (b Book) URL() string {\n\treturn fmt.Sprintf(\"https://www.goodreads.com/book/show/%d\", b.ID)\n}", "func (e EndpointGateway) GetURL(ctx context.Context) (url string, err error) {\n\tvar urlStruct struct {\n\t\tURL string `json:\"url\"`\n\t}\n\treturn urlStruct.URL, e.doMethod(ctx, \"GET\", 
nil, &urlStruct)\n}", "func (k *Key) URL() string {\n\treturn k.url.String()\n}", "func URL(opts ...options.OptionFunc) string {\n\treturn singleFakeData(URLTag, func() interface{} {\n\t\topt := options.BuildOptions(opts)\n\t\ti := Internet{fakerOption: *opt}\n\t\tu, err := i.url()\n\t\tif err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\t\treturn u\n\t}, opts...).(string)\n}", "func (a *Accessor) genURLString(epID EntryPointID, pathTo string, q *Query) (string, error) {\n\tu, err := a.genBaseURL(epID)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tu.Path = path.Join(u.Path, pathTo)\n\tif q != nil {\n\t\tq.SetToURL(u)\n\t}\n\treturn u.String(), nil\n}" ]
[ "0.695418", "0.6780465", "0.67709434", "0.6749998", "0.6744279", "0.67276794", "0.6697484", "0.6657503", "0.66535795", "0.66500205", "0.6645439", "0.66204995", "0.6618371", "0.6612726", "0.6570689", "0.6536291", "0.65258366", "0.6483786", "0.6464578", "0.64532846", "0.64516574", "0.64486134", "0.64456105", "0.644054", "0.64398074", "0.64394164", "0.64248496", "0.6424229", "0.63872564", "0.6358302", "0.6351773", "0.6338287", "0.63331693", "0.6323749", "0.63166165", "0.63161254", "0.6307755", "0.62985784", "0.6285958", "0.6275447", "0.6273195", "0.62729526", "0.6267182", "0.626713", "0.62649137", "0.6257761", "0.6252451", "0.62456757", "0.62449783", "0.6239885", "0.6224925", "0.6220559", "0.62150997", "0.62092966", "0.61992884", "0.61986357", "0.618844", "0.61804", "0.61797786", "0.6178685", "0.61757064", "0.617357", "0.617357", "0.6142378", "0.61394835", "0.6137328", "0.6136477", "0.61316144", "0.61255074", "0.61167026", "0.6109819", "0.60965824", "0.609055", "0.6086783", "0.6071197", "0.60651827", "0.6064746", "0.6063438", "0.60595983", "0.6058254", "0.6054514", "0.6050249", "0.60455745", "0.6043687", "0.6036831", "0.60346794", "0.60321623", "0.6029632", "0.6027885", "0.6026329", "0.60232437", "0.6021577", "0.6020345", "0.60183936", "0.60165423", "0.6016448", "0.60145557", "0.6013215", "0.60119987", "0.6005412" ]
0.60749775
74
save saving current page to filesystem
func (p *Page) save(d *Dir) error {
	p.Sidebar = d.sidebar
	p.Items = d.pages
	file, err := os.Create(p.Path)
	if (err != nil) {
		return err
	}
	fmt.Printf("Create new page: %s\n \tby link:%s\n", p.Title, p.Path)
	return p.render(file)
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (p *Page) save() error {\n\tbody := fmt.Sprintf(\"<!-- Ingredients -->\\n%s\\n<!-- Instructions -->\\n%s\", p.Ingredients, p.Instructions)\n\treturn ioutil.WriteFile(filepath.Join(pagesDir, p.Filename+\".txt\"), []byte(body), 0600)\n}", "func (p *Page) save() error {\n\tfilename := \"./pages/\" + p.Title + \".txt\"\n\treturn ioutil.WriteFile(filename, p.Body, 0600)\n}", "func (page *Page) save() error {\n\tfilename := page.Title + \".txt\"\n\treturn ioutil.WriteFile(filename, page.Body, 0600)\n}", "func (p *Page) save() error {\n filename := p.Title + \".txt\"\n return ioutil.WriteFile(filename, p.Body, 0600)\n}", "func (p *Page) save() error {\n\tfilename := \"data/\" + p.Title + \".txt\"\n\treturn ioutil.WriteFile(filename, p.Body, 0600)\n}", "func (p *Page) save() error {\n\tfilename := p.Title + \".txt\"\n\treturn ioutil.WriteFile(filename, p.Body, 0600)\t// 0600 = r-w permissions for current user only\n}", "func (p *Page) save() error {\r\n\tfilename := p.Title + \".txt\"\r\n\treturn ioutil.WriteFile(filename, p.Body, 0600)\r\n}", "func (p *Page) save() error {\n\tfilename := p.Title + \".txt\"\n\treturn ioutil.WriteFile(filename, p.Body, 0600)\n}", "func (p *Page) save() error {\r\n\tfilename := p.Title\r\n\t//writes data to a file named by filename, the 0600 indicates that the file should be created with\r\n\t//read and write permissions for the user\r\n\treturn ioutil.WriteFile(filename, p.Body, 0600)\r\n}", "func (m *Mirror) save(link *url.URL, page *html.Node) {\n\tpath := m.buildLocalPath(link)\n\tdir := filepath.Dir(path)\n\tif err := os.MkdirAll(dir, 0777); err != nil {\n\t\tlog.Fatalf(\"creating dir %q: %v\", dir, err)\n\t}\n\n\tfile, err := os.Create(path)\n\tif err != nil {\n\t\tlog.Fatalf(\"creating file %q: %v\", path, err)\n\t}\n\tdefer file.Close()\n\n\terr = html.Render(file, page)\n\tif err != nil {\n\t\tlog.Fatalf(\"copying to %q: %v\", path, err)\n\t}\n}", "func savePage(url url.URL, body []byte) bool{\n\t// TODO: Take save location as a CMD line flag\n\trootDir := \"/tmp/scraper\"\n\n\tdirPath := rootDir + \"/\" + url.Host + url.Path\n\n\terr := os.MkdirAll(dirPath, 0777)\n\tif err != nil {\n\t\tlog.Printf(\"Cannot create directory %s. \\nError: %s\", dirPath, err)\n\t\treturn false\n\t}\n\n\tfilePath := dirPath + \"/index.html\"\n\n\terr = ioutil.WriteFile(filePath, body, 0777)\n\tif err != nil {\n\t\tlog.Printf(\"Cannot write to file=%s. \\nError: %s\", filePath, err)\n\t\treturn false\n\t}\n\treturn true\n}", "func saveHandler(w http.ResponseWriter, r *http.Request) {\n\ttitle, err := getTitle(w, r)\n\tif err != nil {\n\t\treturn\n\t}\n\tbody := r.FormValue(\"body\")\t// Get the page content. 
It is of type string - we must convert it to []byte before it will fit into the Page struct.ß\n\tp := &Page{Title: title, Body: []byte(body)}\n\terr = p.save()\t// Write the data to a file\n\t// An error that occurs during p.save() will be reported to the user\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\thttp.Redirect(w, r, \"/view/\"+title, http.StatusFound)\n}", "func saveWebPage(cr *Crawler, url, extension string, content []byte) {\n\tname := strings.Replace(strings.Replace(url, \":\", \"*\", -1), \"/\", \"!\", -1)\n\tfilename := cr.destinationPath + \"/\" + name + \".\" + extension\n\texisting := cr.destinationPath + \"/\" + name\n\tif extension == \"done\" {\n\t\texisting += \".saved\"\n\t} else {\n\t\tif extension == \"saved\" {\n\t\t\texisting += \".ready\"\n\t\t}\n\t\tfile, err := os.Create(existing)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tfile.Write(content)\n\t\tfile.Close()\n\t}\n\tif err := os.Rename(existing, filename); err != nil {\n\t\tpanic(err)\n\t}\n}", "func saveHandler (w http.ResponseWriter, r *http.Request) {\n\n\t//on sauvegarde dans un fichier\n\tp := &PageTablature{Titre:r.FormValue(\"titre\"), Tab:[]byte(r.FormValue(\"sandbox\"))}\n\t\n\terr := p.save()\n\t\n\tif err != nil {\n\t\t//redirection vers page de création\n\t\tcreateHandler(w, r)\n\t}\n\n\ttabsHandler(w, r)\n}", "func (m *settings) saveToDisk() error {\n\tb, err := json.Marshal(m.redirects)\n\tif err != nil {\n\t\tlog.Printf(\"Error marshalling %s\", err)\n\t\treturn fmt.Errorf(\"error marshalling %s\", err)\n\t}\n\n\tif err := ioutil.WriteFile(m.filename, b, 0644); err != nil {\n\t\treturn fmt.Errorf(\"unable to open file %s\", err)\n\t}\n\tlog.Printf(\"saving to disk.\")\n\treturn nil\n}", "func (s store) Save() {\n\ts.writeToDisk()\n}", "func saveHandler(w http.ResponseWriter, r *http.Request, title string) {\n\tingredients := r.FormValue(\"ingredients\")\n\tinstructions := r.FormValue(\"instructions\")\n\trecipeTitle := r.FormValue(\"recipeTitle\")\n\n\tfilename := convertTitleToFilename(recipeTitle)\n\n\tp := &Page{\n\t\tTitle: recipeTitle,\n\t\tFilename: filename,\n\t\tIngredients: template.HTML(ingredients),\n\t\tInstructions: template.HTML(instructions)}\n\n\terr := p.save()\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// If the filename is different than the title then we are renaming and\n\t// should remove the old file.\n\tif filename != title {\n\t\toldfile := filepath.Join(pagesDir, title+\".txt\")\n\n\t\t// Only proceed with the rename if the old file exists.\n\t\tif _, err := os.Stat(oldfile); err == nil {\n\t\t\tif err := os.Remove(oldfile); err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}\n\t}\n\n\tupdateIndex()\n\thttp.Redirect(w, r, \"/view/\"+filename, http.StatusFound)\n}", "func saveHandler(w http.ResponseWriter, r *http.Request) {\r\n title := r.URL.Path[len(\"/save/\"):]\r\n body := r.FormValue(\"body\")\r\n p := &Page{Title: title, Body: []byte(body)}\r\n p.save()\r\n http.Redirect(w, r, \"/view/\"+title, http.StatusFound)\r\n}", "func handleSave(w http.ResponseWriter, r *http.Request, title string) {\n\tvar (\n\t\tbody = r.FormValue(\"body\")\n\t\tp = &page.Page{Title: title, Body: []byte(body)}\n\t)\n\tif err := p.Save(); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\thttp.Redirect(w, r, pathView+title, http.StatusFound)\n\tlogInfo(p.Title, \"file saved succesfully\")\n}", 
"func (s *FilesystemStore) save(session *SessionImp) error {\n\tencoded, err := securecookie.EncodeMulti(session.name, session.Values,\n\t\ts.Codecs...)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfilename := filepath.Join(s.path, \"session_\"+session.ID)\n\tfileMutex.Lock()\n\tdefer fileMutex.Unlock()\n\treturn ioutil.WriteFile(filename, []byte(encoded), 0600)\n}", "func (b *Bookmarks) Save() error {\n\treturn b.WriteToFile(b.Filename)\n}", "func (env *Env) Save(res http.ResponseWriter, req *http.Request, title string) {\n\tenv.Log.V(1, \"beginning hanlding of Save.\")\n\ttitle = strings.Replace(strings.Title(title), \" \", \"_\", -1)\n\tbody := []byte(req.FormValue(\"body\"))\n\tpage := &webAppGo.Page{Title: title, Body: body}\n\terr := env.Cache.SaveToCache(page)\n\tif err != nil {\n\t\tenv.Log.V(1, \"notifying client that an internal error occured. Error is associated with Cache.SaveToCache.\")\n\t\thttp.Error(res, err.Error(), 500)\n\t}\n\terr = env.DB.SavePage(page)\n\tif err != nil {\n\t\tenv.Log.V(1, \"notifying client that an internal error occured. Error is associated with Cache.SavePage.\")\n\t\thttp.Error(res, err.Error(), 500)\n\t}\n\tenv.Log.V(1, \"The requested new page was successully saved, redirecting client to /view/PageTitle.\")\n\thttp.Redirect(res, req, \"/view/\"+title, http.StatusFound)\n}", "func SaveFile(name string, content string) {\n\tmyself, error := user.Current()\n\n\tif error != nil {\n\t\tfmt.Println(error)\n\t}\n\n\tfullPath := myself.HomeDir + \"/Documents/Server/\" + name\n\tfmt.Println(\"FILE SAVED AT => \" + fullPath)\n\tioutil.WriteFile(fullPath, []byte(content), 0644)\n}", "func (app *service) Save(state State) error {\n\tjs, err := app.adapter.ToJSON(state)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tchainHash := state.Chain()\n\tindex := state.Height()\n\tpath := filePath(chainHash, index)\n\treturn app.fileService.Save(path, js)\n}", "func saveHandler(w http.ResponseWriter, r *http.Request, title string) {\n\tbody := r.FormValue(\"body\")\n\tp := &Page{Title: title, Body: []byte(body)}\n\terr := p.save()\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\thttp.Redirect(w, r, \"/view/\"+title, http.StatusFound)\n}", "func saveHandler(w http.ResponseWriter, r *http.Request, title string) {\n\tbody := r.FormValue(\"body\")\n\tp := &Page{Title: title, Body: []byte(body)}\n\terr := p.save()\n\tif err != nil {\n\t\tlog.Println(\"saveHandler() error: \", err)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\thttp.Redirect(w, r, \"/view/\"+title, http.StatusFound)\n}", "func (p *DefaultParser) Save(buf *bytes.Buffer, filename string) error {\n\terr := ioutil.WriteFile(filename, buf.Bytes(), 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (s *FilesystemStore) Save(c *kelly.Context, session *SessionImp) error {\n\t// Delete if max-age is <= 0\n\tif session.Options.MaxAge <= 0 {\n\t\tif err := s.erase(session); err != nil {\n\t\t\treturn err\n\t\t}\n\t\thttp.SetCookie(c, NewCookie(session.name, \"\", session.Options))\n\t\treturn nil\n\t}\n\n\tif session.ID == \"\" {\n\t\t// Because the ID is used in the filename, encode it to\n\t\t// use alphanumeric characters only.\n\t\tsession.ID = strings.TrimRight(\n\t\t\tbase32.StdEncoding.EncodeToString(securecookie.GenerateRandomKey(32)), \"=\")\n\t}\n\tif err := s.save(session); err != nil {\n\t\treturn err\n\t}\n\tencoded, err := securecookie.EncodeMulti(session.name, session.ID,\n\t\ts.Codecs...)\n\tif err 
!= nil {\n\t\treturn err\n\t}\n\thttp.SetCookie(c, NewCookie(session.name, encoded, session.Options))\n\treturn nil\n}", "func (h *Homework) save() (err error) {\n\tbuf, err := json.MarshalIndent(h, \"\", \"\\t\")\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(h.path, buf, 0777)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func savePageMemory_(index int) {\n\tsite := fmt.Sprintf(\"http://c.api.globo.com/news/%s.json\", ufs[index])\n\tfile, err := http.Get(site)\n\t//connection error\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t//get all data from website\n\tdataByte, err := ioutil.ReadAll(file.Body)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t//split the data\n\tdataSplit := split(dataByte)\n\tsession := connectionDB()\n\tdefer session.Close()\n\tc := session.DB(\"apidb\").C(\"news\")\n\tvar ufPage UfPage\n\tufPage.Uf = ufs[index]\n\tufPage.Pageuf = make([]Page, len(dataSplit))\n\tfor i := 0; i < len(dataSplit); i++ {\n\t\tufPage.Pageuf[i] = unMarshal([]byte(dataSplit[i]))\n\t}\n\t_, err = c.Upsert(bson.M{\"uf\": ufs[index]}, &ufPage)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func (bh browserHistory) Save() error {\n\tbytes, err := json.Marshal(bh.Records)\n\tif err != nil {\n\t\treturn err\n\t}\n\tjs.Global().Get(\"localStorage\").Set(\"history\", string(bytes))\n\n\treturn nil\n}", "func (fb *FileBackend) save(state *storage.State) error {\n\tout, err := proto.Marshal(state)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to encode state: %w\", err)\n\t}\n\ttmp := fmt.Sprintf(fb.path+\".%v\", time.Now())\n\tif err := ioutil.WriteFile(tmp, out, 0600); err != nil {\n\t\treturn fmt.Errorf(\"failed to write state: %w\", err)\n\t}\n\terr = os.Rename(tmp, fb.path)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to move state: %w\", err)\n\t}\n\treturn nil\n}", "func (s *ServiceState) save() {\n\tlog.Lvl3(\"Saving service\")\n\tb, err := network.Marshal(s.Storage)\n\tif err != nil {\n\t\tlog.Error(\"Couldn't marshal service:\", err)\n\t} else {\n\t\terr = ioutil.WriteFile(s.path+\"/prifi.bin\", b, 0660)\n\t\tif err != nil {\n\t\t\tlog.Error(\"Couldn't save file:\", err)\n\t\t}\n\t}\n}", "func (d *DiskStorage) Save() error {\n\n\tvar file, err = os.OpenFile(d.path, os.O_RDWR, 0644)\n\tif d.isError(err) {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\n\t_, errWrite := file.WriteString(d.String())\n\treturn errWrite\n}", "func (p *Entry) Save() (err error) {\n\tfm, err := frontmatter.Marshal(&p)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar perm os.FileMode = 0666\n\tif err = ioutil.WriteFile(p.Path, append(fm, p.Body...), perm); err != nil {\n\t\tfmt.Println(\"Dump:\")\n\t\tfmt.Println(string(fm))\n\t\tfmt.Println(string(p.Body))\n\t}\n\treturn err\n}", "func (p *Page) save() error {\n\t// Save to database\n\treturn pages.Insert(p)\n}", "func Save(path string, v interface{}) error {\n\tformatter, err := parsePath(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfile, err := os.OpenFile(path, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\treturn formatter.Encode(file, v)\n}", "func (file *File) Save() {\n\tif file.autoFmt {\n\t\terr := file.Fmt()\n\t\tif err != nil {\n\t\t\tfile.NotifyUser(err.Error())\n\t\t}\n\t}\n\tfile.SnapshotSaved()\n\tcontents := []byte(file.ToString())\n\terr := ioutil.WriteFile(file.Name, contents, file.fileMode)\n\tif err != nil {\n\t\tfile.NotifyUser(\"Save Failed: \" + err.Error())\n\t} else 
{\n\t\tfile.savedBuffer.ReplaceBuffer(file.buffer.DeepDup())\n\t\tfile.NotifyUser(\"Saved.\")\n\t\tfile.modTime = time.Now()\n\t\tfile.md5sum = md5.Sum(contents)\n\t}\n}", "func (wlt *Wallet) Save(dir string) error {\n\tr := NewReadableWallet(*wlt)\n\treturn r.Save(filepath.Join(dir, wlt.GetFilename()))\n}", "func Save(path string, object interface{}) error {\n\tfile, err := os.Create(path)\n\tif err == nil {\n\t\tencoder := gob.NewEncoder(file)\n\t\tencoder.Encode(object)\n\t}\n\tfile.Close()\n\treturn err\n}", "func Save(path string, v interface{}, opts Options) (err error) {\n\tvar (\n\t\tfile *os.File\n\t\ttmp = path + \".tmp.\" + cmn.GenTie()\n\t)\n\tif file, err = cmn.CreateFile(tmp); err != nil {\n\t\treturn\n\t}\n\tdefer func() {\n\t\t_ = file.Close()\n\t\tif err != nil {\n\t\t\tos.Remove(tmp)\n\t\t}\n\t}()\n\tif err = Encode(file, v, opts); err != nil {\n\t\treturn\n\t}\n\tif err = file.Close(); err != nil {\n\t\treturn\n\t}\n\terr = os.Rename(tmp, path)\n\treturn\n}", "func Save(path string, object interface{}) error {\n\tfile, err := os.Create(path)\n\tdefer file.Close()\n\tif err == nil {\n\t\tencoder := gob.NewEncoder(file)\n\t\tencoder.Encode(object)\n\t}\n\treturn err\n}", "func Save(filePath string, content []byte) error {\n\tkh, err := KymaHome()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Could not save file\")\n\t}\n\n\tfilePath = fp.Join(kh, filePath)\n\n\terr = os.MkdirAll(fp.Dir(filePath), 0700)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Could not save file\")\n\t}\n\n\tif err = ioutil.WriteFile(filePath, content, 0644); err != nil {\n\t\treturn errors.Wrap(err, \"Could not save file\")\n\t}\n\treturn nil\n}", "func handlerSave(w http.ResponseWriter, r *http.Request, title string) {\r\n\tbody := r.FormValue(\"body\")\r\n\tp := &Page{Title: title, Body: []byte(body)}\r\n\terr := p.save()\r\n\tif err != nil {\r\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\r\n\t\tos.Exit(1)\r\n\t}\r\n\thttp.Redirect(w, r, \"/view/\"+p.Title, http.StatusFound)\r\n}", "func (d *Diff) Save(folder string) error {\n\tif err := os.MkdirAll(folder, 0755); err != nil {\n\t\treturn err\n\t}\n\n\tfname := filepath.Join(folder, fmt.Sprintf(\"%s.md\", d.Title))\n\tlog.Infof(\"Creating diff file: %s\", fname)\n\tf, err := os.Create(fname)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\t_, err = f.WriteString(d.String())\n\n\treturn err\n}", "func (a *Network) Save(path string) {\n\tioutil.WriteFile(path, []byte(a.outputFormat()), 0666)\n}", "func Save() error {\n\treturn nil\n}", "func (b *bookMark) save(event win_eventlog.EvtHandle) error {\n\tnewBookmark, err := win_eventlog.UpdateBookmark(b.handle, event, b.buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := b.file.Truncate(0); err != nil {\n\t\treturn err\n\t}\n\tif _, err := b.file.Seek(0, 0); err != nil {\n\t\treturn err\n\t}\n\t_, err = b.file.WriteString(newBookmark)\n\treturn err\n}", "func (c *Passward) Save() error {\n\n\tif !util.DirectoryExists(c.Path) {\n\t\tif err := os.MkdirAll(c.Path, 0700); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfile, err := os.Create(c.configPath())\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\tif err := toml.NewEncoder(file).Encode(c); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (f *File) Save(name string) error {\n\treturn ioutil.WriteFile(name, []byte(f.String()), 0666)\n}", "func (s *Store) Save() error {\n\tbk, err := os.OpenFile(filepath.Join(s.rwDirPath, storeBkName), os.O_RDWR|os.O_CREATE, 0666)\n\tif err != 
nil {\n\t\treturn errors.Wrap(err, err.Error())\n\t}\n\tdefer bk.Close()\n\n\tdst, err := os.OpenFile(filepath.Join(s.rwDirPath, storeName), os.O_RDWR|os.O_CREATE, 0666)\n\tif err != nil {\n\t\treturn errors.Wrap(err, err.Error())\n\t}\n\tdefer dst.Close()\n\n\t// backing up current store\n\t_, err = io.Copy(bk, dst)\n\tif err != nil {\n\t\treturn errors.Wrap(err, err.Error())\n\t}\n\n\tenc := gob.NewEncoder(dst)\n\tbook := s.Clone()\n\terr = enc.Encode(book)\n\tif err != nil {\n\t\treturn errors.Wrap(err, err.Error())\n\t}\n\treturn nil\n}", "func (api *api) save() {\n\tr := map[string]string{}\n\tfor k, v := range api.Manager.pathes {\n\t\tr[k] = v.content\n\t}\n\tdata, err := json.MarshalIndent(r, \"\", \" \")\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\terr = ioutil.WriteFile(api.ConfigFile, data, 0755)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n}", "func save(bytes []byte, fh *os.File) error {\n\t_, err := fh.Write(bytes)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = fh.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func Save(path string, v interface{}) error {\n\tlock.Lock()\n\tdefer lock.Unlock()\n\tf, err := os.Create(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\tr, err := Marshal(v)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = io.Copy(f, r)\n\treturn err\n}", "func Save(path string, v interface{}) error {\n\tlock.Lock()\n\tdefer lock.Unlock()\n\tf, err := os.Create(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\tr, err := Marshal(v)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = io.Copy(f, r)\n\treturn err\n}", "func Save(path string, v interface{}) error {\n\tlock.Lock()\n\tdefer lock.Unlock()\n\tf, err := os.Create(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\tr, err := Marshal(v)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = io.Copy(f, r)\n\treturn err\n}", "func (file *File) Save() error {\n\tif file.path == \":memory:\" {\n\t\treturn nil\n\t}\n\n\t// Maybe an easier way is to use ioutil.WriteFile\n\tfp, err := os.OpenFile(file.path, os.O_TRUNC|os.O_WRONLY|os.O_CREATE, 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer fp.Close()\n\n\tif len(file.cache) == 0 {\n\t\tfp.WriteString(\"\\n\")\n\t\treturn nil\n\t}\n\n\tfor _, alias := range file.cache {\n\t\tfp.WriteString(alias.String() + \"\\n\")\n\t}\n\n\treturn nil\n}", "func Save(fileName string, dst interface{}) error {\n\t// Create all directories\n\tif err := os.MkdirAll(filepath.Dir(fileName), os.ModePerm); err != nil {\n\t\treturn err\n\t}\n\tfile, err := os.Create(fileName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\tif err == nil {\n\t\tencoder := gob.NewEncoder(file)\n\t\tif err = encoder.Encode(dst); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (app *service) Save(genesis Genesis) error {\n\tjs, err := app.adapter.ToJSON(genesis)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn app.fileService.Save(app.fileNameWithExt, js)\n}", "func (ns *NodeStore) save() error {\r\n\tb, err := json.Marshal(ns)\r\n\tif err != nil {\r\n\t\treturn err\r\n\t}\r\n\terr = ioutil.WriteFile(\"nodestore.json\", b, 0660)\r\n\tif err != nil {\r\n\t\treturn err\r\n\t}\r\n\treturn nil\r\n}", "func (p *Page) Save() error {\n\terr := p.requestHTML()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = db.Update(func(tx *bolt.Tx) error {\n\t\terr := p.txDelete(tx)\n\t\tif err != nil && err != ErrPageNotFound {\n\t\t\treturn err\n\t\t}\n\t\treturn p.txPut(tx)\n\t}); err 
!= nil {\n\t\treturn err\n\t}\n\n\tfor k, v := range p.Images {\n\t\tf, err := os.Open(path.Join(opts.RepoDir, p.ID, k))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// upload the high res image\n\t\tctx := context.Background()\n\t\tw := storageBucket.Object(v.ID).NewWriter(ctx)\n\t\tw.ACL = []storage.ACLRule{{Entity: storage.AllUsers, Role: storage.RoleReader}}\n\t\tw.ContentType = \"image/jpeg\"\n\t\tw.CacheControl = fmt.Sprintf(\"public, max-age=%d\", storageMaxAge)\n\n\t\tif _, err = io.Copy(w, f); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tf.Close()\n\t\tw.Close()\n\n\t\t// upload the preview image\n\t\tr := bufio.NewReader(&v.buffer)\n\t\tw = storageBucket.Object(\"preview_\" + v.ID).NewWriter(ctx)\n\t\tw.ACL = []storage.ACLRule{{Entity: storage.AllUsers, Role: storage.RoleReader}}\n\t\tw.ContentType = \"image/jpeg\"\n\t\tw.CacheControl = fmt.Sprintf(\"public, max-age=%d\", storageMaxAge)\n\t\tif _, err = io.Copy(w, r); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tw.Close()\n\t}\n\n\treturn nil\n}", "func (d *Dirs) Save() {\n\tj, err := json.Marshal(*d)\n\tutils.HandleErr(err, \"marshal json\")\n\terr = ioutil.WriteFile(PathToFile, j, 0611)\n\tutils.HandleErr(err, \"write to file\")\n}", "func save(object interface{}, c Configuration) error {\n\tfile, err := os.Create(c.StorageLocation + \"/index.gob\")\n\tif err == nil {\n\t\tencoder := gob.NewEncoder(file)\n\t\tencoder.Encode(object)\n\t} else {\n\t\tpanic(err)\n\t}\n\n\tfile.Close()\n\treturn err\n}", "func (ws *WalletStore) Save() {\n\tvar buffer bytes.Buffer\n\tgob.Register(elliptic.P256())\n\tencoder := gob.NewEncoder(&buffer)\n\terr := encoder.Encode(ws.Wallets)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfile := ws.Config.GetWalletStoreFile(ws.NodeID)\n\terr = ioutil.WriteFile(file, buffer.Bytes(), 0644)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func (d deck) saveToFile(fileName string) error {\n\treturn ioutil.WriteFile(fileName, []byte(d.toString()), 0776)\n}", "func (bs *BoxStorage) Save() error {\n\n\tb := &bytes.Buffer{}\n\tenc := gob.NewEncoder(b)\n\terr := enc.Encode(bs)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Println(\"Saving file to\", bs.fileLocation+\"/\"+BoxStorageFileName)\n\n\tfh, err := os.OpenFile(bs.fileLocation+\"/\"+BoxStorageFileName,\n\t\tos.O_CREATE|os.O_WRONLY, 0777)\n\tdefer fh.Close()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = fh.Write(b.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func savingFile(states AllStates, ID string) {\n\tfile, err := os.Create(\"elevator_states.txt\") //Creates file that will only contain latest data\n\t//checks for errors and saves to file as JSON\n\tcheck(err)\n\te := json.NewEncoder(file).Encode(states) //saves the AllStates struct to file\n\tcheck(e)\n}", "func (settings *Settings) Save(path string) error {\n\tcontent, err := json.Marshal(settings)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn ioutil.WriteFile(path, content, 0644)\n}", "func (me fileStore) Save(directory, filename string, buf io.Reader) (string, error) {\n\n\tif filename == \"\" {\n\t\terr := fmt.Errorf(\"filename is incorrect\")\n\t\treturn \"\", err\n\t}\n\n\tpath := me.ImageBasePath\n\tif directory != \"\" {\n\t\tpath += string(os.PathSeparator) + directory\n\t}\n\n\tfsPath, err := utils.CreateDirIfNotExists(me.FileSystem, filepath.Join(me.PublicPath, path))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfilename = me.FilePrefix + filename\n\n\tlog.Printf(\"Openning file %s\", fsPath+\"/\"+filename)\n\tf, err := 
me.FileSystem.OpenFile(filepath.Join(fsPath, filename))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdefer f.Close()\n\tcount, err := io.Copy(f, buf)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif count == 0 {\n\t\treturn \"\", fmt.Errorf(\"file not created, no data to write\")\n\t}\n\n\tlog.Printf(\"%d bytes wrote at %s\", count, fsPath+\"/\"+filename)\n\n\treturn filepath.Join(path, filename), nil\n}", "func (p BlogPost) Save() error {\n\ttemp, err := json.Marshal(p)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(\"www/posts/\"+p.Path+\".json\", temp, 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (s *Siegfried) Save(path string) error {\n\tls := persist.NewLoadSaver(nil)\n\tls.SaveTime(s.C)\n\ts.em.Save(ls)\n\ts.mm.Save(ls)\n\ts.cm.Save(ls)\n\ts.bm.Save(ls)\n\ts.tm.Save(ls)\n\tls.SaveTinyUInt(len(s.ids))\n\tfor _, i := range s.ids {\n\t\ti.Save(ls)\n\t}\n\tif ls.Err != nil {\n\t\treturn ls.Err\n\t}\n\tf, err := os.Create(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\t_, err = f.Write(append(config.Magic(), byte(config.Version()[0]), byte(config.Version()[1])))\n\tif err != nil {\n\t\treturn err\n\t}\n\tz, err := flate.NewWriter(f, 1)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = z.Write(ls.Bytes())\n\tz.Close()\n\treturn err\n}", "func Save() {\n\tdata := Savedata{\n\t\tName: GS.current.name,\n\t\tGamestate: GS.current,\n\t}\n\n\tf, err := json.MarshalIndent(data, \"\", \" \")\n\tcheck(err)\n\tioutil.WriteFile(\"data/savegame.json\", f, 0644)\n}", "func Save(c echo.Context) error {\n\tsess, _ := Get(c)\n\tsess.Options = &sessions.Options{\n\t\tPath: \"/\",\n\t\tHttpOnly: true,\n\t\tMaxAge: 20,\n\t}\n\treturn saveSession(c, sess)\n}", "func (buf *Buf) Save(file string) error {\n\tif file == \"\" {\n\t\treturn errors.New(\"No filename given\")\n\t}\n\n\tbs := []byte(buf.Text())\n\terr := ioutil.WriteFile(file, bs, 0644)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%s\", err)\n\t}\n\n\tbuf.Name = file\n\tbuf.ClearDirty()\n\treturn nil\n}", "func (w *Worker) save() (n int, err error) {\n\tif w.position == 0 {\n\t\treturn 0, nil\n\t}\n\tif w.fileExists() {\n\t\tn, err = w.fileRoot.Write(w.buffer[0:w.position])\n\t\tif err == nil {\n\t\t\tw.position = 0\n\t\t}\n\t} else {\n\t\tw.errorCallback()\n\t}\n\treturn n, err\n}", "func (d deck) saveTofile(filename string) error {\n\treturn ioutil.WriteFile(filename, []byte(d.toString()), 0666)\n}", "func (repo Repository) Save(file domain.File) error {\n\tfileDir := path.Join(repo.StorageDir, file.Path)\n\terr := os.MkdirAll(fileDir, 0777)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfilePath := path.Join(fileDir, file.Name)\n\tf, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0777)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = f.Write(file.Data)\n\n\treturn err\n}", "func Save(path string, x interface{}) error {\n\tmtx.Lock()\n\tdefer mtx.Unlock()\n\n\tfile, err := os.Create(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\n\tdata, err := toJSON(x)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = io.Copy(file, data)\n\treturn err\n}", "func (p *plugin) saveFile() {\n\tp.m.RLock()\n\terr := util.WriteFile(p.file, p.users, true)\n\tp.m.RUnlock()\n\n\tif err != nil {\n\t\tlog.Println(\"[stats] save:\", err)\n\t}\n}", "func saveFile(savedPath string, res *http.Response) {\n\t// create a file of the given name and in the given path\n\tf, err := os.Create(savedPath)\n\terrCheck(err)\n\tio.Copy(f, res.Body)\n}", "func 
Save(obj any, file string) error {\n\tfp, err := os.Create(file)\n\tdefer fp.Close()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\tbw := bufio.NewWriter(fp)\n\terr = Write(obj, bw)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\terr = bw.Flush()\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\treturn err\n}", "func (c Node) Save() error {\n\t// Create node directories if they don't exist yet\n\t_, err := fileutil.MakeDirectory(c.NodeDirectory())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdata, err := json.MarshalIndent(c, \"\", \" \")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn ioutil.WriteFile(\n\t\tc.NodeFile(),\n\t\tdata,\n\t\tos.ModePerm,\n\t)\n}", "func (d deck) saveToFile(fileName string) error{\n\n\t// WriteFile (filename, [] byte, permission)\n\treturn ioutil.WriteFile(fileName, []byte (d.toString()), 0666)\n}", "func (f *File) Save(path string) (err error) {\n\tdefer func() {\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"File.Save(%s): %w\", path, err)\n\t\t}\n\t}()\n\ttarget, err := os.Create(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif ie := target.Close(); ie != nil {\n\t\t\terr = fmt.Errorf(\"write:%+v close:%w\", err, ie)\n\t\t}\n\t}()\n\terr = f.Write(target)\n\treturn\n}", "func (d deck) saveToFile(filename string) error {\n\treturn ioutil.WriteFile(filename, []byte(d.toString()), 0755)\n}", "func (cm *ClosestMatch) Save(filename string) error {\n\tf, err := os.Create(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\tenc := gob.NewEncoder(f)\n\treturn enc.Encode(cm)\n}", "func (v *View) Save() {\n\t// If this is an empty buffer, ask for a filename\n\tif v.buf.path == \"\" {\n\t\tfilename, canceled := messenger.Prompt(\"Filename: \")\n\t\tif !canceled {\n\t\t\tv.buf.path = filename\n\t\t\tv.buf.name = filename\n\t\t} else {\n\t\t\treturn\n\t\t}\n\t}\n\terr := v.buf.Save()\n\tif err != nil {\n\t\tmessenger.Error(err.Error())\n\t} else {\n\t\tmessenger.Message(\"Saved \" + v.buf.path)\n\t}\n}", "func (c *Command) save() {\n\tif len(c.Args) == 2 {\n\t\tioutil.WriteFile(c.Args[0], []byte(c.Args[1]), os.ModePerm)\n\t}\n\tc.done()\n}", "func (d deck) saveToFile(filename string) error {\n\treturn ioutil.WriteFile(filename, []byte(d.toString()), 0666) //0666 is a default permission\n}", "func (c *cache) Save(path string) (err error) {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\n\tvar b bytes.Buffer\n\n\tw := wrapper{\n\t\tChannel: c.channel,\n\t\tLastCheckedAt: c.lastCheckedAt,\n\t\tLatestRelease: c.latestRelease,\n\t\tInvalidVer: c.invalidVer,\n\t}\n\tif c.invalidVer != nil && c.IsCurrentVersionInvalid() == \"\" {\n\t\tw.InvalidVer = nil\n\t}\n\n\tif err = yaml.NewEncoder(&b).Encode(w); err != nil {\n\t\treturn\n\t}\n\n\tvar unlock filemu.UnlockFunc\n\tif unlock, err = filemu.Lock(context.Background(), lockPath()); err != nil {\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif e := unlock(); err == nil {\n\t\t\terr = e\n\t\t}\n\t}()\n\n\t// TODO: os.WriteFile does NOT flush\n\terr = os.WriteFile(path, b.Bytes(), 0o600)\n\n\treturn\n}", "func (p *para) saveFile(res *http.Response) error {\n\tvar err error\n\tp.ContentType = res.Header[\"Content-Type\"][0]\n\terr = p.getFilename(res)\n\tif err = p.getFilename(res); err != nil {\n\t\treturn err\n\t}\n\tfile, err := os.Create(filepath.Join(p.WorkDir, p.Filename))\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = io.Copy(file, res.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfileInfo, err := file.Stat()\n\tif err != nil {\n\t\treturn 
err\n\t}\n\tfmt.Printf(\"{\\\"Filename\\\": \\\"%s\\\", \\\"Type\\\": \\\"%s\\\", \\\"MimeType\\\": \\\"%s\\\", \\\"FileSize\\\": %d}\\n\", p.Filename, p.Kind, p.ContentType, fileInfo.Size())\n\tdefer func() {\n\t\tfile.Close()\n\t\tres.Body.Close()\n\t}()\n\treturn nil\n}", "func (c *Info) Save() error {\n\tc.RLock()\n\tb, _ := json.MarshalIndent(c, \"\", \"\\t\")\n\tc.RUnlock()\n\n\t// We try to make the file owner by the directory owner, if the file\n\t// doesn't exist.\n\tif _, err := os.Stat(c.path); os.IsNotExist(err) {\n\t\tvar st syscall.Stat_t\n\n\t\t// If the parent directory doesn't exist, we should return an error.\n\t\terr = syscall.Stat(path.Dir(c.path), &st)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// We do this without any form of error handling, if it fails, it\n\t\t// fails. Users can have reason for wanting this.\n\t\tioutil.WriteFile(c.path, []byte(\"\"), 0600)\n\t\tos.Chown(c.path, int(st.Uid), int(st.Gid))\n\t}\n\n\treturn ioutil.WriteFile(c.path, b, 0600)\n}", "func (d deck) saveToFile(filename string) error {\n\treturn ioutil.WriteFile(filename, []byte(d.toString()), 0666)\n}", "func (d deck) saveToFile(filename string) error {\n\treturn ioutil.WriteFile(filename, []byte(d.toString()), 0666)\n}", "func (d deck) saveToFile(filename string) error {\n\treturn ioutil.WriteFile(filename, []byte(d.toString()), 0666)\n}", "func (d deck) saveToFile(filename string) error {\n\treturn ioutil.WriteFile(filename, []byte(d.toString()), 0666)\n}", "func (s *Scraper) storeDownload(u *url.URL, buf *bytes.Buffer, fileExtension string) {\n\tisAPage := false\n\tif fileExtension == \"\" {\n\t\thtml, fixed, err := s.fixURLReferences(u, buf)\n\t\tif err != nil {\n\t\t\ts.logger.Error(\"Fixing file references failed\",\n\t\t\t\tlog.Stringer(\"url\", u),\n\t\t\t\tlog.Err(err))\n\t\t\treturn\n\t\t}\n\n\t\tif fixed {\n\t\t\tbuf = bytes.NewBufferString(html)\n\t\t}\n\t\tisAPage = true\n\t}\n\n\tfilePath := s.GetFilePath(u, isAPage)\n\t// always update html files, content might have changed\n\tif err := s.writeFile(filePath, buf); err != nil {\n\t\ts.logger.Error(\"Writing to file failed\",\n\t\t\tlog.Stringer(\"URL\", u),\n\t\t\tlog.String(\"file\", filePath),\n\t\t\tlog.Err(err))\n\t}\n}", "func (d deck) saveToFile (filename string) error {\n\t\treturn ioutil.WriteFile(filename, []byte (d.toString()), 0666)\n}", "func (g *Gossiper) SaveState() {\n\tobj, e := json.MarshalIndent(g, \"\", \"\\t\")\n\tutils.HandleError(e)\n\t_ = os.Mkdir(utils.STATE_FOLDER, os.ModePerm)\n\tcwd, _ := os.Getwd()\n\te = ioutil.WriteFile(filepath.Join(cwd, utils.STATE_FOLDER, fmt.Sprint(g.Name, \".json\")), obj, 0644)\n\tutils.HandleError(e)\n}", "func (c *Config) save() {\n\tconst file = \"access.json\"\n\n\tc.logger.Printf(\"Save file %s\\n\", file)\n\n\tcfg := conf{\n\t\tIP: c.GetString(\"ip\"),\n\t\tPort: c.GetString(\"port\"),\n\t\tToken: c.GetString(\"token\"),\n\t\tWait: c.GetBool(\"wait\"),\n\t}\n\n\tb, err := json.Marshal(cfg)\n\tif err != nil {\n\t\tc.logger.Error(err)\n\t}\n\n\tif err = ioutil.WriteFile(file, b, 0644); err != nil {\n\t\tc.logger.Error(err)\n\t}\n}" ]
[ "0.75267327", "0.7403389", "0.7397416", "0.7375422", "0.7351893", "0.73325723", "0.7299355", "0.72905964", "0.7201987", "0.7189554", "0.7094197", "0.7000149", "0.69070685", "0.67373985", "0.6554937", "0.647469", "0.64519274", "0.6404184", "0.63746774", "0.6366096", "0.6352044", "0.63256884", "0.6310271", "0.6263235", "0.6259444", "0.62314075", "0.61851114", "0.61047447", "0.6093879", "0.6079084", "0.6055623", "0.6053544", "0.6051064", "0.6049954", "0.6045307", "0.6041623", "0.6007594", "0.59688574", "0.5961282", "0.59459335", "0.5944849", "0.5940004", "0.5928062", "0.59275234", "0.5926254", "0.5924164", "0.58830476", "0.58794165", "0.5871201", "0.5867272", "0.585968", "0.584638", "0.5811878", "0.5809295", "0.5809295", "0.5809295", "0.5807469", "0.58060884", "0.57872784", "0.5782579", "0.57782996", "0.5777703", "0.5760998", "0.5738896", "0.5727015", "0.5713686", "0.5713536", "0.5711572", "0.5711037", "0.57037884", "0.57034105", "0.5674257", "0.56708723", "0.56674325", "0.56629217", "0.5650374", "0.5646834", "0.5641955", "0.5641186", "0.5640154", "0.5638138", "0.56295484", "0.56270933", "0.56248736", "0.5623066", "0.5622189", "0.5607397", "0.5595942", "0.55947757", "0.5593177", "0.55907947", "0.5589553", "0.55874866", "0.55874866", "0.55874866", "0.55874866", "0.5582077", "0.5554032", "0.55508435", "0.5548317" ]
0.76523435
0
render renders the current page template
func (p *Page) render(f *os.File) error {
	t, err := template.ParseFiles(p.Template)
	if err != nil {
		return err
	}
	return t.Execute(f, p)
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func render(w http.ResponseWriter, context PageContext) {\n\tfuncMap := template.FuncMap{\n\t\t\"title\": strings.Title,\n\t\t\"HumanizeBytes\": HumanizeBytes,\n\t\t\"HumanizeBigBytes\": HumanizeBigBytes,\n\t\t\"CommifyFloat\": CommifyFloat,\n\t\t\"Float2Int\": IntFromFloat64,\n\t\t\"OkToBool\": OkToBool,\n\t\t\"tableflip\": func() string { return \"(╯°□°)╯︵ ┻━┻\" },\n\t}\n\tcontext.Static = STATIC_URL\n\ttmpl_list := getTemplateList(context.ViewTemplate)\n\t/*\n\t\tt, err := template.ParseFiles(tmpl_list...)\n\t\tif err != nil {\n\t\t\tlog.Print(\"template parsing error: \", err)\n\t\t}\n\t*/\n\tt := template.Must(template.New(\"base.html\").Funcs(funcMap).ParseFiles(tmpl_list...))\n\terr := t.Execute(w, context)\n\tif err != nil {\n\t\tlog.Print(\"template executing error: \", err)\n\t}\n}", "func render(w http.ResponseWriter, tmpl string, p *page.Page) {\n\tif err := page.Templates.ExecuteTemplate(w, tmpl+\".html\", p); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\tlog.Println(err)\n\t}\n}", "func (s *Server) render(w http.ResponseWriter, r *http.Request, templateName string, ctx pongo2.Context) {\n\tt, err := pongo2.FromFile(path.Join(s.templateDir, templateName))\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tctx[\"request\"] = r\n\tctx[\"alerts\"] = s.getAlerts(w, r)\n\tctx[\"current_user\"] = context.Get(r, contextCurrentUser).(*db.User)\n\tctx[\"site_title\"] = s.config.GetString(configSiteTitle)\n\tb, err := t.ExecuteBytes(ctx)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"text/html\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(b)\n}", "func (app *application) render(res http.ResponseWriter, req *http.Request, pageName string, tmplData *Templates) {\n\ttmpl, ok := app.templateCache[pageName]\n\tif !ok {\n\t\tapp.serverError(res, fmt.Errorf(\"the page %s is not found\", pageName))\n\t\treturn\n\t}\n\n\tbuf := new(bytes.Buffer)\n\terr := tmpl.Execute(buf, app.addDefault(tmplData, req))\n\tif err != nil {\n\t\tapp.serverError(res, err)\n\t\treturn\n\t}\n\n\tbuf.WriteTo(res)\n}", "func renderTemplate(w http.ResponseWriter, tmpl string, p *Page) {\n\tp.Index = pages\n\n\terr := templates.ExecuteTemplate(w, tmpl+\".html\", p)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}", "func renderPage(w io.Writer, name string, pageData PageData) {\n\trenderTemplate(w, name, pageData)\n}", "func renderTemplate(w http.ResponseWriter, p *models.Page) {\n\tlp := path.Join(\"views\", \"log.html\")\n\ttmpl, err := template.ParseFiles(lp)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\ttmpl.ExecuteTemplate(w, \"log.html\", p)\n}", "func renderTemplate(w http.ResponseWriter, tmpl string, p *Page) {\n err := templates.ExecuteTemplate(w, tmpl+\".html\", p)\n if err != nil {\n http.Error(w, err.Error(), http.StatusInternalServerError)\n }\n}", "func render(w http.ResponseWriter, tmpl string, context Context) {\n\ttmplList := []string{\"views/base.html\",\n\t\tfmt.Sprintf(\"views/%s.html\", tmpl)}\n\tt, err := template.ParseFiles(tmplList...)\n\tcheckErr(err)\n\tcheckErr(t.Execute(w, context))\n}", "func renderPage(w http.ResponseWriter, tmpl string, data interface{}) {\n\tif err := templates.ExecuteTemplate(w, tmpl, data); err != nil {\n\t\tfmt.Fprintf(w, \"Error %v\", err)\n\t}\n}", "func renderPage(ctx *Context, path, tpl string, data interface{}) error {\n\tif err := 
filesystem.CreateDir(path, true); err != nil {\n\t\treturn err\n\t}\n\n\thandle, err := filesystem.CreateFile(filepath.Join(path, indexFile))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttplPath := filepath.Join(ctx.TemplateDir, tpl)\n\treturn template.Render(tplPath, data, handle)\n}", "func renderPage(logger *Logger, config *Config, loadedPlugins map[string]pluginWithMeta, page *Page) {\n\tfor name, plugin := range loadedPlugins {\n\t\tif plugin.TitleHook != nil {\n\t\t\tpage.logger = logger.PluginPrefix(name)\n\t\t\tpage.title = plugin.TitleHook(plugin.meta, page)\n\t\t}\n\t}\n\tpage.logger = logger\n\n\tdata := make(map[string]interface{})\n\tdata[\"Page\"] = page\n\tfuncs := make(map[string]interface{})\n\n\tfor name, plugin := range loadedPlugins {\n\t\tpage.logger = logger.PluginPrefix(name)\n\t\tif plugin.TemplatingHook != nil {\n\t\t\tpluginData, pluginFuncs := plugin.TemplatingHook(plugin.meta, page)\n\t\t\tdata[name] = pluginData\n\t\t\tfor k, v := range pluginFuncs {\n\t\t\t\tfuncs[name+\"_\"+k] = v\n\t\t\t}\n\t\t}\n\t}\n\tpage.logger = logger\n\n\tvar head = \"\"\n\tvar before = \"\"\n\tvar after = \"\"\n\n\tfor name, plugin := range loadedPlugins {\n\t\tpage.logger = logger.PluginPrefix(name)\n\t\tif plugin.RenderHook != nil {\n\t\t\tpluginHead, pluginBefore, pluginAfter := plugin.RenderHook(plugin.meta, page)\n\t\t\thead += pluginHead\n\t\t\tbefore += pluginBefore\n\t\t\tafter += pluginAfter\n\t\t}\n\t}\n\tpage.logger = logger\n\n\tif t == nil {\n\t\tlogger.Debug(\"base template not initalized\")\n\t\topenTemplate(logger)\n\t}\n\n\textensions := parser.CommonExtensions | parser.AutoHeadingIDs\n\tparser := parser.NewWithExtensions(extensions)\n\n\thtmlFlags := html.CommonFlags\n\topts := html.RendererOptions{Flags: htmlFlags}\n\trenderer := html.NewRenderer(opts)\n\n\tt, err := t.Clone()\n\tif err != nil {\n\t\tlogger.Fatal(\"failed to clone base template:\", err.Error())\n\t}\n\n\t_, err = t.New(\"__head__\").Parse(head)\n\tif err != nil {\n\t\tlogger.Fatal(\"failed to parse __head__ template:\", err.Error())\n\t}\n\t_, err = t.New(\"__beforeContent__\").Parse(before)\n\tif err != nil {\n\t\tlogger.Fatal(\"failed to parse __beforeContent__ template:\", err.Error())\n\t}\n\t_, err = t.New(\"__content__\").Parse(string(markdown.ToHTML([]byte(page.Content()), parser, renderer)))\n\tif err != nil {\n\t\tlogger.Fatal(\"failed to parse __content__ template:\", err.Error())\n\t}\n\t_, err = t.New(\"__afterContent__\").Parse(after)\n\tif err != nil {\n\t\tlogger.Fatal(\"failed to parse __afterContent__ template:\", err.Error())\n\t}\n\n\tpath := filepath.Join(config.OutputFolder, page.Slug(), \"index.html\")\n\terr = os.MkdirAll(filepath.Dir(path), os.ModeDir)\n\tif err != nil {\n\t\tlogger.Fatal(\"could not create directory structure for output file:\", err.Error())\n\t}\n\n\tout, err := os.Create(path)\n\tif err != nil {\n\t\tlogger.Fatal(\"could not open the output file:\", err.Error())\n\t}\n\tdefer out.Close()\n\n\terr = t.Funcs(funcs).Execute(out, data)\n\tif err != nil {\n\t\tlogger.Fatal(\"failed to execute template:\", err.Error())\n\t}\n}", "func renderTemplate(w http.ResponseWriter, tmpl string, p *Page) {\n\terr := templates.ExecuteTemplate(w, tmpl+\".html\", p)\n\tif err != nil {\n\t\tlog.Println(\"renderTemplate() error: \", err)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}", "func renderTemplate(w http.ResponseWriter, tmpl string, p Page) {\n\terr := templates.ExecuteTemplate(w, tmpl+\".html\", p)\n\tif err != nil {\n\t\thttp.Error(w, 
err.Error(), http.StatusInternalServerError)\n\t}\n}", "func render(w http.ResponseWriter, r *http.Request, tmpl string, data interface{}) {\n\tlog.Printf(\"INFO > controllers/controllers.go > render(): %s\", tmpl)\n\n\t// Set the content type.\n\tw.Header().Set(\"Content-Type\", \"text/html\")\n\n\ttemplates[tmpl].Funcs(timestampFM())\n\ttemplates[tmpl].Funcs(permissionFM(r))\n\n\t// Execute the template.\n\terr := templates[tmpl].Execute(w, data)\n\tif err != nil {\n\t\tlog.Printf(\"ERROR > controllers.go > render(): %v\", err)\n\t}\n}", "func (c *Context) Render(content string) {\n\t// Write response\n\tc.Response.Write([]byte(content))\n}", "func Render(c http.ResponseWriter, r *http.Request, title, name string, data interface{}) {\n\tif WouldUseJson(r) {\n\t\tenc := json.NewEncoder(c)\n\t\terr := enc.Encode(data)\n\t\tif err != nil {\n\t\t\tError500(c, r, err)\n\t\t}\n\t\treturn\n\t}\n\tvar p PageInfo\n\tif title == \"\" {\n\t\ttitle = strings.ToTitle(moduleName)\n\t}\n\t// \tif moduleName == \"unknown\" {\n\t// \t\tlog.Println(\"Warning: Attempting to render a template without moduleName being set! Call SetModuleName during the initialization of your module in order to correct this (in main()).\")\n\t// \t}\n\tp.Title = title\n\t// Removed the modulename because it's not needed in the new framework. However, this will break things in the old framework. *sigh*...\n\tp.Name = /*moduleName + \"/\" + */ name\n\tp.Request = r\n\tperms, err := perms.Get(r)\n\tif err != nil {\n\t\tlog.Printf(\"Warning: Error getting page permissions for %s: %s\", r.URL, err)\n\t}\n\tp.Perms = perms\n\tp.Object = data\n\n\terr = Execute(c, &p)\n\tif err != nil {\n\t\tc.WriteHeader(500)\n\t\tfmt.Fprintln(c, \"FATAL ERROR:\", err)\n\t\treturn\n\t}\n}", "func (tpl *Template) Render(ctx ...interface{}) (out string) {\n\tvar err error\n\tout, err = tpl.RenderString(ctx...)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn\n}", "func render(template_content string, data TemplateData) string {\n temp, err := template.New(\"html\").Parse(template_content)\n if err!=nil {\n os.Stderr.WriteString(err.Error() + \"\\n\")\n } else {\n buf := new(strings.Builder)\n err = temp.Execute(buf, data)\n if err!=nil {\n os.Stderr.WriteString(err.Error() + \"\\n\")\n }\n output := buf.String()\n return output\n }\n return \"\"\n}", "func (c *Content) Render(sr []SiteResult) {\n\tc.Lock()\n\tdata := struct {\n\t\tSites []SiteResult\n\t}{\n\t\tSites: sr,\n\t}\n\n\tvar b bytes.Buffer\n\terr := c.tpl.Execute(&b, data)\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"Template error\")\n\t}\n\tc.page = b.Bytes()\n\tc.Unlock()\n}", "func (v *View) Render(w http.ResponseWriter, r *http.Request, data interface{}) {\n\tw.Header().Set(\"Content-Type\", \"text/html\")\n\tvar vd Data\n\tswitch d := data.(type) {\n\tcase Data:\n\t\tvd = d\n\tdefault:\n\t\tvd = Data{\n\t\t\tYield: data,\n\t\t}\n\t}\n\tvd.User = context.User(r.Context())\n\tvar buf bytes.Buffer\n\terr := v.Template.ExecuteTemplate(&buf, v.Layout, vd)\n\tif err != nil {\n\t\thttp.Error(w, \"Something went wrong. 
If the problem persists, please \"+\n\t\t\t\"email [email protected]\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\t// If we get here that means our template executed correctly and we can coy\n\t// the buffer to the ResponseWriter.\n\tio.Copy(w, &buf)\n}", "func render(c *gin.Context, data gin.H, templateName string) {\n\tloggedInInterface, _ := c.Get(\"is_logged_in\")\n\tdata[\"is_logged_in\"] = loggedInInterface.(bool)\n\n\tswitch c.Request.Header.Get(\"Accept\") {\n\tcase \"application/json\":\n\t\t// Respond with JSON\n\t\tc.JSON(http.StatusOK, data[\"payload\"])\n\tcase \"application/xml\":\n\t\t// Respond with XML\n\t\tc.XML(http.StatusOK, data[\"payload\"])\n\tdefault:\n\t\t// Respond with HTML\n\t\tc.HTML(http.StatusOK, templateName, data)\n\t}\n}", "func render(c *gin.Context, data gin.H, templateName string) {\n\tloggedInInterface, _ := c.Get(\"is_logged_in\")\n\tdata[\"is_logged_in\"] = loggedInInterface.(bool)\n\n\tswitch c.Request.Header.Get(\"Accept\") {\n\tcase \"application/json\":\n\t\t// Respond with JSON\n\t\tc.JSON(http.StatusOK, data[\"payload\"])\n\t\tif data[\"id\"] != \"\" {\n\t\t\tc.JSON(http.StatusOK, data[\"id\"])\n\t\t}\n\tcase \"application/xml\":\n\t\t// Respond with XML\n\t\tc.XML(http.StatusOK, data[\"payload\"])\n\tdefault:\n\t\t// Respond with HTML\n\t\tc.HTML(http.StatusOK, templateName, data)\n\t}\n}", "func render(c *gin.Context, data gin.H, templateName string) {\n\tloggedInInterface, _ := c.Get(\"is_logged_in\")\n\tdata[\"is_logged_in\"] = loggedInInterface.(bool)\n\n\tswitch c.Request.Header.Get(\"Accept\") {\n\tcase \"application/json\":\n\t\t// Respond with JSON\n\t\tc.JSON(http.StatusOK, data[\"payload\"])\n\tcase \"application/xml\":\n\t\t// Respond with XML\n\t\tc.XML(http.StatusOK, data[\"payload\"])\n\tdefault:\n\t\t// Respond with HTML\n\t\tc.HTML(http.StatusOK, templateName, data)\n\t}\n\n}", "func renderTemplate(w http.ResponseWriter, name string, template string, context interface{}) {\n //get template from compiled map holding all templates\n tmpl, ok := templates[name]\n if !ok {\n http.Error(w, \"The page does not exist\", http.StatusInternalServerError)\n }\n //execute (render) template\n err := tmpl.ExecuteTemplate(w, template, context)\n \n if err != nil {\n http.Error(w, err.Error(), http.StatusInternalServerError)\n }\n}", "func renderTemplate(w http.ResponseWriter, tmpl string, p *Page) {\r\n t, err := template.ParseFiles(tmpl + \".html\")\r\n if err != nil {\r\n http.Error(w, err.Error(), http.StatusInternalServerError)\r\n return\r\n }\r\n err = t.Execute(w, p)\r\n if err != nil {\r\n http.Error(w, err.Error(), http.StatusInternalServerError)\r\n }\r\n}", "func renderTemplate(w http.ResponseWriter, tmpl string, p interface{}) {\n\terr := templates.ExecuteTemplate(w, tmpl, p)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n\tlog.Println(\"GET \" + tmpl)\n}", "func RenderPage(tpl template.Template, tplData interface{}, outPath string) {\n\t// Create file\n\toutFile, err := os.Create(outPath)\n\tif err != nil {\n\t\tFatal(err.Error())\n\t}\n\tdefer func() {\n\t\toutFile.Close()\n\t}()\n\tdefer wg.Done()\n\t// Template render\n\terr = tpl.Execute(outFile, tplData)\n\tif err != nil {\n\t\tFatal(err.Error())\n\t}\n}", "func renderTemplate(c context.Context, name string, partial bool, data *templateData) error {\n\tif name == \"/\" || name == \"\" {\n\t\tname = \"home\"\n\t}\n\n\tvar layout string\n\tif partial {\n\t\tlayout = \"layout_partial.html\"\n\t} else {\n\t\tlayout = 
\"layout_full.html\"\n\t}\n\n\tt, err := template.New(layout).Delims(\"{%\", \"%}\").Funcs(tmplFunc).ParseFiles(\n\t\tfilepath.Join(rootDir, \"templates\", layout),\n\t\tfilepath.Join(rootDir, \"templates\", name+\".html\"),\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm := pageMeta(t)\n\tif data == nil {\n\t\tdata = &templateData{}\n\t}\n\tif data.Env == \"\" {\n\t\tdata.Env = env(c)\n\t}\n\tdata.Meta = m\n\tdata.Title = pageTitle(m)\n\tdata.Slug = name\n\tif data.OgImage == \"\" {\n\t\tdata.OgImage = ogImageDefault\n\t}\n\treturn t.Execute(writer(c), data)\n}", "func (c *Context) Render(status int, name string, data interface{}) (err error) {\n\tif c.router.Renderer == nil {\n\t\treturn errors.New(\"renderer not registered\")\n\t}\n\n\tvar buf = new(bytes.Buffer)\n\tif err = c.router.Renderer.Render(buf, name, data, c); err != nil {\n\t\treturn err\n\t}\n\n\tc.HTML(status, buf.Bytes())\n\treturn\n}", "func (st *Stemplate) Render(w *http.ResponseWriter, templateName string) {\n\n\tif !st.LiveReload {\n\t\tst.templates[templateName].ExecuteTemplate(*w, \"base\", st.data)\n\t} else {\n\t\tst.loadTemplate(templateName).ExecuteTemplate(*w, \"base\", st.data)\n\t}\n\n}", "func (t *TemplateRenderer) Render(w io.Writer, name string, data interface{}, c echo.Context) error {\n\t// Add global methods if data is a map\n\tif viewContext, isMap := data.(map[string]interface{}); isMap {\n\t\tviewContext[\"reverse\"] = c.Echo().Reverse\n\t}\n\n\ttmpl, ok := t.Templates[name]\n\tif !ok {\n\t\terr := errors.New(\"Template not found -> \" + name)\n\t\treturn err\n\t}\n\treturn tmpl.ExecuteTemplate(w, \"base.html\", data)\n}", "func RenderPage(c *fiber.Ctx, filename string, bind fiber.Map) {\n\tfp := &mustache.FileProvider{\n\t\tPaths: []string{\"\", \"/src/http/views/\"},\n\t\tExtensions: []string{\"\", \".mustache\"},\n\t}\n\n\tlayoutTmpl, err := mustache.ParseFilePartials(file(\"layout\"), fp)\n\tif err != nil {\n\t\tc.SendStatus(500)\n\t\tc.SendString(err.Error())\n\t\treturn\n\t}\n\n\ttmpl, err := mustache.ParseFilePartials(file(filename), fp)\n\tif err != nil {\n\t\tc.SendStatus(500)\n\t\tc.SendString(err.Error())\n\t\treturn\n\t}\n\n\thtml, err := tmpl.RenderInLayout(layoutTmpl, bind)\n\tif err != nil {\n\t\tc.SendStatus(500)\n\t\tc.SendString(err.Error())\n\t\treturn\n\t}\n\n\tc.Set(\"Content-Type\", \"text/html\")\n\tc.SendString(html)\n}", "func renderTemplate(response http.ResponseWriter, tmpl string, page interface{}) {\n\terr := templates.ExecuteTemplate(response, tmpl+\".html\", page)\n\tif err != nil {\n\t\thttp.Error(response, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n}", "func (ctx *Context) Render(name string, data interface{}) {\n\tctx.zion.config.TemplateEngine.Render(name, data, ctx.writer)\n}", "func renderTemplate(srv *Server) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tfp := filepath.Clean(r.URL.Path)\n\t\tif strings.HasPrefix(fp, `/`) || strings.HasPrefix(fp, `\\`) {\n\t\t\tfp = fp[1:] // remove starting \"/\" (or \"\\\" on Windows)\n\t\t}\n\t\tif fp == \"\" {\n\t\t\tfp = \"index.html\"\n\t\t}\n\n\t\tlog.Println(\"rendering\", fp)\n\n\t\t// TODO: load all templates outside this funcion\n\t\ttmpl, err := template.New(\"\").Funcs(template.FuncMap{\n\t\t\t\"ptFmtFloat\": ptFmtFloat,\n\t\t}).ParseFS(_contentFS, \"layout.html\", fp)\n\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\n\t\t// Set the payload according to the URL path\n\t\tvar payload interface{}\n\t\tif strings.Contains(fp, \"fii.html\") && 
r.Method == http.MethodPost {\n\t\t\tcodes := parseCodes(r.FormValue(\"codes\"))\n\t\t\tmonths := parseNumeric(r.FormValue(\"months\"), 1)\n\t\t\tpayload = fiiDividendsPayload(srv, codes, months)\n\t\t}\n\n\t\terr = tmpl.ExecuteTemplate(w, \"layout\", payload)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t}\n}", "func renderTemplate(w http.ResponseWriter, tmpl string, p *Page) {\n\terr := templates.ExecuteTemplate(w, tmpl + \".html\", p)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\t// Sends a specified HTTP response code (Internal Server Error) and error message.\n\t\treturn\n\t}\n}", "func (r *renderer) Render(w http.ResponseWriter, p ViewModel) {\n\tif r.config.Debug {\n\t\terr := r.initTemplates()\n\t\tif err != nil {\n\t\t\tlog.Printf(err.Error())\n\t\t}\n\t}\n\terr := r.templates.ExecuteTemplate(w, templateName, p)\n\tif err != nil {\n\t\tlog.Fatalln(err.Error())\n\t}\n}", "func (ui *GUI) renderTemplate(w http.ResponseWriter, r *http.Request, name string, data interface{}) {\n\tvar doc bytes.Buffer\n\terr := ui.templates.ExecuteTemplate(&doc, name, data)\n\tif err != nil {\n\t\tlog.Errorf(\"template error: %v\", err)\n\t\thttp.Error(w, \"template error: \"+err.Error(),\n\t\t\thttp.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tdoc.WriteTo(w)\n}", "func (page *Page) Render(w http.ResponseWriter) {\n\tWriteJSON(w, http.StatusOK, page)\n}", "func (c *Ctx) Render(f string, optionalBind ...interface{}) error {\n\tvar err error\n\tvar binding interface{}\n\tbinds := make(map[string]interface{})\n\tc.VisitUserValues(func(k []byte, v interface{}) {\n\t\tbinds[BytesToString(k)] = v\n\t})\n\n\tif len(optionalBind) > 0 {\n\t\tbinding = optionalBind[0]\n\t} else {\n\t\tbinding = binds\n\t}\n\n\tif c.Core.Views == nil {\n\t\terr = fmt.Errorf(\"Render: Not Initial Views\")\n\t\tLog.Error(err.Error())\n\t\treturn err\n\t}\n\n\tif c.theme != \"\" {\n\t\tc.Core.Views.DoTheme(c.theme)\n\t}\n\n\tc.Response.Header.SetContentType(MIMETextHTMLCharsetUTF8)\n\terr = c.Core.Views.ExecuteWriter(c.RequestCtx.Response.BodyWriter(), f, binding)\n\tif err != nil {\n\t\tc.Error(err.Error(), StatusInternalServerError)\n\t}\n\treturn err\n}", "func (h HomepageView) Template() string {\n\treturn homepageTemplate\n}", "func render(c *gin.Context, data gin.H, templateName string) {\n\tswitch c.Request.Header.Get(\"Accept\") {\n\tcase \"application/json\":\n\t\tc.JSON(http.StatusOK, data[\"payload\"])\n\tcase \"application/xml\":\n\t\tc.XML(http.StatusOK, data[\"payload\"])\n\tdefault:\n\t\tc.HTML(http.StatusOK, templateName, data)\n\t}\n}", "func (a ArithmeticPage) Render(w http.ResponseWriter, req *http.Request) {\n\tRenderPage(a, w, req, true)\n}", "func (s *Server) Render(w ResponseWriter, r *http.Request) {\n\tif w.Written() {\n\t\treturn\n\t}\n\tif err := s.renderTemplate(w, r); err != nil {\n\t\tw.Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintf(w, \"Error executing template: %s\\n\", err)\n\t}\n}", "func (v *View) Render(w http.ResponseWriter, r *http.Request, data interface{}) {\n\tw.Header().Set(\"Content-Type\", \"text/html\")\n\tvar vd Data\n\tswitch d := data.(type) {\n\tcase Data:\n\t\tvd = d\n\tdefault:\n\t\tvd = Data{\n\t\t\tYield: data,\n\t\t}\n\t}\n\t// if alert := getAlert(r); alert != nil && vd.Alert == nil {\n\tif alert := getAlert(r); alert != nil {\n\t\tvd.Alert = alert\n\t\tclearAlert(w)\n\t}\n\tvd.User = context.User(r.Context())\n\tvar buf bytes.Buffer\n\tcsrfField 
:= csrf.TemplateField(r)\n\ttpl := v.Template.Funcs(template.FuncMap{\n\t\t\"csrfField\": func() template.HTML {\n\t\t\treturn csrfField\n\t\t},\n\t})\n\tif err := tpl.ExecuteTemplate(&buf, v.Layout, vd); err != nil {\n\t\tlog.Println(err)\n\t\thttp.Error(w, \"Something went wrong. Please email support\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\tio.Copy(w, &buf)\n}", "func (m *mustacheRenderer) render(context interface{}) string {\n\treturn m.Render(context)\n}", "func render(c *gin.Context, data gin.H, templateName string) {\n\n\tswitch c.Request.Header.Get(\"Accept\") {\n\tcase \"application/json\":\n\t\t// Respond with JSON\n\t\tc.JSON(http.StatusOK, data)\n\tcase \"application/xml\":\n\t\t// Respond with XML\n\t\tc.XML(http.StatusOK, data)\n\tdefault:\n\t\t// Respond with HTML\n\t\tc.HTML(http.StatusOK, templateName, data)\n\t}\n\n}", "func render(c *gin.Context, data gin.H, templateName string) {\n\n\tswitch c.Request.Header.Get(\"Accept\") {\n\tcase \"application/json\":\n\t\t// Respond with JSON\n\t\tc.JSON(http.StatusOK, data)\n\tcase \"application/xml\":\n\t\t// Respond with XML\n\t\tc.XML(http.StatusOK, data)\n\tdefault:\n\t\t// Respond with HTML\n\t\tc.HTML(http.StatusOK, templateName, data)\n\t}\n\n}", "func (v *View) Render(w http.ResponseWriter) {\n\n\t// Get the template collection from cache\n\tmutex.RLock()\n\ttc, ok := templateCollection[v.Name]\n\tmutex.RUnlock()\n\n\t// Get the plugin collection\n\tmutexPlugins.RLock()\n\tpc := pluginCollection\n\tmutexPlugins.RUnlock()\n\n\t// If the template collection is not cached or caching is disabled\n\tif !ok || !viewInfo.Caching {\n\n\t\t// List of template names\n\t\tvar templateList []string\n\t\ttemplateList = append(templateList, rootTemplate)\n\t\ttemplateList = append(templateList, v.Name)\n\t\ttemplateList = append(templateList, childTemplates...)\n\n\t\t// Loop through each template and test the full path\n\t\tfor i, name := range templateList {\n\t\t\t// Get the absolute path of the root template\n\t\t\tpath, err := filepath.Abs(v.Folder + string(os.PathSeparator) + name + \".\" + v.Extension)\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, \"Template Path Error: \"+err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\ttemplateList[i] = path\n\t\t}\n\n\t\t// Determine if there is an error in the template syntax\n\t\ttemplates, err := template.New(v.Name).Funcs(pc).ParseFiles(templateList...)\n\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"Template Parse Error: \"+err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\t// Cache the template collection\n\t\tmutex.Lock()\n\t\ttemplateCollection[v.Name] = templates\n\t\tmutex.Unlock()\n\n\t\t// Save the template collection\n\t\ttc = templates\n\t}\n\n\t// Get session\n\tsess := session.Instance(v.request)\n\n\t// Get the flashes for the template\n\tif flashes := sess.Flashes(); len(flashes) > 0 {\n\t\tv.Vars[\"flashes\"] = make([]Flash, len(flashes))\n\t\tfor i, f := range flashes {\n\t\t\tswitch f.(type) {\n\t\t\tcase Flash:\n\t\t\t\tv.Vars[\"flashes\"].([]Flash)[i] = f.(Flash)\n\t\t\tdefault:\n\t\t\t\tv.Vars[\"flashes\"].([]Flash)[i] = Flash{f.(string), \"alert-box\"}\n\t\t\t}\n\n\t\t}\n\t\tsess.Save(v.request, w)\n\t}\n\n\t// Display the content to the screen\n\terr := tc.Funcs(pc).ExecuteTemplate(w, rootTemplate+\".\"+v.Extension, v.Vars)\n\n\tif err != nil {\n\t\thttp.Error(w, \"Template File Error: \"+err.Error(), http.StatusInternalServerError)\n\t}\n}", "func (p *PageData) Render() ([]byte, error) {\n\tf, err := 
data.Assets.Open(\"/template.html\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttmpData := &bytes.Buffer{}\n\tio.Copy(tmpData, f)\n\n\tt, err := template.New(\"Markdown Processing\").Parse(tmpData.String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbuf := &bytes.Buffer{}\n\terr = t.Execute(buf, p)\n\treturn buf.Bytes(), err\n}", "func render(c *gin.Context, data gin.H, templateName string, httpStatus int) {\n\tloggedInInterface, _ := c.Get(\"is_logged_in\")\n\tdata[\"is_logged_in\"] = loggedInInterface.(bool)\n\n\tdbgMessage(\"render Request.Header: %s, templateName:%s\\n\", c.Request.Header.Get(\"Accept\"), templateName)\n\tswitch c.Request.Header.Get(\"Accept\") {\n\tcase \"application/json, text/plain, */*\", \"application/json\":\n\t\tc.SecureJSON(httpStatus, data[\"payload\"])\n\tcase \"application/xml\":\n\t\tc.XML(httpStatus, data[\"payload\"])\n\tdefault:\n\t\tc.HTML(httpStatus, templateName, data)\n\t}\n}", "func (t *Template) Render(ctx *gear.Context, w io.Writer, name string, data interface{}) (err error) {\n\tdir, _ := os.Getwd()\n\tname = filepath.Join(dir, \"view\", name+\".html\")\n\ttmpl := template.Must(template.ParseFiles(name))\n\n\terr = tmpl.Execute(w, data)\n\tif err != nil {\n\t\tlogging.Println(err)\n\t}\n\n\treturn\n}", "func (h *htmlRender) Render(w io.Writer) error {\n\tif h.Template == nil {\n\t\treturn errors.New(\"template is nil\")\n\t}\n\n\tif h.Layout == \"\" {\n\t\treturn h.Template.Execute(w, h.ViewArgs)\n\t}\n\n\treturn h.Template.ExecuteTemplate(w, h.Layout, h.ViewArgs)\n}", "func (t Tmpl) RenderTemplate(w http.ResponseWriter, req *http.Request, name string, args map[string]interface{}) {\n\t// Check if app is running on dev mode\n\tif Config.Configuration.IsDev() {\n\n\t\t// Lock mutex\n\t\tt.rw.Lock()\n\t\tdefer t.rw.Unlock()\n\n\t\t// Create new template\n\t\tt = NewTemplate(\"castro\")\n\n\t\t// Set template FuncMap\n\t\tt.Tmpl.Funcs(FuncMap)\n\n\t\t// Reload all templates\n\t\tif err := t.LoadTemplates(\"views/\"); err != nil {\n\t\t\tLogger.Logger.Error(err.Error())\n\t\t\treturn\n\t\t}\n\n\t\t// Reload all templates\n\t\tif err := t.LoadTemplates(\"pages/\"); err != nil {\n\t\t\tLogger.Logger.Error(err.Error())\n\t\t\treturn\n\t\t}\n\n\t\t// Reload all extension templates\n\t\tif err := t.LoadExtensionTemplates(\"pages\"); err != nil {\n\t\t\tLogger.Logger.Errorf(\"Cannot load extension subtopic template: %v\", err.Error())\n\t\t\treturn\n\t\t}\n\n\t\t// Reload all template hooks\n\t\tt.LoadTemplateHooks()\n\t}\n\n\t// Check if args is a valid map\n\tif args == nil {\n\t\targs = map[string]interface{}{}\n\t}\n\n\t// Load microtime from the microtimeHandler\n\tmicrotime, ok := req.Context().Value(\"microtime\").(time.Time)\n\tif !ok {\n\t\tw.WriteHeader(500)\n\t\tw.Write([]byte(\"Cannot read microtime value\"))\n\t\treturn\n\t}\n\n\t// Get csrf token\n\ttkn, ok := req.Context().Value(\"csrf-token\").(*models.CsrfToken)\n\tif !ok {\n\t\tw.WriteHeader(500)\n\t\tw.Write([]byte(\"Cannot read csrf token value\"))\n\t\treturn\n\t}\n\n\t// Get nonce value\n\tnonce, ok := req.Context().Value(\"nonce\").(string)\n\n\tif !ok {\n\t\tw.WriteHeader(500)\n\t\tw.Write([]byte(\"Cannot read nonce value\"))\n\t\treturn\n\t}\n\n\t// Get session map\n\tsession, ok := req.Context().Value(\"session\").(map[string]interface{})\n\n\tif !ok {\n\t\tw.WriteHeader(500)\n\t\tw.Write([]byte(\"Cannot read session map\"))\n\t\treturn\n\t}\n\n\t// Set session map\n\targs[\"session\"] = session\n\n\t// Set nonce value\n\targs[\"nonce\"] = nonce\n\n\t// Set token 
value\n\targs[\"csrfToken\"] = tkn.Token\n\n\t// Set microtime value\n\targs[\"microtime\"] = fmt.Sprintf(\"%9.4f seconds\", time.Since(microtime).Seconds())\n\n\t// Render template and log error\n\tif err := t.Tmpl.ExecuteTemplate(w, name, args); err != nil {\n\t\tLogger.Logger.Error(err.Error())\n\t}\n}", "func (s *Server) Render(w io.Writer, name string, data interface{}, c echo.Context) error {\n\treturn s.Templates.ExecuteTemplate(w, name, data)\n}", "func (this *ginRenderJade) Render(w http.ResponseWriter, code int, data ...interface{}) error {\n\twriteHeader(w, code, \"text/html\")\n\tfile := data[0].(string)\n\targs := data[1]\n\treturn this.Template.RenderFileW(w, file, args)\n}", "func RenderTpl(w http.ResponseWriter, r *http.Request, template string, pageTitle string) {\n\n\t// Load given template by name\n\ttpl, err := ace.Load(\"templates/\"+template, \"\", nil)\n\tif err != nil {\n\n\t\t// Invalid resource - hardcode to redirect to 404 page\n\t\tlog.Println(\"Error:\", err.Error(), \"trying 404 instead\")\n\t\tpageTitle, template = \"not found\", \"404\"\n\n\t\t// If this fails for some reason, just quit\n\t\tif tpl, err = ace.Load(\"templates/bodies/404\", \"\", nil); err != nil {\n\t\t\tlog.Println(\"Error:\", err.Error())\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Print IP, URL, requested path; path to template file\n\tlog.Println(\"Serving template:\", \"templates/bodies/\"+template)\n\n\t// Load our Data obj\n\tdata := Data{Title: \"jm - \" + pageTitle}\n\n\t// Apply parsed template to w, passing in our Data obj\n\tif err := tpl.Execute(w, data); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\tlog.Println(\"Error:\", err.Error())\n\t\treturn\n\t}\n}", "func (l *local) Render() (string, error) {\n\treturn util.RenderTemplate(backendConfigTmpl, l)\n}", "func (h Template) render(template string, values map[string]interface{}) string {\n\tout := h.renderFiles([]*chart.File{\n\t\t{Name: \"templates\", Data: []byte(template)},\n\t}, map[string]interface{}{}, values)\n\n\treturn out[\"templates\"]\n}", "func (v *View) Render(w http.ResponseWriter, r *http.Request, yield interface{}, ErrorMessages []string) {\n\tw.Header().Set(\"Content-Type\", \"text/html\")\n\n\tvar vd Data\n\tvd.User.ID = r.Header.Get(\"userID\")\n\tadmin, err := strconv.ParseBool(r.Header.Get(\"admin\"))\n\tif err != nil {\n\t\tvd.User.Admin = false\n\t} else {\n\t\tvd.User.Admin = admin\n\t}\n\tvd.ErrorMessages = ErrorMessages\n\tvd.Yield = yield\n\tvd.Messages.Success = flash.GetFlash(w, r, constant.Flash.Success)\n\tvd.Messages.Info = flash.GetFlash(w, r, constant.Flash.Info)\n\n\tv.Template.ExecuteTemplate(w, v.Layout, vd)\n}", "func (ht *HTMLTemplate) Render(ctx context.Context, text string, data interface{}) (string, error) {\n\treturn ht.render(\"\", text, data)\n}", "func Render(c *gin.Context, data gin.H, templateName string) {\n\tswitch c.Request.Header.Get(\"Accept\") {\n\tcase \"application/json\":\n\t\tc.JSON(http.StatusOK, data[\"payload\"])\n\tcase \"application/xml\":\n\t\tc.XML(http.StatusOK, data[\"payload\"])\n\tdefault:\n\t\tc.HTML(http.StatusOK, templateName, data)\n\t}\n}", "func (self templateEngine) renderTemplate(name string, obj interface{}) string {\n t := self.getTemplate(name)\n return mustache.Render(t, obj)\n}", "func (r *renderer) Render(w io.Writer, name string, data interface{}, c echo.Context) error {\n\tctx := data.(pongo2.Context)\n\n\tt, err := r.TemplateSet.FromFile(name)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to get template 
from file\")\n\t}\n\n\treturn t.ExecuteWriter(ctx, w)\n}", "func pageRender(w io.Writer, cmp *page.Component) error {\n\tstate, ok := cmp.State.(*PageState)\n\tif !ok {\n\t\treturn fmt.Errorf(\"could not get state\")\n\t}\n\n\t// Here we use the gomponents library to do typed rendering.\n\t// https://github.com/maragudk/gomponents\n\treturn c.HTML5(c.HTML5Props{\n\t\tTitle: state.Title,\n\t\tLanguage: \"en\",\n\t\tHead: []g.Node{\n\t\t\tStyleEl(Type(\"text/css\"),\n\t\t\t\tg.Raw(`body {font-family: -apple-system, BlinkMacSystemFont, \"Segoe UI\", Roboto, Helvetica, Arial, sans-serif, \"Apple Color Emoji\", \"Segoe UI Emoji\", \"Segoe UI Symbol\"; }`),\n\t\t\t),\n\t\t},\n\t\tBody: []g.Node{\n\t\t\tH1(g.Text(\"World Clocks\")),\n\t\t\tFormEl(\n\t\t\t\tID(\"tz-form\"),\n\t\t\t\tg.Attr(\"live-change\", cmp.Event(validateTZ)), // c.Event scopes the events to this component.\n\t\t\t\tg.Attr(\"live-submit\", cmp.Event(addTime)),\n\t\t\t\tDiv(\n\t\t\t\t\tP(g.Text(\"Try Europe/London or America/New_York\")),\n\t\t\t\t\tInput(Name(\"tz\")),\n\t\t\t\t\tg.If(state.ValidationError != \"\", Span(g.Text(state.ValidationError))),\n\t\t\t\t),\n\t\t\t\tInput(Type(\"submit\"), g.If(state.ValidationError != \"\", Disabled())),\n\t\t\t),\n\t\t\tDiv(\n\t\t\t\tg.Group(g.Map(len(state.Timezones), func(idx int) g.Node {\n\t\t\t\t\treturn page.Render(state.Timezones[idx])\n\t\t\t\t})),\n\t\t\t),\n\t\t\tScript(Src(\"/live.js\")),\n\t\t},\n\t}).Render(w)\n}", "func (v *View) RenderSingle(w http.ResponseWriter) {\n\n\t// Get the template collection from cache\n\t/*mutex.RLock()\n\ttc, ok := templateCollection[v.Name]\n\tmutex.RUnlock()*/\n\n\t// Get the plugin collection\n\tmutexPlugins.RLock()\n\tpc := pluginCollection\n\tmutexPlugins.RUnlock()\n\n\ttemplateList := []string{v.Name}\n\n\t// List of template names\n\t/*templateList := make([]string, 0)\n\ttemplateList = append(templateList, rootTemplate)\n\ttemplateList = append(templateList, v.Name)\n\ttemplateList = append(templateList, childTemplates...)*/\n\n\t// Loop through each template and test the full path\n\tfor i, name := range templateList {\n\t\t// Get the absolute path of the root template\n\t\tpath, err := filepath.Abs(v.Folder + string(os.PathSeparator) + name + \".\" + v.Extension)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"Template Path Error: \"+err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\ttemplateList[i] = path\n\t}\n\n\t// Determine if there is an error in the template syntax\n\ttemplates, err := template.New(v.Name).Funcs(pc).ParseFiles(templateList...)\n\n\tif err != nil {\n\t\thttp.Error(w, \"Template Parse Error: \"+err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// Cache the template collection\n\t/*mutex.Lock()\n\ttemplateCollection[v.Name] = templates\n\tmutex.Unlock()*/\n\n\t// Save the template collection\n\ttc := templates\n\n\t// Get session\n\tsess := session.Instance(v.request)\n\n\t// Get the flashes for the template\n\tif flashes := sess.Flashes(); len(flashes) > 0 {\n\t\tv.Vars[\"flashes\"] = make([]Flash, len(flashes))\n\t\tfor i, f := range flashes {\n\t\t\tswitch f.(type) {\n\t\t\tcase Flash:\n\t\t\t\tv.Vars[\"flashes\"].([]Flash)[i] = f.(Flash)\n\t\t\tdefault:\n\t\t\t\tv.Vars[\"flashes\"].([]Flash)[i] = Flash{f.(string), \"alert-box\"}\n\t\t\t}\n\n\t\t}\n\t\tsess.Save(v.request, w)\n\t}\n\n\t// Display the content to the screen\n\terr = tc.Funcs(pc).ExecuteTemplate(w, v.Name+\".\"+v.Extension, v.Vars)\n\n\tif err != nil {\n\t\thttp.Error(w, \"Template File Error: \"+err.Error(), 
http.StatusInternalServerError)\n\t}\n}", "func homeHandler(env *Env, w http.ResponseWriter, r *http.Request) error {\n return renderTemplate(w, \"index\", \"base\", nil)\n}", "func (h HomepageView) Template() string {\n\treturn config.Path.Views + \"/homepage.gohtml\"\n}", "func (ctx *Context) Render(bytes []byte) {\n\t//debug\n\t//fmt.Println(\"response msg = \", string(bytes))\n\tctx.Writer.WriteHeader(200)\n\t_, err := ctx.Writer.Write(bytes)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func RenderTemplate(w http.ResponseWriter, request *http.Request, tmpl string, templateData *models.TemplateData) error {\n\tvar templateCache map[string]*template.Template\n\n\t// if UseCache is true then read the info from the template cache\n\tif app.UseCache {\n\t\t// get the template cache from the app config\n\t\ttemplateCache = app.TemplateCache\n\t} else { // otherwise rebuild the template cache\n\t\ttemplateCache, _ = CreateTemplateCache()\n\t}\n\n\tt, ok := templateCache[tmpl]\n\tif !ok {\n\t\treturn errors.New(\"could not get template from cache\")\n\t}\n\n\t// holds bytes (parsed template)\n\tbuf := new(bytes.Buffer)\n\n\t// add data that should be available to all pages\n\ttemplateData = AddDefaultData(templateData, request)\n\n\t// renders the page\n\t_ = t.Execute(buf, templateData)\n\n\t_, err := buf.WriteTo(w)\n\tif err != nil {\n\t\tfmt.Println(\"Error writing template to browser\")\n\t\treturn err\n\t}\n\n\treturn nil\n\n\t//parsedTemplate, _ := template.ParseFiles(\"./templates/\" + tmpl)\n\t//err = parsedTemplate.Execute(w, nil)\n\t//if err != nil {\n\t//\tfmt.Println(\"error parsing template:\", err)\n\t//\treturn\n\t//}\n}", "func renderTemplate(w io.Writer, name string, data interface{}) {\n\tif err := templates.ExecuteTemplate(w, name+\".html\", data); err != nil {\n\t\tpanic(err)\n\t}\n}", "func Render(w io.Writer, template string, data interface{}) error {\n\tif err := renderer.HTML(w, 0, template, data); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (r *innerRenderer) Render(w io.Writer, data interface{}, ctx Context, tpl ...string) error {\n\tt, err := r.parseFiles(tpl...)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn t.Execute(w, data)\n}", "func (site *Site) renderPages() error {\n\troot := site.Pages[\"\"]\n\n\tfor _, page := range site.Pages {\n\t\tnav := buildNav(root, page)\n\t\tnav.Active = root == page // override index.md active\n\n\t\tdata := PageData{\n\t\t\tNav: nav,\n\t\t\tFront: &page.FrontMatter,\n\t\t\tPage: page,\n\t\t}\n\n\t\tvar buf bytes.Buffer\n\t\tif page.Template != \"\" {\n\t\t\tsiteTempl, err := site.BaseTemplate.Clone()\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"template clone failed: %w\", err)\n\t\t\t}\n\t\t\tt, err := siteTempl.Parse(string(page.Template))\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"template parse failed: %w\", err)\n\t\t\t}\n\t\t\tif err := t.ExecuteTemplate(&buf, \"root\", data); err != nil {\n\t\t\t\treturn fmt.Errorf(\"template failed: %w\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tif err := site.Template.ExecuteTemplate(&buf, \"root\", data); err != nil {\n\t\t\t\treturn fmt.Errorf(\"template failed: %w\", err)\n\t\t\t}\n\t\t}\n\t\tpage.Rendered = buf.Bytes()\n\n\t\tif page.RSS {\n\t\t\tvar err error\n\t\t\tpage.RenderedRSS, err = site.renderRSS(data)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"rss rendering failed: %w\", err)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func Render(w http.ResponseWriter, name string, d map[string]interface{}) {\n\tif e := tpl.ExecuteTemplate(w, name, d); e 
!= nil {\n\t\thttp.Error(w, e.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n}", "func render(template string, def definition, params map[string]interface{}) (string, error) {\n\tctx := plush.NewContext()\n\tctx.Set(\"camelize_down\", camelizeDown)\n\tctx.Set(\"def\", def)\n\tctx.Set(\"params\", params)\n\ts, err := plush.Render(string(template), ctx)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn s, nil\n}", "func (t Tmpl) RenderTemplate(w http.ResponseWriter, req *http.Request, name string, args map[string]interface{}) {\n\t// Check if app is running on dev mode\n\tif Config.Configuration.IsDev() {\n\n\t\t// Lock mutex\n\t\tt.rw.Lock()\n\t\tdefer t.rw.Unlock()\n\n\t\t// Create new template\n\t\tt = NewTemplate(\"castro\")\n\n\t\t// Set template FuncMap\n\t\tt.Tmpl.Funcs(FuncMap)\n\n\t\t// Reload all templates\n\t\tif err := t.LoadTemplates(\"views/\"); err != nil {\n\t\t\tLogger.Logger.Error(err.Error())\n\t\t\treturn\n\t\t}\n\n\t\t// Reload all templates\n\t\tif err := t.LoadTemplates(\"pages/\"); err != nil {\n\t\t\tLogger.Logger.Error(err.Error())\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Check if args is a valid map\n\tif args == nil {\n\t\targs = map[string]interface{}{}\n\t}\n\n\t// Load microtime from the microtimeHandler\n\tmicrotime, ok := req.Context().Value(\"microtime\").(time.Time)\n\tif !ok {\n\t\tw.WriteHeader(500)\n\t\tw.Write([]byte(\"Cannot read microtime value\"))\n\t\treturn\n\t}\n\n\t// Get csrf token\n\ttkn, ok := req.Context().Value(\"csrf-token\").(*models.CsrfToken)\n\tif !ok {\n\t\tw.WriteHeader(500)\n\t\tw.Write([]byte(\"Cannot read csrf token value\"))\n\t\treturn\n\t}\n\n\t// Get nonce value\n\tnonce, ok := req.Context().Value(\"nonce\").(string)\n\n\tif !ok {\n\t\tw.WriteHeader(500)\n\t\tw.Write([]byte(\"Cannot read nonce value\"))\n\t\treturn\n\t}\n\n\t// Set nonce value\n\targs[\"nonce\"] = nonce\n\n\t// Set token value\n\targs[\"csrfToken\"] = tkn.Token\n\n\t// Set microtime value\n\targs[\"microtime\"] = fmt.Sprintf(\"%9.4f seconds\", time.Since(microtime).Seconds())\n\n\t// Render template and log error\n\tif err := t.Tmpl.ExecuteTemplate(w, name, args); err != nil {\n\t\tLogger.Logger.Error(err.Error())\n\t}\n}", "func Render(w http.ResponseWriter, name string, data interface{}) error {\n\terr := tpl.ExecuteTemplate(w, name, data)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (c *Controller) Render(context ...interface{}) Result {\n\tctx, err := c.buildContext(context)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tc.setContentTypeIfNotExists(\"text/html\")\n\tformat := MimeTypeFormats.Get(c.Response.ContentType)\n\tif format == \"\" {\n\t\tpanic(fmt.Errorf(\"unknown Content-Type: %v\", c.Response.ContentType))\n\t}\n\tt := appConfig.templateMap.Get(appConfig.AppName, c.Layout, c.Name, format)\n\tif t == nil {\n\t\tpanic(errors.New(\"no such template: \" + appConfig.templateMap.Ident(appConfig.AppName, c.Layout, c.Name, format)))\n\t}\n\tvar buf bytes.Buffer\n\tif err := t.Execute(&buf, ctx); err != nil {\n\t\tpanic(err)\n\t}\n\treturn &resultContent{\n\t\tBody: &buf,\n\t}\n}", "func (t *TemplateRenderer) Render(w io.Writer, name string, c echo.Context) error {\n\treturn t.templates.ExecuteTemplate(w, name, nil)\n}", "func (c *Controller) Render() error {\n\tif !c.EnableRender {\n\t\treturn nil\n\t}\n\trb, err := c.RenderBytes()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif c.Ctx.ResponseWriter.Header().Get(\"Content-Type\") == \"\" {\n\t\tc.Ctx.Output.Header(\"Content-Type\", \"text/html; charset=utf-8\")\n\t}\n\n\treturn 
c.Ctx.Output.Body(rb)\n}", "func renderTemplate(w http.ResponseWriter, name string, data interface{}) {\n\tif err := Tmpl.Render(w, name, data); err != nil {\n\t\thttpError(w, 500, err)\n\t}\n}", "func renderTemplate(w http.ResponseWriter, name string, data interface{}) {\n\tif err := Tmpl.Render(w, name, data); err != nil {\n\t\thttpError(w, 500, err)\n\t}\n}", "func StreamPageTemplate(qw422016 *qt422016.Writer, p Page) {\n\t//line base.qtpl:13\n\tqw422016.N().S(`\n<html>\n\t<head>\n\t\t<title>`)\n\t//line base.qtpl:16\n\tp.StreamTitle(qw422016)\n\t//line base.qtpl:16\n\tqw422016.N().S(`</title>\n\t</head>\n\t<body>\n\t\t<div>\n\t\t\t<a href=\"/\">return to main page</a>\n\t\t</div>\n\t\t`)\n\t//line base.qtpl:22\n\tp.StreamBody(qw422016)\n\t//line base.qtpl:22\n\tqw422016.N().S(`\n\t</body>\n</html>\n`)\n//line base.qtpl:25\n}", "func Render(param ...interface{}) {\n\n\tt, err := template.ParseFiles(param[1].(string))\n\tcheckErr(err)\n\tif len(param) == 2 {\n\t\terr = t.Execute(param[0].(http.ResponseWriter), nil)\n\t\tcheckErr(err)\n\t} else if len(param) == 3 {\n\t\terr = t.Execute(param[0].(http.ResponseWriter), param[2].(User))\n\t\tcheckErr(err)\n\t}\n\t//func Render(w http.ResponseWriter, url string) {\n\n}", "func (t *TemplateRenderer) Render(w io.Writer, name string, data interface{}, c echo.Context) error {\n\treturn t.templates.ExecuteTemplate(w, name, data)\n}", "func (c *Context) Render(status int, tmpl string, data interface{}) error {\n\tt, found := c.M.opt.Templates[tmpl]\n\tif !found {\n\t\t// TODO: might want to show \"invalid template name: the_name.html\" instead\n\t\treturn ErrInvalidTemplate\n\t}\n\t// If there's any errors in the template we'll catch them here using a bytes.Buffer and don't risk messing up\n\t// the output to the client (by writing directly to context.W too soon). Using a pool should speed things up\n\t// too (and play nicer with the GC etc. 
etc.).\n\tbuff := c.M.getTemplateBuff()\n\tdefer c.M.putTemplateBuff(buff)\n\tif err := t.Execute(buff, data); err != nil {\n\t\treturn err\n\t}\n\n\tc.SetHeader(\"Content-Type\", \"text/html; charset=utf-8\")\n\tc.W.WriteHeader(status)\n\t_, err := buff.WriteTo(c.W)\n\treturn err\n}", "func (t *echoTempl) Render(\n\tw io.Writer,\n\tname string,\n\tdata interface{},\n\tc echo.Context,\n) error {\n\treturn t.templates.ExecuteTemplate(w, name, data)\n}", "func (p *Home) Render() vecty.ComponentOrHTML {\n\treturn elem.Body(p.perspectiveList)\n}", "func (t Tmpl) Render(wr io.Writer, name string, args interface{}) error {\n\t// Check if app is running on dev mode\n\tif Config.Configuration.IsDev() {\n\n\t\t// Lock mutex\n\t\tt.rw.Lock()\n\t\tdefer t.rw.Unlock()\n\n\t\t// Reload all templates\n\t\tif err := t.LoadTemplates(\"views/\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Execute template and return error\n\treturn t.Tmpl.ExecuteTemplate(wr, name, args)\n}", "func (t Tmpl) Render(wr io.Writer, name string, args interface{}) error {\n\t// Check if app is running on dev mode\n\tif Config.Configuration.IsDev() {\n\n\t\t// Lock mutex\n\t\tt.rw.Lock()\n\t\tdefer t.rw.Unlock()\n\n\t\t// Reload all templates\n\t\tif err := t.LoadTemplates(\"views/\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Execute template and return error\n\treturn t.Tmpl.ExecuteTemplate(wr, name, args)\n}", "func (t *Renderer) Render(\n\tw io.Writer,\n\tname string,\n\tdata interface{},\n\tc echo.Context,\n) error {\n\tif t.debug {\n\t\tt.ReloadTemplates()\n\t}\n\n\treturn t.template.ExecuteTemplate(w, name, data)\n}", "func (t *TemplateManager) Render(w io.Writer, name string, data interface{}) error {\n\tstack := []*Template{}\n\ttplSrc, err := t.getSrc(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = t.push(&stack, tplSrc, name)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttpl, err := t.assemble(stack)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif tpl == nil {\n\t\treturn Errorf(\"Nil template named %s\", name)\n\t}\n\n\terr = tpl.Execute(w, data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (w *Widget) Render(context *Context, file string) template.HTML {\n\tvar err error\n\tvar result = bytes.NewBufferString(\"\")\n\tif file == \"\" {\n\t\tfile = w.Templates[0]\n\t}\n\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\terr = fmt.Errorf(\"Get error when render file %v: %v\", file, r)\n\t\t\tutils.ExitWithMsg(err)\n\t\t}\n\t}()\n\n\tif file, err = w.findTemplate(file + \".tmpl\"); err == nil {\n\t\tif tmpl, err := template.New(filepath.Base(file)).ParseFiles(file); err == nil {\n\t\t\tif err = tmpl.Execute(result, context.Options); err == nil {\n\t\t\t\treturn template.HTML(result.String())\n\t\t\t}\n\t\t}\n\t}\n\n\treturn template.HTML(err.Error())\n}", "func (t *TemplateRenderer) Render(w io.Writer, name string, data interface{}, c echo.Context) error {\n\n\t// Add global methods if data is a map\n\tif viewContext, isMap := data.(map[string]interface{}); isMap {\n\t\tviewContext[\"reverse\"] = c.Echo().Reverse\n\t}\n\n\treturn t.templates.ExecuteTemplate(w, name, data)\n}", "func (t *TemplateRenderer) Render(w io.Writer, name string, data interface{}, c echo.Context) error {\n\n\t// Add global methods if data is a map\n\tif viewContext, isMap := data.(map[string]interface{}); isMap {\n\t\tviewContext[\"reverse\"] = c.Echo().Reverse\n\t}\n\n\treturn t.templates.ExecuteTemplate(w, name, data)\n}", "func (t *TemplateRenderer) 
Render(w io.Writer, name string, data interface{}, c echo.Context) error {\n\n\t// Add global methods if data is a map\n\tif viewContext, isMap := data.(map[string]interface{}); isMap {\n\t\tviewContext[\"reverse\"] = c.Echo().Reverse\n\t}\n\n\treturn t.templates.ExecuteTemplate(w, name, data)\n}", "func (t *TemplateRenderer) Render(w io.Writer, name string, data interface{}, c echo.Context) error {\n\n\t// Add global methods if data is a map\n\tif viewContext, isMap := data.(map[string]interface{}); isMap {\n\t\tviewContext[\"reverse\"] = c.Echo().Reverse\n\t}\n\n\treturn t.templates.ExecuteTemplate(w, name, data)\n}", "func (t *TemplateRenderer) Render(w io.Writer, name string, data interface{}, c echo.Context) error {\n\n\t// Add global methods if data is a map\n\tif viewContext, isMap := data.(map[string]interface{}); isMap {\n\t\tviewContext[\"reverse\"] = c.Echo().Reverse\n\t}\n\n\treturn t.templates.ExecuteTemplate(w, name, data)\n}" ]
[ "0.7419679", "0.69988775", "0.6976363", "0.6867108", "0.676085", "0.6621656", "0.65539324", "0.65483695", "0.65003127", "0.6480978", "0.6468482", "0.6449129", "0.6338193", "0.6334071", "0.6327223", "0.6316062", "0.6288715", "0.6266251", "0.62608874", "0.6256042", "0.62559956", "0.62442917", "0.62175876", "0.619516", "0.61851364", "0.6183082", "0.61752915", "0.61618924", "0.6133866", "0.61243147", "0.612254", "0.611943", "0.6108829", "0.6098202", "0.60745597", "0.6056177", "0.6043818", "0.60392773", "0.60215694", "0.60214555", "0.6010839", "0.60072106", "0.600231", "0.6001955", "0.599689", "0.5990715", "0.5988489", "0.5978335", "0.5978335", "0.5963481", "0.59503", "0.5947878", "0.5929218", "0.5925707", "0.59163934", "0.5906644", "0.5904572", "0.58916265", "0.5887906", "0.5869236", "0.5867768", "0.5861516", "0.58579236", "0.58552736", "0.5850284", "0.5849592", "0.58449537", "0.5840302", "0.583944", "0.58366317", "0.58191353", "0.5817893", "0.58099526", "0.57969713", "0.5796698", "0.57949835", "0.5791694", "0.5789219", "0.5787309", "0.57767534", "0.5755344", "0.57498235", "0.57413197", "0.57413197", "0.57309216", "0.5728792", "0.57275945", "0.5714236", "0.5709224", "0.57091624", "0.5708587", "0.5708587", "0.57047755", "0.5702965", "0.5692015", "0.56862134", "0.56862134", "0.56862134", "0.56862134", "0.56862134" ]
0.6413044
12
public static native int floatToRawIntBits(float value);
func floatToRawIntBits(frame *rtda.Frame) {
	floatVal := frame.LocalVars().GetFloat(0)
	frame.OperandStack().PushInt(int32(math.Float32bits(floatVal)))
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func JDK_java_lang_Float_floatToRawIntBits(value Float) Int {\n\tbits := math.Float32bits(float32(value))\n\treturn Int(int32(bits))\n}", "func float32bits(f float32) uint32 { return *(*uint32)(unsafe.Pointer(&f)) }", "func floatBits(f float64) uint64 {\n\t// Take f parameter and determine bit pattern.\n\t// Translate bit pattern into a value of type uint64\n\ti := *(*uint64)(unsafe.Pointer(&f))\n\t//fmt.Printf(\"strconv.FormatUint: %v\\n\", strconv.FormatUint(i, 2))\n\t// Return new value\n\treturn i\n}", "func Floatbits(tk obj.Token, args []oop.VarDef) oop.Val {\n\tval := args[0].Val\n\tif val.Type != oop.Int && val.Type != oop.Float {\n\t\tval.Data = 0.0\n\t}\n\tf := val.Data.(float64)\n\treturn oop.Val{Data: float64(*(*uint64)(unsafe.Pointer(&f))), Type: oop.Int}\n}", "func (f Float) Bits() (a, b uint64) {\n\treturn math.Float64bits(f.high), math.Float64bits(f.low)\n}", "func Floatfrombits(tk obj.Token, args []oop.VarDef) oop.Val {\n\tval := args[0].Val\n\tif val.Type != oop.Int {\n\t\tval.Data = 0.0\n\t}\n\tb := uint64(val.Data.(float64))\n\treturn oop.Val{Data: *(*float64)(unsafe.Pointer(&b)), Type: oop.Float}\n}", "func main() {\n\n\tvar f float64 = 1.0\n\tfmt.Printf(\"unsafe.Pointer: %064b\\n\", unsafe.Pointer(&f))\n\tfb := floatBits(f)\n\t// Print bit pattern of float64\n\tfmt.Printf(\"uint64 bit pattern: %064b\\n\", fb)\n\tfmt.Printf(\"uint64 value: %d\\n\", fb)\n}", "func IeeeFloatToInt(b [10]byte) int {\n\tvar i uint32\n\t// Negative number\n\tif (b[0] & 0x80) == 1 {\n\t\treturn 0\n\t}\n\n\t// Less than 1\n\tif b[0] <= 0x3F {\n\t\treturn 1\n\t}\n\n\t// Too big\n\tif b[0] > 0x40 {\n\t\treturn 67108864\n\t}\n\n\t// Still too big\n\tif b[0] == 0x40 && b[1] > 0x1C {\n\t\treturn 800000000\n\t}\n\n\ti = (uint32(b[2]) << 23) | (uint32(b[3]) << 15) | (uint32(b[4]) << 7) | (uint32(b[5]) >> 1)\n\ti >>= (29 - uint32(b[1]))\n\n\treturn int(i)\n}", "func Float64From32Bits(f float64) float64 {\n\tif f < 0 {\n\t\treturn 0\n\t}\n\treturn f\n}", "func (f Float) Bits() uint16 {\n\treturn f.bits\n}", "func floatToFix(x float32) int32 {\n\treturn int32(x * 64.0)\n}", "func IntToIeeeFloat(i int) [10]byte {\n\tb := [10]byte{}\n\tnum := float64(i)\n\n\tvar sign int\n\tvar expon int\n\tvar fMant, fsMant float64\n\tvar hiMant, loMant uint\n\n\tif num < 0 {\n\t\tsign = 0x8000\n\t} else {\n\t\tsign = 0\n\t}\n\n\tif num == 0 {\n\t\texpon = 0\n\t\thiMant = 0\n\t\tloMant = 0\n\t} else {\n\t\tfMant, expon = math.Frexp(num)\n\t\tif (expon > 16384) || !(fMant < 1) { /* Infinity or NaN */\n\t\t\texpon = sign | 0x7FFF\n\t\t\thiMant = 0\n\t\t\tloMant = 0 /* infinity */\n\t\t} else { /* Finite */\n\t\t\texpon += 16382\n\t\t\tif expon < 0 { /* denormalized */\n\t\t\t\tfMant = math.Ldexp(fMant, expon)\n\t\t\t\texpon = 0\n\t\t\t}\n\t\t\texpon |= sign\n\t\t\tfMant = math.Ldexp(fMant, 32)\n\t\t\tfsMant = math.Floor(fMant)\n\t\t\thiMant = uint(fsMant)\n\t\t\tfMant = math.Ldexp(fMant-fsMant, 32)\n\t\t\tfsMant = math.Floor(fMant)\n\t\t\tloMant = uint(fsMant)\n\t\t}\n\t}\n\n\tb[0] = byte(expon >> 8)\n\tb[1] = byte(expon)\n\tb[2] = byte(hiMant >> 24)\n\tb[3] = byte(hiMant >> 16)\n\tb[4] = byte(hiMant >> 8)\n\tb[5] = byte(hiMant)\n\tb[6] = byte(loMant >> 24)\n\tb[7] = byte(loMant >> 16)\n\tb[8] = byte(loMant >> 8)\n\tb[9] = byte(loMant)\n\n\treturn b\n}", "func PtrFloat(v float32) *float32 { return &v }", "func (f Float) Bits() (a, b uint64) {\n\treturn f.a, f.b\n}", "func FloatToInt(f float64) (int64, NumberType) {\n\tn := int64(f)\n\tif float64(n) == f {\n\t\treturn n, IsInt\n\t}\n\treturn 0, NaI\n}", "func 
ConvertFloatToIntBSV(floatValue float64) int64 {\r\n\r\n\t// Do conversion to satoshis (percentage) using decimal package to avoid float issues\r\n\t// => 1e8 * amount / currentRate\r\n\t// (use 1e8 since rate is in Bitcoin not Satoshis)\r\n\tsatoshisDecimal := decimal.NewFromInt(SatoshisPerBitcoin).Mul(decimal.NewFromFloat(floatValue))\r\n\r\n\t// Drop decimals after since can only have whole Satoshis\r\n\treturn satoshisDecimal.Ceil().IntPart()\r\n}", "func FloatSign(x *big.Float,) int", "func Float64ToBinary(v float64) []byte {\n\tvar buf [8]byte\n\tbinary.BigEndian.PutUint64(buf[:], math.Float64bits(v))\n\treturn buf[:]\n}", "func (f Float) Bits() (se uint16, m uint64) {\n\treturn f.se, f.m\n}", "func PtrFloat32(v float32) *float32 { return &v }", "func Int16Tof32(buff []byte) float32 {\n\treturn float32(int16(binary.LittleEndian.Uint16(buff)))\n}", "func (p Pointer) Float32BE(offset int) float32 {\n\treturn float32(bits.ReverseBytes32(*(*uint32)(unsafe.Pointer(uintptr(int(p) + offset)))))\n}", "func fixToFloat(x int32) float32 {\n\treturn float32(x>>6) + float32(x&0x3f)/0x3f\n}", "func (num Fixed32) ToFloat32() float32 {\n\treturn float32(num) / 65536.0\n}", "func FloatSignbit(x *big.Float,) bool", "func cv2f64(v interface{}) float64 {\r\n\r\n\tif v == nil {\r\n\t\treturn 0\r\n\t}\r\n\tswitch n := v.(type) {\r\n\tcase uint8:\r\n\t\treturn float64(n)\r\n\tcase uint16:\r\n\t\treturn float64(n)\r\n\tcase uint32:\r\n\t\treturn float64(n)\r\n\tcase uint64:\r\n\t\treturn float64(n)\r\n\tcase uint:\r\n\t\treturn float64(n)\r\n\tcase int8:\r\n\t\treturn float64(n)\r\n\tcase int16:\r\n\t\treturn float64(n)\r\n\tcase int32:\r\n\t\treturn float64(n)\r\n\tcase int64:\r\n\t\treturn float64(n)\r\n\tcase int:\r\n\t\treturn float64(n)\r\n\tcase string:\r\n\t\tsv, err := strconv.ParseFloat(n, 64)\r\n\t\tif err == nil {\r\n\t\t\treturn sv\r\n\t\t}\r\n\t\treturn 0\r\n\tdefault:\r\n\t\treturn 0\r\n\t}\r\n}", "func NewFromBits(a, b uint64) Float {\n\treturn Float{a: a, b: b}\n}", "func NewFromBits(a, b uint64) Float {\n\treturn Float{a: a, b: b}\n}", "func opI64ToF32(expr *CXExpression, fp int) {\n\toutB0 := float32(ReadI64(fp, expr.Inputs[0]))\n\tWriteF32(GetOffset_i32(fp, expr.Outputs[0]), outB0)\n}", "func opUI64ToF32(prgrm *CXProgram) {\n\texpr := prgrm.GetExpr()\n\tfp := prgrm.GetFramePointer()\n\n\toutV0 := float32(ReadUI64(fp, expr.Inputs[0]))\n\tWriteF32(GetFinalOffset(fp, expr.Outputs[0]), outV0)\n}", "func BytesToFloat(b []byte) float32 {\n\tif b == nil {\n\t\treturn 0.0\n\t} else {\n\t\treturn *(*float32)(unsafe.Pointer(&b[0]))\n\t}\n}", "func (in *InBuffer) ReadFloat32BE() float32 {\n\treturn math.Float32frombits(in.ReadUint32BE())\n}", "func (in *InBuffer) ReadFloat32BE() float32 {\n\treturn math.Float32frombits(in.ReadUint32BE())\n}", "func WriteFloat(buffer []byte, offset int, value float32) {\n WriteUInt32(buffer, offset, math.Float32bits(value))\n}", "func BinaryToFloat64(bytes []byte) float64 {\n\tbits := binary.BigEndian.Uint64(bytes)\n\tfloat := math.Float64frombits(bits)\n\treturn float\n}", "func opI8ToF32(expr *CXExpression, fp int) {\n\toutV0 := float32(ReadI8(fp, expr.Inputs[0]))\n\tWriteF32(GetOffset_f32(fp, expr.Outputs[0]), outV0)\n}", "func fltToFloat32(k string, x gosmc.SMCBytes, size uint32) (float32, error) {\n\treturn math.Float32frombits(binary.LittleEndian.Uint32(x[:size])), nil\n}", "func (g *GLTF) bytesToArrayF32(data []byte, componentType, count int) (math32.ArrayF32, error) {\n\n\t// If component is UNSIGNED_INT nothing to do\n\tif componentType == UNSIGNED_INT 
{\n\t\tarr := (*[1 << 30]float32)(unsafe.Pointer(&data[0]))[:count]\n\t\treturn math32.ArrayF32(arr), nil\n\t}\n\n\t// Converts UNSIGNED_SHORT or SHORT to UNSIGNED_INT\n\tif componentType == UNSIGNED_SHORT || componentType == SHORT {\n\t\tout := math32.NewArrayF32(count, count)\n\t\tfor i := 0; i < count; i++ {\n\t\t\tout[i] = float32(data[i*2]) + float32(data[i*2+1])*256\n\t\t}\n\t\treturn out, nil\n\t}\n\n\t// Converts UNSIGNED_BYTE or BYTE to UNSIGNED_INT\n\tif componentType == UNSIGNED_BYTE || componentType == BYTE {\n\t\tout := math32.NewArrayF32(count, count)\n\t\tfor i := 0; i < count; i++ {\n\t\t\tout[i] = float32(data[i])\n\t\t}\n\t\treturn out, nil\n\t}\n\n\treturn (*[1 << 30]float32)(unsafe.Pointer(&data[0]))[:count], nil\n}", "func (buff *Bytes) ToFloat32() float32 {\r\n\treturn *(*float32)(unsafe.Pointer(&(*buff)[0]))\r\n}", "func Float32Ptr(v float32) *float32 { return &v }", "func Inf32(sign int) float32 {\n\tvar v uint32\n\tif sign >= 0 {\n\t\tv = uvinf32\n\t} else {\n\t\tv = uvneginf32\n\t}\n\treturn Float32frombits(v)\n}", "func FloatIsInt(x *big.Float,) bool", "func ReadFloat(buffer []byte, offset int) float32 {\n bits := ReadUInt32(buffer, offset)\n return math.Float32frombits(bits)\n}", "func (x *Rat) Float32() (f float32, exact bool) {}", "func opUI8ToF32(inputs []ast.CXValue, outputs []ast.CXValue) {\n\toutV0 := float32(inputs[0].Get_ui8())\n outputs[0].Set_f32(outV0)\n}", "func fixedInt26ToFloat(fixedInt fixed.Int26_6) float32 {\n\tvar result float32\n\ti := int32(fixedInt)\n\tresult += float32(i >> 6)\n\tresult += float32(i&0x003F) / float32(64.0)\n\treturn result\n}", "func boolToInt(b bool) uint32 {\n\t// Yes, unsafe. Really. There is no better way to do this, which is all\n\t// sorts of fucking braindamaged.\n\treturn uint32(*(*byte)(unsafe.Pointer(&b)))\n}", "func FloatInt(x *big.Float, z *big.Int,) (*big.Int, big.Accuracy,)", "func IntBits(x *big.Int,) []big.Word", "func M128ToFloat(src []M128) []float32 {\n\theader := *(*reflect.SliceHeader)(unsafe.Pointer(&src))\n\n\t// The length and capacity of the slice are different.\n\theader.Len *= 4\n\theader.Cap *= 4\n\n\t// Convert slice header to an []int32\n\tdst := *(*[]float32)(unsafe.Pointer(&header))\n\n\treturn dst\n}", "func (rw *RW) ToFloat32() float32 {\n\treturn *(*float32)(unsafe.Pointer(&rw.Value))\n}", "func FloatPrec(x *big.Float,) uint", "func opI16ToF32(expr *CXExpression, fp int) {\n\toutB0 := float32(ReadI16(fp, expr.Inputs[0]))\n\tWriteF32(GetOffset_f32(fp, expr.Outputs[0]), outB0)\n}", "func (i *ImageBuf) GetFloatPixels() ([]float32, error) {\n\tspec := i.Spec()\n\tsize := spec.Width() * spec.Height() * spec.Depth() * spec.NumChannels()\n\tpixels := make([]float32, size)\n\tptr := unsafe.Pointer(&pixels[0])\n\n\troi := i.ROI()\n\n\tok := bool(C.ImageBuf_get_pixel_channels(\n\t\ti.ptr,\n\t\tC.int(roi.XBegin()), C.int(roi.XEnd()),\n\t\tC.int(roi.YBegin()), C.int(roi.YEnd()),\n\t\tC.int(roi.ZBegin()), C.int(roi.ZEnd()),\n\t\tC.int(roi.ChannelsBegin()), C.int(roi.ChannelsEnd()),\n\t\t(C.TypeDesc)(TypeFloat), ptr),\n\t)\n\n\tif !ok {\n\t\treturn nil, i.LastError()\n\t}\n\truntime.KeepAlive(i)\n\n\treturn pixels, nil\n}", "func IntFromFloat64(incoming float64) (i int) {\n\ti = int(incoming)\n\treturn i\n}", "func FtoB(f int) uint64 {\n\tif f < x86.REG_X0 || f > x86.REG_X15 {\n\t\treturn 0\n\t}\n\treturn 1 << uint(f-x86.REG_X0+16)\n}", "func ToFloat32(v []byte) (float32, error) {\n\tprimitivePacket, _, _, err := DecodePrimitivePacket(v)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tvalue, err := 
primitivePacket.ToFloat32()\n\tif err != nil {\n\t\treturn 0, nil\n\t}\n\treturn value, nil\n}", "func (v Value) Float(bitSize int) (float64, error) {\n\tif v.typ != Number {\n\t\treturn 0, v.newError(\"%s is not a number\", v.Raw())\n\t}\n\tf, err := strconv.ParseFloat(v.Raw(), bitSize)\n\tif err != nil {\n\t\treturn 0, v.newError(\"%v\", err)\n\t}\n\treturn f, nil\n}", "func (v Float) Native() interface{} {\n\treturn float64(v)\n}", "func fpToFloat32(t string, x gosmc.SMCBytes, size uint32) (float32, error) {\n\tif v, ok := AppleFPConv[t]; ok {\n\t\tres := binary.BigEndian.Uint16(x[:size])\n\t\tif v.Signed {\n\t\t\treturn float32(int16(res)) / v.Div, nil\n\t\t} else {\n\t\t\treturn float32(res) / v.Div, nil\n\t\t}\n\t}\n\n\treturn 0.0, fmt.Errorf(\"unable to convert to float32 type %q, bytes %v to float32\", t, x)\n}", "func Uint32() uint32", "func Uint32F(name string, value uint32, usage string) *uint32 {\n\treturn Global.Uint32F(name, value, usage)\n}", "func FloatInt(val *big.Int) (out *big.Float, err error) {\n\tif err != nil {\n\t\treturn\n\t}\n\tout = new(big.Float).SetInt(val)\n\treturn\n}", "func ToFloat32(i interface{}) float32 {\n\treturn cast.ToFloat32(i)\n}", "func float642Uints(val float64) (bool, int, uint64, uint64) {\n\t//dst := make([]byte, 0, 24)\n\tvar bits uint64\n\tvar flt *floatInfo\n\tbits = math.Float64bits(val)\n\tflt = &float64info\n\n\tneg := bits>>(flt.expbits+flt.mantbits) != 0\n\texp := int(bits>>flt.mantbits) & (1<<flt.expbits - 1)\n\tmant := bits & (uint64(1)<<flt.mantbits - 1)\n\n\tswitch exp {\n\tcase 1<<flt.expbits - 1:\n\t\treturn neg, 0, 0, 0\n\tcase 0:\n\t\t// denormalized\n\t\texp++\n\tdefault:\n\t\t// add implicit top bit\n\t\tmant |= uint64(1) << flt.mantbits\n\t}\n\n\texp += flt.bias\n\n\tvar prec int\n\tvar digs decimalSlice\n\tok := false\n\t// Try Grisu3 algorithm.\n\tf := new(extFloat)\n\tlower, upper := f.AssignComputeBounds(mant, exp, neg, flt)\n\tvar buf [32]byte\n\tdigs.d = buf[:]\n\tok = f.ShortestDecimal(&digs, &lower, &upper)\n\tif !ok {\n\t\td := new(decimal)\n\t\td.Assign(mant)\n\t\td.Shift(exp - int(flt.mantbits))\n\t\tvar digs decimalSlice\n\t\troundShortest(d, mant, exp, flt)\n\t\tdigs = decimalSlice{d: d.d[:], nd: d.nd, dp: d.dp}\n\t\t// Precision for shortest representation mode.\n\t\tprec = max(digs.nd-digs.dp, 0)\n\t} else {\n\t\tprec = max(digs.nd-digs.dp, 0)\n\t}\n\t//\n\tvar integer, fraction uint64\n\n\t// integer, padded with zeros as needed.\n\tif digs.dp > 0 {\n\t\tm := min(digs.nd, digs.dp)\n\t\tfor i := 0; i < m; i++ {\n\t\t\tinteger *= 10\n\t\t\tinteger += uint64(digs.d[i]-'0')\n\t\t}\n\t\tfor ; m < digs.dp; m++ {\n\t\t\tinteger *= 10\n\t\t}\n\t}\n\n\t// fraction\n\tif prec > 0 {\n\t\tfor i := 0; i < prec; i++ {\n\t\t\tch := uint64(0)\n\t\t\tif j := digs.dp + i; 0 <= j && j < digs.nd {\n\t\t\t\tch = uint64(digs.d[j]-'0')\n\t\t\t}\n\t\t\tfraction *= 10\n\t\t\tfraction += ch\n\t\t}\n\t}\n\n\treturn neg, prec, integer, fraction\n}", "func NaN32() float32 { return Float32frombits(uvnan32) }", "func PtrFloat64(v float64) *float64 { return &v }", "func BytesToInt(b []byte) int {\n\n\tif b == nil {\n\t\treturn 0.0\n\t} else {\n\t\tvar x int\n\t\tl := uint(len(b))\n\t\tfor i := uint(0); i < l; i++ {\n\t\t\tx |= int(b[i]) << (8 * i)\n\t\t}\n\t\treturn x\n\t}\n}", "func SignFloat(x float32) float32 {\r\n\tif x > 0 {\r\n\t\treturn 1\r\n\t} else if x < 0 {\r\n\t\treturn -1\r\n\t} else {\r\n\t\treturn 0\r\n\t}\r\n}", "func (x *Float) Signbit() bool {}", "func floatToInt(o float64) (int, bool) {\n\ti := int(o)\n\tif float64(i) == 
o {\n\t\treturn i, true\n\t}\n\treturn 0, false\n}", "func ConvertLinearTo8Bit(v float32) uint8 {\n\treturn uint8(math.Round(linearToEncoded(float64(v)) * 255))\n}", "func JDK_jang_lang_Double_doubleToRawLongBits(value Double) Long {\n\tbits := math.Float64bits(float64(value))\n\treturn Long(int64(bits))\n}", "func (v Value) Float() float64 {\n\tswitch {\n\tcase v == 0:\n\t\treturn 0\n\tcase v == 64:\n\t\treturn 0.5\n\tcase v == 127:\n\t\treturn 1\n\tcase v < 64:\n\t\treturn float64(v) / 128\n\tdefault:\n\t\treturn float64(v-1) / 126\n\t}\n}", "func (f Float) Float32() float32 {\n\tpanic(\"not yet implemented\")\n}", "func (p Pointer) Float64BE(offset int) float64 {\n\treturn float64(bits.ReverseBytes64(*(*uint64)(unsafe.Pointer(uintptr(int(p) + offset)))))\n}", "func (n *eeNum) float64() *float64 { return (*float64)(unsafe.Pointer(&n.data)) }", "func NewFromBits(a, b uint64) Float {\n\thigh := math.Float64frombits(a)\n\tlow := math.Float64frombits(b)\n\treturn Float{\n\t\thigh: high,\n\t\tlow: low,\n\t}\n}", "func f8(ctx *Context, l0 int32) int32 {\n\treturn ctx.f.F8(ctx, l0)\n}", "func IntMaxSignedValue(b int) int {\n\tswitch b {\n\tcase 8:\n\t\treturn 255 / 2\n\tcase 16:\n\t\treturn 65535 / 2\n\tcase 24:\n\t\treturn 16777215 / 2\n\tcase 32:\n\t\treturn 4294967295 / 2\n\tdefault:\n\t\treturn 0\n\t}\n}", "func Vflt32_byte(input []byte, inputStride int, output []float32, outputStride int) {\n\tC.vDSP_vflt32((*C.int)(unsafe.Pointer(&input[0])), C.vDSP_Stride(inputStride), (*C.float)(&output[0]), C.vDSP_Stride(outputStride), minLen(len(input)/(4*inputStride), len(output)/outputStride))\n}", "func boolToFloat(b bool) float64 {\n\tif b {\n\t\treturn 1\n\t}\n\treturn 0\n}", "func ByteToFloat32(bytes []byte) float32 {\n\tbits := binary.LittleEndian.Uint32(bytes)\n\treturn math.Float32frombits(bits)\n}", "func Inf(sign int) float32 {\n\tvar v uint32\n\tif sign >= 0 {\n\t\tv = uvinf\n\t} else {\n\t\tv = uvneginf\n\t}\n\treturn Float32frombits(v)\n}", "func modf(f float64) (int float64, frac float64) {\n\tif f < 1 {\n\t\tswitch {\n\t\tcase f < 0:\n\t\t\tint, frac = Modf(-f)\n\t\t\treturn -int, -frac\n\t\tcase f == 0:\n\t\t\treturn f, f // Return -0, -0 when f == -0\n\t\t}\n\t\treturn 0, f\n\t}\n\n\tx := Float64bits(f)\n\te := uint(x>>shift)&mask - bias\n\n\t// Keep the top 12+e bits, the integer part; clear the rest.\n\tif e < 64-12 {\n\t\tx &^= 1<<(64-12-e) - 1\n\t}\n\tint = Float64frombits(x)\n\tfrac = f - int\n\treturn\n}", "func NewFromBits(bits uint16) Float {\n\treturn Float{bits: bits}\n}", "func toFloat32Slice(i interface{}) []float32 {\n\tv, _ := toFloat32SliceE(i)\n\treturn v\n}", "func FastrandUint32() uint32", "func opUI16ToF32(prgrm *CXProgram) {\n\texpr := prgrm.GetExpr()\n\tfp := prgrm.GetFramePointer()\n\n\toutV0 := float32(ReadUI16(fp, expr.Inputs[0]))\n\tWriteF32(GetFinalOffset(fp, expr.Outputs[0]), outV0)\n}", "func (tv *TypedFloat) Float32() float32 {\n\tif len(tv.Bytes) == 0 {\n\t\treturn 0.0\n\t}\n\tvar value big.Float\n\t_ = value.GobDecode(tv.Bytes)\n\tflt32, _ := value.Float32()\n\treturn flt32\n}", "func (e *Encoder) Float64(v float64) (int, error) {\n\treturn e.uint64(math.Float64bits(v))\n}", "func FloatSetInt(z *big.Float, x *big.Int,) *big.Float", "func (r *Reader) Float32() float32 {\n\treturn math.Float32frombits(r.Uint32())\n}", "func DoubleToByteCode(d float64) (uint32, uint32) {\n\tu64 := math.Float64bits(d)\n\treturn uint32(u64 & 0xFFFFFFFF), uint32(u64 >> 32)\n}", "func softfloat_commonNaNToExtF80UI(aPtr *commonNaN) Uint128 {\n\tvar uiZ Uint128\n\tif aPtr.sign 
{\n\t\tuiZ.High = uint64(1)<<15 | 0x7FFF\n\t} else {\n\t\tuiZ.High = 0x7FFF\n\t}\n\n\tuiZ.Low = uint64(0xC000000000000000) | aPtr.v64>>1\n\treturn uiZ\n}", "func GetFloatField(env *C.JNIEnv, obj C.jobject, fieldID C.jfieldID) C.jfloat {\n\treturn C._GoJniGetFloatField(env, obj, fieldID)\n}", "func getPixVal(c color.Color) float64 {\n\tr, _, _, _ := c.RGBA()\n\treturn float64(r >> 8)\n}", "func (b Bytes) ToFloat64() float64 {\n\treturn math.Float64frombits(binary.LittleEndian.Uint64(b))\n}", "func ConvertToFloat32(value interface{}) (float32, bool) {\n\tif v, ok := ConvertToFloat64(value); ok {\n\t\tif !reflect.ValueOf(float32(0)).OverflowFloat(v) {\n\t\t\treturn float32(v), true\n\t\t}\n\t}\n\treturn 0, false\n}", "func (in *InBuffer) ReadFloat64BE() float64 {\n\treturn math.Float64frombits(in.ReadUint64BE())\n}" ]
[ "0.8932094", "0.73746973", "0.7082981", "0.6395846", "0.6257228", "0.6139659", "0.60942435", "0.6084789", "0.60047704", "0.5823916", "0.5806024", "0.57731223", "0.57533044", "0.5684423", "0.5669247", "0.5609524", "0.5561731", "0.55265313", "0.5512581", "0.54936963", "0.5460315", "0.5414458", "0.5402921", "0.54023385", "0.5401935", "0.53868407", "0.5356694", "0.5356694", "0.5350094", "0.53307885", "0.5317693", "0.5316089", "0.5316089", "0.5311094", "0.53085756", "0.52906525", "0.5290011", "0.5287572", "0.5286471", "0.52726585", "0.526972", "0.5267605", "0.52672875", "0.52597845", "0.5259669", "0.52425647", "0.5228991", "0.5226879", "0.5222364", "0.5215365", "0.52056533", "0.52052385", "0.52051866", "0.519426", "0.51899695", "0.51617074", "0.51528007", "0.51525855", "0.5146826", "0.5131929", "0.51175433", "0.5106973", "0.51027834", "0.5094596", "0.50910556", "0.5088495", "0.5070294", "0.5064761", "0.5054601", "0.50466317", "0.5041453", "0.5035845", "0.50304455", "0.5026984", "0.50185794", "0.50141305", "0.5013961", "0.49964035", "0.49931726", "0.499115", "0.4984552", "0.49655232", "0.4962759", "0.49605182", "0.49520722", "0.49484813", "0.49363077", "0.49348587", "0.49291062", "0.49265018", "0.49256027", "0.49204266", "0.49131343", "0.4912414", "0.4908537", "0.49029002", "0.4898729", "0.4896158", "0.48898637", "0.48861533" ]
0.82935
1
Describe writes all the descriptors to the prometheus desc channel.
func (collector *MetricsCollector) Describe(ch chan<- *prometheus.Desc) { for k := range collector.metrics { for idxMColl := range collector.metrics[k] { ch <- collector.metrics[k][idxMColl].metricDesc } } collector.defMetrics.describe(ch) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *Collector) Describe(chan<- *prometheus.Desc) {}", "func (c *collector) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, d := range descriptors {\n\t\tch <- d\n\t}\n}", "func (c *filebeatCollector) Describe(ch chan<- *prometheus.Desc) {\n\n\tfor _, metric := range c.metrics {\n\t\tch <- metric.desc\n\t}\n\n}", "func (c *collector) Describe(ch chan<- *prometheus.Desc) {\n\tc.config.getAllDescs(ch)\n}", "func (c *metricbeatCollector) Describe(ch chan<- *prometheus.Desc) {\n\n\tfor _, metric := range c.metrics {\n\t\tch <- metric.desc\n\t}\n\n}", "func (c *auditdCollector) Describe(ch chan<- *prometheus.Desc) {\n\n\tfor _, metric := range c.metrics {\n\t\tch <- metric.desc\n\t}\n\n}", "func (c *beatCollector) Describe(ch chan<- *prometheus.Desc) {\n\n\tfor _, metric := range c.metrics {\n\t\tch <- metric.desc\n\t}\n\n}", "func (o *OSDCollector) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, metric := range o.collectorList() {\n\t\tmetric.Describe(ch)\n\t}\n\tch <- o.OSDDownDesc\n\tch <- o.ScrubbingStateDesc\n\tch <- o.PGObjectsRecoveredDesc\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tch <- connected\n\tch <- up\n\tch <- distance\n\tch <- latency\n\tch <- users\n\tch <- channels\n\tch <- ison\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tmetricCh := make(chan prometheus.Metric)\n\tdoneCh := make(chan struct{})\n\n\tgo func() {\n\t\tfor m := range metricCh {\n\t\t\tch <- m.Desc()\n\t\t}\n\t\tclose(doneCh)\n\t}()\n\n\te.Collect(metricCh)\n\tclose(metricCh)\n\t<-doneCh\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, m := range e.serverMetrics {\n\t\tch <- m\n\t}\n\tch <- solaceUp\n}", "func (c *goCollector) Describe(ch chan<- *Desc) {\n\tc.base.Describe(ch)\n\tfor _, i := range c.msMetrics {\n\t\tch <- i.desc\n\t}\n\tfor _, m := range c.rmExposedMetrics {\n\t\tch <- m.Desc()\n\t}\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, desc := range junosDesc {\n\t\tch <- desc\n\t}\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tch <- systemStatus\n\tch <- systemTemperature\n\tch <- systemPowerStatus\n\tch <- systemFanStatus\n\tch <- systemCPUFanStatus\n\tch <- systemUpgradeAvailable\n\n\tch <- memTotalSwap\n\tch <- memAvailSwap\n\tch <- memTotalReal\n\tch <- memAvailReal\n\tch <- memTotalFree\n\tch <- memShared\n\tch <- memBuffer\n\tch <- memCached\n\n\tch <- loadShort\n\tch <- loadMid\n\tch <- loadLong\n\n\tch <- cpuUser\n\tch <- cpuNice\n\tch <- cpuSystem\n\tch <- cpuIdle\n\tch <- cpuWait\n\tch <- cpuKernel\n\tch <- cpuInterrupt\n\n\tch <- netIn\n\tch <- netOut\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, m := range e.gaugeVecs {\n\t\tm.Describe(ch)\n\t}\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tch <- metric_uptime.Desc()\n\tch <- nomad_up.Desc()\n\tch <- metric_request_response_time_total.Desc()\n\tch <- metric_request_response_time_avg.Desc()\n\n\tfor _, metric := range metric_request_status_count_current {\n\t\tch <- metric.Desc()\n\t}\n\tfor _, metric := range metric_request_status_count_total {\n\t\tch <- metric.Desc()\n\t}\n}", "func (e *UwsgiExporter) Describe(ch chan<- *prometheus.Desc) {\n\te.uwsgiUp.Describe(ch)\n\te.scrapeDurations.Describe(ch)\n\n\tfor _, descs := range e.descriptorsMap {\n\t\tfor _, desc := range descs {\n\t\t\tch <- desc\n\t\t}\n\t}\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\t// We cannot know in advance what metrics the exporter will generate\n\t// from clickhouse. 
So we use the poor man's describe method: Run a collect\n\t// and send the descriptors of all the collected metrics.\n\n\tmetricCh := make(chan prometheus.Metric)\n\tdoneCh := make(chan struct{})\n\n\tgo func() {\n\t\tfor m := range metricCh {\n\t\t\tch <- m.Desc()\n\t\t}\n\t\tclose(doneCh)\n\t}()\n\n\te.Collect(metricCh)\n\tclose(metricCh)\n\t<-doneCh\n}", "func (sc *SlurmCollector) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, element := range sc.descPtrMap {\n\t\tch <- element\n\t}\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tch <- e.up.Desc()\n\n\tfor _, vec := range e.counters {\n\t\tvec.Describe(ch)\n\t}\n\n\tfor _, vec := range e.gauges {\n\t\tvec.Describe(ch)\n\t}\n}", "func (c *prometheusCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.maxOpenConnections\n\tch <- c.openConnections\n\tch <- c.inUse\n\tch <- c.idle\n\tch <- c.waitCount\n\tch <- c.waitDuration\n\tch <- c.maxIdleClosed\n\tch <- c.maxIdleTimeClosed\n\tch <- c.maxLifetimeClosed\n}", "func (c *Collector) Describe(ch chan<- *prometheus.Desc) {\n\tc.state.describe(ch)\n\tc.dbSize.describe(ch)\n\tc.uptime.describe(ch)\n\tc.clientConnections.describe(ch)\n\tc.clientReadRequests.describe(ch)\n\tc.clientWriteRequests.describe(ch)\n\tc.clientDroppedRequests.describe(ch)\n}", "func (r *RGWCollector) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, metric := range r.collectorList() {\n\t\tmetric.Describe(ch)\n\t}\n}", "func (k *KACollector) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, m := range k.metrics {\n\t\tch <- m\n\t}\n}", "func (pc *PBSCollector) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, element := range pc.descPtrMap {\n\t\tch <- element\n\t}\n}", "func (o *OSDCollector) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, metric := range o.collectorList() {\n\t\tmetric.Describe(ch)\n\t}\n\tch <- o.ScrubbingStateDesc\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\te.withCollectors(func(cs []prometheus.Collector) {\n\t\tfor _, c := range cs {\n\t\t\tc.Describe(ch)\n\t\t}\n\t})\n}", "func (a collectorAdapter) Describe(ch chan<- *prometheus.Desc) {\n\t// We have to send *some* metric in Describe, but we don't know which ones\n\t// we're going to get, so just send a dummy metric.\n\tch <- prometheus.NewDesc(\"dummy_metric\", \"Dummy metric.\", nil, nil)\n}", "func (c *StorageDomainCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- upDesc\n\tch <- masterDesc\n\tch <- availableDesc\n\tch <- usedDesc\n\tch <- committedDesc\n}", "func (c *ComputeCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.Instances\n\tch <- c.ForwardingRules\n}", "func (c *VMCollector) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, m := range c.getMetrics() {\n\t\tch <- m.Desc()\n\t}\n}", "func (collector *collector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- cpuUsageDesc\n\tch <- memUsageDesc\n\tch <- memUsagePeakDesc\n\tch <- swapUsageDesc\n\tch <- swapUsagePeakDesc\n\tch <- processCountDesc\n\tch <- containerPIDDesc\n\tch <- runningStatusDesc\n\tch <- diskUsageDesc\n\tch <- networkUsageDesc\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, m := range replicationMetrics {\n\t\tch <- m\n\t}\n\tfor _, m := range securityMetrics {\n\t\tch <- m\n\t}\n\tfor _, m := range storageMetrics {\n\t\tch <- m\n\t}\n\tfor _, m := range systemMetrics {\n\t\tch <- m\n\t}\n\tch <- artifactoryUp\n\tch <- e.totalScrapes.Desc()\n\tch <- e.jsonParseFailures.Desc()\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tch <- domainsBeingBlocked\n\tch <- 
dnsQueries\n\tch <- adsBlocked\n\tch <- adsPercentage\n\tch <- domainsOverTime\n\tch <- adsOverTime\n\tch <- topQueries\n\tch <- topAds\n\tch <- topSources\n\tch <- queryTypes\n}", "func (c *OrchestratorCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.upMetric.Desc()\n\n\tfor _, m := range c.metrics {\n\t\tch <- m\n\t}\n}", "func (c *KubernetesCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.Up\n\tch <- c.NodePools\n\tch <- c.Nodes\n}", "func (c *InterfacesCollector) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, m := range c.collectors() {\n\t\tm.Describe(ch)\n\t}\n}", "func (m *metricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) }", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tch <- e.up.Desc()\n\n\te.authCacheHits.Describe(ch)\n\te.authCacheMisses.Describe(ch)\n\te.databaseReads.Describe(ch)\n\te.databaseWrites.Describe(ch)\n\te.openDatabases.Describe(ch)\n\te.openOsFiles.Describe(ch)\n\te.requestTime.Describe(ch)\n\n\te.httpdStatusCodes.Describe(ch)\n\te.httpdRequestMethods.Describe(ch)\n\n\te.bulkRequests.Describe(ch)\n\te.clientsRequestingChanges.Describe(ch)\n\te.requests.Describe(ch)\n\te.temporaryViewReads.Describe(ch)\n\te.viewReads.Describe(ch)\n\n\te.diskSize.Describe(ch)\n\te.dataSize.Describe(ch)\n\te.diskSizeOverhead.Describe(ch)\n\n\te.activeTasks.Describe(ch)\n\te.activeTasksDatabaseCompaction.Describe(ch)\n\te.activeTasksViewCompaction.Describe(ch)\n\te.activeTasksIndexer.Describe(ch)\n\te.activeTasksReplication.Describe(ch)\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\te.cpuPercent.Describe(ch)\n\te.dataIO.Describe(ch)\n\te.logIO.Describe(ch)\n\te.memoryPercent.Describe(ch)\n\te.workPercent.Describe(ch)\n\te.sessionPercent.Describe(ch)\n\te.storagePercent.Describe(ch)\n\te.dbUp.Describe(ch)\n\te.up.Describe(ch)\n}", "func (*interfaceCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- receiveBytesDesc\n\tch <- receivePacketsDesc\n\tch <- receiveErrorsDesc\n\tch <- receiveDropsDesc\n\tch <- transmitBytesDesc\n\tch <- transmitPacketsDesc\n\tch <- transmitDropsDesc\n\tch <- transmitErrorsDesc\n\tch <- ipv6receiveBytesDesc\n\tch <- ipv6receivePacketsDesc\n\tch <- ipv6transmitBytesDesc\n\tch <- ipv6transmitPacketsDesc\n\tch <- adminStatusDesc\n\tch <- operStatusDesc\n\tch <- errorStatusDesc\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tprometheus.DescribeByCollect(e, ch)\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, cc := range e.collectors {\n\t\tcc.Describe(ch)\n\t}\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tch <- up\n\tch <- clusterServers\n\tch <- clusterLeader\n\tch <- nodeCount\n\tch <- memberStatus\n\tch <- memberWanStatus\n\tch <- serviceCount\n\tch <- serviceNodesHealthy\n\tch <- nodeChecks\n\tch <- serviceChecks\n\tch <- keyValues\n\tch <- serviceTag\n\tch <- serviceCheckNames\n}", "func (c *collector) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, co := range c.collectors {\n\t\tco.describe(ch)\n\t}\n}", "func (c *OSCollector) Describe(ch chan<- *prometheus.Desc) {\n\n\tch <- c.PhysicalMemoryFreeBytes\n\tch <- c.PagingFreeBytes\n\tch <- c.VirtualMemoryFreeBytes\n\tch <- c.ProcessesMax\n\tch <- c.ProcessMemoryMaxBytes\n\tch <- c.Processes\n\tch <- c.Users\n\tch <- c.PagingMaxBytes\n\tch <- c.VirtualMemoryBytes\n\tch <- c.VisibleMemoryBytes\n}", "func (c *DebugFsStatCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.hits\n\tch <- c.misses\n}", "func (c *Client) Describe(ch chan<- *prometheus.Desc) 
{\n\tc.metrics.functionInvocation.Describe(ch)\n\tc.metrics.queueHistogram.Describe(ch)\n\tc.metrics.functionsHistogram.Describe(ch)\n\tc.metrics.serviceReplicasGauge.Describe(ch)\n\tc.metrics.functionInvocationStarted.Describe(ch)\n}", "func (uc *UpgradeCollector) Describe(ch chan<- *prometheus.Desc) {\n\t// .spec\n\tch <- uc.managedMetrics.upgradeAt\n\tch <- uc.managedMetrics.pdbTimeout\n\n\t// .status\n\tch <- uc.managedMetrics.startTime\n\tch <- uc.managedMetrics.completeTime\n\n\t// .status.conditions[]\n\tch <- uc.managedMetrics.sendStartedNotification\n\tch <- uc.managedMetrics.preHealthCheck\n\tch <- uc.managedMetrics.extDepAvailCheck\n\tch <- uc.managedMetrics.scaleUpExtraNodes\n\tch <- uc.managedMetrics.controlPlaneMaintWindow\n\tch <- uc.managedMetrics.commenceUpgrade\n\tch <- uc.managedMetrics.controlPlaneUpgraded\n\tch <- uc.managedMetrics.removeControlPlaneMaint\n\tch <- uc.managedMetrics.workersMaintWindow\n\tch <- uc.managedMetrics.allWorkerNodesUpgraded\n\tch <- uc.managedMetrics.removeExtraScaledNodes\n\tch <- uc.managedMetrics.removeMaintWindow\n\tch <- uc.managedMetrics.postClusterHealthCheck\n\tch <- uc.managedMetrics.sendCompletedNotification\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\te.statusNightscout.Describe(ch)\n}", "func (c *PostfixQueueCollector) Describe(ch chan<- *prometheus.Desc) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tc.ageSecondsHistogram.Describe(ch)\n\tc.sizeBytesHistogram.Describe(ch)\n\tc.scrapeDurationGauge.Describe(ch)\n\tc.scrapeSuccessGauge.Describe(ch)\n}", "func (c *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, cc := range c.collectors {\n\t\tcc.Describe(ch)\n\t}\n}", "func (c *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, cc := range c.collectors {\n\t\tcc.Describe(ch)\n\t}\n}", "func (c *collector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- scrapeDurationDesc\n\tpower.Describe(ch)\n\tthermal.Describe(ch)\n}", "func (dc *daemonsetCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- descDaemonSetCreated\n\tch <- descDaemonSetCurrentNumberScheduled\n\tch <- descDaemonSetNumberAvailable\n\tch <- descDaemonSetNumberMisscheduled\n\tch <- descDaemonSetNumberUnavailable\n\tch <- descDaemonSetDesiredNumberScheduled\n\tch <- descDaemonSetNumberReady\n\tch <- descDaemonSetUpdatedNumberScheduled\n\tch <- descDaemonSetMetadataGeneration\n\tch <- descDaemonSetLabels\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tch <- e.poolUsage.Desc()\n\tch <- e.providersOnline.Desc()\n\tch <- e.providersFaulted.Desc()\n}", "func (b Blackbox) Describe(descs chan<- *prometheus.Desc) {\n\tprometheus.DescribeByCollect(b, descs)\n}", "func (n NodeCollector) Describe(ch chan<- *prometheus.Desc) {\n\tscrapeDurations.Describe(ch)\n}", "func (c *SecurityGroupCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.Defined\n\tch <- c.EnableDefault\n\tch <- c.ProjectDefault\n\tch <- c.Stateful\n\tch <- c.InboundDefault\n\tch <- c.OutboundDefault\n\tch <- c.Servers\n\tch <- c.Created\n\tch <- c.Modified\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\te.up.Describe(ch)\n\te.scrapeDuration.Describe(ch)\n\te.failedScrapes.Describe(ch)\n\te.totalScrapes.Describe(ch)\n\tch <- e.priceDesc\n\tch <- e.openDesc\n\tch <- e.detailsDesc\n}", "func (n LXCCollector) Describe(ch chan<- *prometheus.Desc) {\n\tscrapeDurations.Describe(ch)\n}", "func (c *StorageQuotaCollector) Describe(ch chan<- *prometheus.Desc) {\n\tds := []*prometheus.Desc{\n\t\tc.HardDesc,\n\t\tc.UsedDesc,\n\t}\n\n\tfor _, 
d := range ds {\n\t\tch <- d\n\t}\n}", "func (c *MosquittoCounter) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.Desc\n}", "func (c *ClusterManager) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.OOMCountDesc\n\tch <- c.RAMUsageDesc\n}", "func (collector *Metrics) Describe(ch chan<- *prometheus.Desc) {\n\tch <- collector.issue\n}", "func (m httpReferenceDiscoveryMetrics) Describe(descs chan<- *prometheus.Desc) {\n\tprometheus.DescribeByCollect(m, descs)\n}", "func (e *ebpfConntracker) Describe(ch chan<- *prometheus.Desc) {\n\tch <- conntrackerTelemetry.registersTotal\n}", "func (m *metricMap) Describe(ch chan<- *Desc) {\n\tch <- m.desc\n}", "func (*noOpConntracker) Describe(ch chan<- *prometheus.Desc) {}", "func (o *requestMetrics) Describe(ch chan<- *prometheus.Desc) {\n\tch <- o.desc\n}", "func (w *Writer) Describe(ch chan<- *prometheus.Desc) {\n\tw.kafkaWriteStatus.Describe(ch)\n\tw.queuedForWrites.Describe(ch)\n}", "func (a *AttunityCollector) Describe(ch chan<- *prometheus.Desc) {\n\t// Hi I do nothing\n}", "func (p *plug) Describe(ch chan<- *prometheus.Desc) {\n\tp.doStats(ch, doDesc)\n}", "func (p *Metrics) Describe(c chan<- *prometheus.Desc) {\n\tprometheus.NewGauge(prometheus.GaugeOpts{Name: \"Dummy\", Help: \"Dummy\"}).Describe(c)\n}", "func (m httpPostMetrics) Describe(descs chan<- *prometheus.Desc) {\n\tprometheus.DescribeByCollect(m, descs)\n}", "func (c *solarCollector) Describe(ch chan<- *prometheus.Desc) {\n\n\t// Describe the Collector's member that are of type Desc\n\tds := []*prometheus.Desc{\n\t\tc.panelVoltage,\n\t}\n\n\tfor _, d := range ds {\n\t\tch <- d\n\t}\n\t// Describe the other types\n\tc.scrapeFailures.Describe(ch)\n}", "func (c *ImageCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.Active\n\tch <- c.ImageSize\n\tch <- c.DiskSize\n\tch <- c.Created\n\tch <- c.Deprecated\n}", "func (c *SVCResponse) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, metric := range c.metrics {\n\t\tch <- metric.Desc\n\t}\n\n\tch <- c.data.Desc\n\tch <- c.up.Desc()\n\tch <- c.totalScrapes.Desc()\n\tch <- c.jsonParseFailures.Desc()\n}", "func (p *Collector) Describe(c chan<- *prometheus.Desc) {\n\t// We must emit some description otherwise an error is returned. 
This\n\t// description isn't shown to the user!\n\tprometheus.NewGauge(prometheus.GaugeOpts{Name: \"Dummy\", Help: \"Dummy\"}).Describe(c)\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tch <- e.up.Desc()\n\n\tfor _, vec := range e.gauges {\n\t\tvec.Describe(ch)\n\t}\n}", "func (n DellHWCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- scrapeDurationDesc\n\tch <- scrapeSuccessDesc\n}", "func (c *Collector) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, scraper := range c.Scrapers {\n\t\tfor _, metric := range scraper.Metrics {\n\t\t\tch <- metric.metric\n\t\t}\n\t}\n}", "func (c *PTVMetricsCollector) Describe(ch chan<- *prometheus.Desc) {\n\tdefer timeTrack(time.Now(), \"PTVMetricsCollector.Describe\")\n\n\tc.metrics.Describe(ch)\n}", "func (e *DTRRethinkDBExporter) Describe(ch chan<- *prometheus.Desc) {\n\tch <- e.metrics.clusterClientConnections\n\tch <- e.metrics.clusterClientsActive\n\tch <- e.metrics.clusterDocsPerSecond\n\tch <- e.metrics.clusterQueriesPerSecond\n\n\tch <- e.metrics.serverClientConnections\n\tch <- e.metrics.serverClientsActive\n\tch <- e.metrics.serverQueriesPerSecond\n\tch <- e.metrics.serverQueriesTotal\n\tch <- e.metrics.serverDocsPerSecond\n\tch <- e.metrics.serverDocsTotal\n\n\tch <- e.metrics.tableDocsPerSecond\n\n\tch <- e.metrics.tableReplicaDocsPerSecond\n\tch <- e.metrics.tableReplicaCacheBytes\n\tch <- e.metrics.tableReplicaIO\n\tch <- e.metrics.tableReplicaDataBytes\n\tch <- e.metrics.tableReplicaGarbageBytes\n\tch <- e.metrics.tableReplicaMetaDataBytes\n\tch <- e.metrics.tableReplicaPreAllocatedBytes\n\n\tch <- e.metrics.scrapeLatency\n\n\tch <- e.metrics.serverHealth\n\tch <- e.metrics.serverTotal\n\n\tch <- e.metrics.tableStatus\n\n\tch <- e.metrics.jobTotals\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\te.up.Describe(ch)\n\te.totalScrapes.Describe(ch)\n\te.exchangeStatus.Describe(ch)\n\te.ltp.Describe(ch)\n\te.bestBid.Describe(ch)\n\te.bestAsk.Describe(ch)\n\te.bestBidSize.Describe(ch)\n\te.bestAskSize.Describe(ch)\n\te.totalBidDepth.Describe(ch)\n\te.totalAskDepth.Describe(ch)\n\te.volume.Describe(ch)\n\te.volumeByProduct.Describe(ch)\n}", "func (collector *OpenweatherCollector) Describe(ch chan<- *prometheus.Desc) {\n\n\t//Update this section with the each metric you create for a given collector\n\tch <- collector.temperatureMetric\n\tch <- collector.humidity\n\tch <- collector.feelslike\n\tch <- collector.pressure\n\tch <- collector.windspeed\n\tch <- collector.rain1h\n\tch <- collector.winddegree\n\tch <- collector.cloudiness\n\tch <- collector.sunrise\n\tch <- collector.sunset\n\tch <- collector.currentconditions\n}", "func (c *CephExporter) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, cc := range c.collectors {\n\t\tcc.Describe(ch)\n\t}\n}", "func (p *pool) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, col := range p.collectors() {\n\t\tcol.Describe(ch)\n\t}\n}", "func (pc *PrometheusCollector) Describe(ch chan<- *prometheus.Desc) {\n\tprometheus.DescribeByCollect(pc, ch)\n}", "func (c *ledCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.brightness\n\tch <- c.maxBrightness\n}", "func (c *Collector) Describe(ch chan<- *prometheus.Desc) {\n\tprometheus.DescribeByCollect(c, ch)\n}", "func (h *Metrics) Describe(in chan<- *prometheus.Desc) {\n\th.duration.Describe(in)\n\th.totalRequests.Describe(in)\n\th.requestSize.Describe(in)\n\th.responseSize.Describe(in)\n\th.handlerStatuses.Describe(in)\n\th.responseTime.Describe(in)\n}", "func (o *observer) Describe(ch chan<- 
*prometheus.Desc) {\n\to.updateError.Describe(ch)\n\to.verifyError.Describe(ch)\n\to.expiration.Describe(ch)\n}", "func (m *MetricVec) Describe(ch chan<- *Desc) {\n\tch <- m.desc\n}", "func (c *MetricsCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.verify\n}", "func (dc *deploymentconfigurationCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- descDeploymentConfigurationCreated\n\tch <- descDeploymentConfigurationStatusReadyReplicas\n\tch <- descDeploymentConfigurationStatusAvailableReplicas\n}", "func (stats *ConnPoolStats) Describe(ch chan<- *prometheus.Desc) {\n\tch <- syncClientConnectionsDesc\n\tch <- numAScopedConnectionsDesc\n\tch <- totalInUseDesc\n\tch <- totalAvailableDesc\n\tch <- totalCreatedDesc\n}", "func (c *UPSCollector) Describe(ch chan<- *prometheus.Desc) {\n\tds := []*prometheus.Desc{\n\t\tc.UPSLoadPercent,\n\t\tc.BatteryChargePercent,\n\t\tc.LineVolts,\n\t\tc.LineNominalVolts,\n\t\tc.BatteryVolts,\n\t\tc.BatteryNominalVolts,\n\t\tc.BatteryNumberTransfersTotal,\n\t\tc.BatteryTimeLeftSeconds,\n\t\tc.BatteryTimeOnSeconds,\n\t\tc.BatteryCumulativeTimeOnSecondsTotal,\n\t\tc.UPSStatus,\n\t\tc.UPSInfo,\n\t}\n\n\tfor _, d := range ds {\n\t\tch <- d\n\t}\n}", "func (t *TimestampCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- t.Description\n}", "func (c *LocalRouterCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.Up\n\tch <- c.LocalRouterInfo\n\tch <- c.SwitchInfo\n\tch <- c.NetworkInfo\n\tch <- c.PeerInfo\n\tch <- c.PeerUp\n\tch <- c.StaticRouteInfo\n\tch <- c.ReceiveBytesPerSec\n\tch <- c.SendBytesPerSec\n}" ]
[ "0.8205519", "0.81644803", "0.79042935", "0.789001", "0.7877866", "0.7847287", "0.7846724", "0.78357315", "0.78223747", "0.78189427", "0.78070813", "0.7794344", "0.7790456", "0.77866", "0.776235", "0.7761977", "0.77538306", "0.7751365", "0.7747985", "0.7731575", "0.772384", "0.77103186", "0.76896197", "0.7684635", "0.7680025", "0.7672815", "0.76705277", "0.76618385", "0.76480347", "0.76326895", "0.7632569", "0.7628375", "0.7618191", "0.7613223", "0.7600867", "0.75981", "0.75889176", "0.75778407", "0.75510955", "0.7542883", "0.7524518", "0.752418", "0.7523882", "0.750535", "0.7503546", "0.7487748", "0.7478195", "0.7476558", "0.74756944", "0.7475575", "0.7470406", "0.7467459", "0.7467459", "0.7463824", "0.74620867", "0.7459286", "0.74499273", "0.7449458", "0.7443827", "0.7438632", "0.74331695", "0.7429327", "0.7418921", "0.7414775", "0.7412225", "0.73986036", "0.73944616", "0.7393463", "0.73862827", "0.7384433", "0.7378919", "0.73771566", "0.7368743", "0.7368516", "0.73532647", "0.73497206", "0.73451847", "0.73450714", "0.73384815", "0.73299915", "0.7329277", "0.7328344", "0.73227924", "0.7322696", "0.73171663", "0.7304582", "0.7302879", "0.7302433", "0.72957146", "0.7292321", "0.7289326", "0.7284469", "0.7255776", "0.7251919", "0.7248549", "0.7236597", "0.7236378", "0.7232421", "0.7215074", "0.72118825" ]
0.7817006
10
Collect updates all the descriptors' values
func (collector *MetricsCollector) Collect(ch chan<- prometheus.Metric) { filterMetricsByKind := func(kind string, orgMetrics []constMetric) (filteredMetrics []constMetric) { for _, metric := range orgMetrics { if metric.kind == kind { filteredMetrics = append(filteredMetrics, metric) } } return filteredMetrics } collector.defMetrics.reset() for k := range collector.metrics { counters := filterMetricsByKind(config.KeyMetricTypeCounter, collector.metrics[k]) gauges := filterMetricsByKind(config.KeyMetricTypeGauge, collector.metrics[k]) histograms := filterMetricsByKind(config.KeyMetricTypeHistogram, collector.metrics[k]) collectCounters(counters, collector.defMetrics, ch) collectGauges(gauges, collector.defMetrics, ch) collectHistograms(histograms, collector.defMetrics, ch) collector.cache.Reset() } collector.defMetrics.collectDefaultMetrics(ch) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m MessageDescriptorMap) Updated() (updated []string) {\n\tfor id, descriptor := range m {\n\t\tif descriptor.Updated() {\n\t\t\tupdated = append(updated, id)\n\t\t}\n\t}\n\treturn\n}", "func (c *collector) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, d := range descriptors {\n\t\tch <- d\n\t}\n}", "func updateCollector(d *schema.ResourceData, m interface{}) error {\n\td.Partial(true)\n\tclient := m.(*lmclient.LMSdkGo)\n\tcollector := makeDeviceCollectorObject(d)\n\n\t// get collector id\n\tid, err := strconv.Atoi(d.Id())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tparams := lm.NewUpdateCollectorByIDParams()\n\tparams.SetID(int32(id))\n\tparams.SetBody(&collector)\n\n\t// list of available properties\n\ts := []string{\n\t\t\"backup_collector_id\",\n\t\t\"collector_group_id\",\n\t\t\"description\",\n\t\t\"enable_failback\",\n\t\t\"enable_collector_device_failover\",\n\t\t\"escalation_chain_id\",\n\t\t\"properties\",\n\t\t\"resend_interval\",\n\t\t\"suppress_alert_clear\",\n\t}\n\n\t// loops through array of properties to see which one has changed, the ones that did not change are removed from the list\n\tfor _, v := range s {\n\t\tif d.HasChange(v) {\n\t\t} else {\n\t\t\ts = remove(s, v)\n\t\t}\n\t}\n\n\t// makes a bulk update for all properties that were changed\n\trestCollectorResponse, err := client.LM.UpdateCollectorByID(params)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, v := range s {\n\t\td.SetPartial(v)\n\t}\n\tlog.Printf(\"updating collector response %v\", restCollectorResponse.Payload)\n\td.Partial(false)\n\treturn nil\n}", "func (c *ospfCollector) Update(ch chan<- prometheus.Metric) error {\n\tcmd := \"show ip ospf vrf all interface json\"\n\n\tif len(c.instanceIDs) > 0 {\n\t\tfor _, id := range c.instanceIDs {\n\t\t\tjsonOSPFInterface, err := executeOSPFMultiInstanceCommand(cmd, id)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err = processOSPFInterface(ch, jsonOSPFInterface, c.descriptions, id); err != nil {\n\t\t\t\treturn cmdOutputProcessError(cmd, string(jsonOSPFInterface), err)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\tjsonOSPFInterface, err := executeOSPFCommand(cmd)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = processOSPFInterface(ch, jsonOSPFInterface, c.descriptions, 0); err != nil {\n\t\treturn cmdOutputProcessError(cmd, string(jsonOSPFInterface), err)\n\t}\n\treturn nil\n}", "func (s Subdirectory) descriptors() []FileDescriptor {\n\tvar descs []FileDescriptor\n\n\tdescs = append(descs, s.keyBlock.Descriptors[:]...)\n\tfor _, block := range s.blocks {\n\t\tdescs = append(descs, block.Descriptors[:]...)\n\t}\n\treturn descs\n}", "func (c *InterfacesCollector) collect() {\n\tfor s := range c.statC {\n\t\tfor _, ifi := range s {\n\t\t\tlabels := []string{\n\t\t\t\tifi.Name,\n\t\t\t\tifi.MAC.String(),\n\t\t\t}\n\n\t\t\tc.ReceivedBytes.WithLabelValues(labels...).Set(float64(ifi.Stats.ReceiveBytes))\n\t\t\tc.TransmittedBytes.WithLabelValues(labels...).Set(float64(ifi.Stats.TransmitBytes))\n\t\t}\n\t}\n}", "func (co *VMICollector) Collect(ch chan<- prometheus.Metric) {\n\tcachedObjs := co.vmiInformer.GetIndexer().List()\n\tif len(cachedObjs) == 0 {\n\t\tlog.Log.V(4).Infof(\"No VMIs detected\")\n\t\treturn\n\t}\n\n\tvmis := make([]*k6tv1.VirtualMachineInstance, len(cachedObjs))\n\n\tfor i, obj := range cachedObjs {\n\t\tvmis[i] = obj.(*k6tv1.VirtualMachineInstance)\n\t}\n\n\tco.updateVMIsPhase(vmis, ch)\n\tco.updateVMIMetrics(vmis, ch)\n\treturn\n}", "func (c networkDependencyCollector) Update(ch chan<- prometheus.Metric) error 
{\n\ttraffic := darkstat.Get()\n\tserverProcesses, upstreams, downstreams := socketstat.Get()\n\tlocalInventory := inventory.GetLocalInventory()\n\n\tfor _, m := range traffic {\n\t\tch <- prometheus.MustNewConstMetric(c.traffic, prometheus.GaugeValue, m.Bandwidth,\n\t\t\tm.LocalHostgroup, m.Direction, m.RemoteHostgroup, m.RemoteIPAddr, m.LocalDomain, m.RemoteDomain)\n\t}\n\tfor _, m := range upstreams {\n\t\tch <- prometheus.MustNewConstMetric(c.upstream, prometheus.GaugeValue, 1,\n\t\t\tm.LocalHostgroup, m.RemoteHostgroup, m.LocalAddress, m.RemoteAddress, m.Port, m.Protocol, m.ProcessName)\n\t}\n\tfor _, m := range downstreams {\n\t\tch <- prometheus.MustNewConstMetric(c.downstream, prometheus.GaugeValue, 1,\n\t\t\tm.LocalHostgroup, m.RemoteHostgroup, m.LocalAddress, m.RemoteAddress, m.Port, m.Protocol, m.ProcessName)\n\t}\n\tfor _, m := range serverProcesses {\n\t\tch <- prometheus.MustNewConstMetric(c.serverProcesses, prometheus.GaugeValue, 1,\n\t\t\tlocalInventory.Hostgroup, m.Bind, m.Name, m.Port)\n\t}\n\n\treturn nil\n}", "func (c *auditdCollector) Collect(ch chan<- prometheus.Metric) {\n\n\tfor _, i := range c.metrics {\n\t\tch <- prometheus.MustNewConstMetric(i.desc, i.valType, i.eval(c.stats))\n\t}\n\n}", "func (c *meminfoCollector) Update(ch chan<- prometheus.Metric) error {\n\tvar metricType prometheus.ValueType\n\tmemInfo, err := c.getMemInfo()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"couldn't get meminfo: %w\", err)\n\t}\n\tlevel.Debug(c.logger).Log(\"msg\", \"Set node_mem\", \"memInfo\", memInfo)\n\tfor k, v := range memInfo {\n\t\tif strings.HasSuffix(k, \"_total\") {\n\t\t\tmetricType = prometheus.CounterValue\n\t\t} else {\n\t\t\tmetricType = prometheus.GaugeValue\n\t\t}\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tprometheus.NewDesc(\n\t\t\t\tprometheus.BuildFQName(namespace, memInfoSubsystem, k),\n\t\t\t\tfmt.Sprintf(\"Memory information field %s.\", k),\n\t\t\t\tnil, nil,\n\t\t\t),\n\t\t\tmetricType, v,\n\t\t)\n\t}\n\treturn nil\n}", "func (h *descriptorHandler) update(key string, oldValue, newValue proto.Message, oldMetadata kvs.Metadata) (newMetadata kvs.Metadata, err error) {\n\tif h.descriptor == nil {\n\t\treturn oldMetadata, nil\n\t}\n\tdefer trackDescMethod(h.descriptor.Name, \"Update\")()\n\tnewMetadata, err = h.descriptor.Update(key, oldValue, newValue, oldMetadata)\n\tif nsErr := checkNetNs(); nsErr != nil {\n\t\terr = nsErr\n\t}\n\treturn newMetadata, err\n}", "func (pc *PBSCollector) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, element := range pc.descPtrMap {\n\t\tch <- element\n\t}\n}", "func (v Volume) descriptors() []FileDescriptor {\n\tvar descs []FileDescriptor\n\n\tdescs = append(descs, v.keyBlock.Descriptors[:]...)\n\tfor _, block := range v.blocks {\n\t\tdescs = append(descs, block.Descriptors[:]...)\n\t}\n\treturn descs\n}", "func (em envmap) update(o envmap) {\n\tfor k, v := range o {\n\t\tem[k] = v\n\t}\n}", "func (c *interfaceCollector) Collect(client *rpc.Client, ch chan<- prometheus.Metric, labelValues []string) error {\n\tstats, err := c.interfaceStats(client)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, s := range stats {\n\t\tc.collectForInterface(s, ch, labelValues)\n\t}\n\n\treturn nil\n}", "func (o *observer) Collect(ch chan<- prometheus.Metric) {\n\to.updateError.Collect(ch)\n\to.verifyError.Collect(ch)\n\to.expiration.Collect(ch)\n}", "func (c *ledCollector) Collect(ch chan<- prometheus.Metric) {\n\tfor _, led := range c.leds {\n\t\tn := name(led.Name())\n\t\tbrightness, err := led.Brightness()\n\t\tif err != nil 
{\n\t\t\tcontinue\n\t\t}\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.brightness,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(brightness),\n\t\t\tn,\n\t\t)\n\t\tmaxBrightness, err := led.MaxBrightness()\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.maxBrightness,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(maxBrightness),\n\t\t\tn,\n\t\t)\n\t}\n}", "func (q channelQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {\n\tqueries.SetUpdate(q.Query, cols)\n\n\tresult, err := q.Query.ExecContext(ctx, exec)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: unable to update all for channels\")\n\t}\n\n\trowsAff, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: unable to retrieve rows affected for channels\")\n\t}\n\n\treturn rowsAff, nil\n}", "func (c *InterfacesCollector) Collect(ch chan<- prometheus.Metric) {\n\tfor _, m := range c.collectors() {\n\t\tm.Collect(ch)\n\t}\n}", "func (m *manager) fingerprint() {\n\tfor {\n\t\tselect {\n\t\tcase <-m.ctx.Done():\n\t\t\treturn\n\t\tcase <-m.fingerprintResCh:\n\t\t}\n\n\t\t// Collect the data\n\t\tvar fingerprinted []*device.DeviceGroup\n\t\tfor _, i := range m.instances {\n\t\t\tfingerprinted = append(fingerprinted, i.Devices()...)\n\t\t}\n\n\t\t// Convert and update\n\t\tout := make([]*structs.NodeDeviceResource, len(fingerprinted))\n\t\tfor i, f := range fingerprinted {\n\t\t\tout[i] = convertDeviceGroup(f)\n\t\t}\n\n\t\t// Call the updater\n\t\tm.updater(out)\n\t}\n}", "func (sc *SlurmCollector) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, element := range sc.descPtrMap {\n\t\tch <- element\n\t}\n}", "func (p *Collector) Collect(c chan<- prometheus.Metric) {\n\tp.Sink.mu.Lock()\n\tdefer p.Sink.mu.Unlock()\n\n\texpire := p.Sink.expiration != 0\n\tnow := time.Now()\n\tfor k, v := range p.Sink.gauges {\n\t\tlast := p.Sink.updates[k]\n\t\tif expire && last.Add(p.Sink.expiration).Before(now) {\n\t\t\tdelete(p.Sink.updates, k)\n\t\t\tdelete(p.Sink.gauges, k)\n\t\t} else {\n\t\t\tv.Collect(c)\n\t\t}\n\t}\n\tfor k, v := range p.Sink.summaries {\n\t\tlast := p.Sink.updates[k]\n\t\tif expire && last.Add(p.Sink.expiration).Before(now) {\n\t\t\tdelete(p.Sink.updates, k)\n\t\t\tdelete(p.Sink.summaries, k)\n\t\t} else {\n\t\t\tv.Collect(c)\n\t\t}\n\t}\n\tfor k, v := range p.Sink.counters {\n\t\tlast := p.Sink.updates[k]\n\t\tif expire && last.Add(p.Sink.expiration).Before(now) {\n\t\t\tdelete(p.Sink.updates, k)\n\t\t\tdelete(p.Sink.counters, k)\n\t\t} else {\n\t\t\tv.Collect(c)\n\t\t}\n\t}\n}", "func (*PatchCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{161}\n}", "func updateAndClear(conf *Configuration, slicev reflect.Value, c *mgo.Collection) reflect.Value {\n\tif err := updateRecords(conf, slicev, c); err != nil {\n\t\tlog.Printf(\"Not updated. 
%v\", err)\n\t\treturn slicev\n\t}\n\n\treturn reflect.MakeSlice(slicev.Type(), 0, 0)\n}", "func (*BatchUpdateReferencesRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_proto_icas_icas_proto_rawDescGZIP(), []int{1}\n}", "func (c *CadvisorCollector) Collect(ch chan<- datapoint.Datapoint) {\n\tc.collectMachineInfo(ch)\n\tc.collectVersionInfo(ch)\n\tc.collectContainersInfo(ch)\n\t//c.errors.Collect(ch)\n}", "func (a collectorAdapter) Collect(ch chan<- prometheus.Metric) {\n\tif err := a.Update(ch); err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to update collector: %v\", err))\n\t}\n}", "func (c *CephExporter) Collect(ch chan<- prometheus.Metric) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tfor _, cc := range c.collectors {\n\t\tcc.Collect(ch)\n\t}\n}", "func (q descriptionQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {\n\tqueries.SetUpdate(q.Query, cols)\n\n\tresult, err := q.Query.ExecContext(ctx, exec)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: unable to update all for descriptions\")\n\t}\n\n\trowsAff, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: unable to retrieve rows affected for descriptions\")\n\t}\n\n\treturn rowsAff, nil\n}", "func (c *Client) Collect(ch chan<- prometheus.Metric) {\n\tc.metrics.functionInvocation.Collect(ch)\n\tc.metrics.functionsHistogram.Collect(ch)\n\tc.metrics.queueHistogram.Collect(ch)\n\tc.metrics.functionInvocationStarted.Collect(ch)\n\tc.metrics.serviceReplicasGauge.Reset()\n\tfor _, service := range c.services {\n\t\tvar serviceName string\n\t\tif len(service.Namespace) > 0 {\n\t\t\tserviceName = fmt.Sprintf(\"%s.%s\", service.Name, service.Namespace)\n\t\t} else {\n\t\t\tserviceName = service.Name\n\t\t}\n\t\tc.metrics.serviceReplicasGauge.\n\t\t\tWithLabelValues(serviceName).\n\t\t\tSet(float64(service.Replicas))\n\t}\n\tc.metrics.serviceReplicasGauge.Collect(ch)\n}", "func (c *filebeatCollector) Collect(ch chan<- prometheus.Metric) {\n\n\tfor _, i := range c.metrics {\n\t\tch <- prometheus.MustNewConstMetric(i.desc, i.valType, i.eval(c.stats))\n\t}\n\n}", "func (*UpdateCollectionReply) Descriptor() ([]byte, []int) {\n\treturn file_threads_proto_rawDescGZIP(), []int{16}\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, m := range e.gaugeVecs {\n\t\tm.Describe(ch)\n\t}\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.withCollectors(func(cs []prometheus.Collector) {\n\t\tfor _, c := range cs {\n\t\t\tc.Collect(ch)\n\t\t}\n\t})\n}", "func (p *promProducer) Collect(ch chan<- prometheus.Metric) {\n\tfor _, obj := range p.store.Objects() {\n\t\tmessage, ok := obj.(producers.MetricsMessage)\n\t\tif !ok {\n\t\t\tpromLog.Warnf(\"Unsupported message type %T\", obj)\n\t\t\tcontinue\n\t\t}\n\t\tdims := dimsToMap(message.Dimensions)\n\n\t\tfor _, d := range message.Datapoints {\n\t\t\tpromLog.Debugf(\"Processing datapoint %s\", d.Name)\n\t\t\tvar tagKeys []string\n\t\t\tvar tagVals []string\n\t\t\tfor k, v := range dims {\n\t\t\t\ttagKeys = append(tagKeys, sanitizeName(k))\n\t\t\t\ttagVals = append(tagVals, v)\n\t\t\t}\n\t\t\tfor k, v := range d.Tags {\n\t\t\t\ttagKeys = append(tagKeys, sanitizeName(k))\n\t\t\t\ttagVals = append(tagVals, v)\n\t\t\t}\n\n\t\t\tname := sanitizeName(d.Name)\n\t\t\tval, err := coerceToFloat(d.Value)\n\t\t\tif err != nil {\n\t\t\t\tpromLog.Warnf(\"Bad datapoint value %q: %s\", d.Value, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tdesc := prometheus.NewDesc(name, \"DC/OS Metrics Datapoint\", 
tagKeys, nil)\n\t\t\tmetric, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, val, tagVals...)\n\t\t\tif err != nil {\n\t\t\t\tpromLog.Warnf(\"Could not create Prometheus metric %s: %s\", name, err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tpromLog.Debugf(\"Emitting datapoint %s\", name)\n\t\t\tch <- metric\n\t\t}\n\n\t}\n}", "func (c *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tfor _, cc := range c.collectors {\n\t\tcc.Collect(ch)\n\t}\n}", "func (c *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tfor _, cc := range c.collectors {\n\t\tcc.Collect(ch)\n\t}\n}", "func (node_p *Node) collector() {\n\twg := sync.WaitGroup{}\n\tfor env := range node_p.nchan {\n\t\tswitch env.Pkt.Type {\n\t\tcase pkt.PingRes:\n\t\t\twg.Add(1)\n\t\t\tgo func() { node_p.nmap.Insert(node_p.hash, env.Pkt.Hash, env.Addr.IP, node_p.conn); wg.Done() }()\n\t\tcase pkt.Store:\n\t\t\twg.Add(1)\n\t\t\tgo func() { node_p.omap.Insert(node_p.hash, env.Pkt.Obj); wg.Done() }()\n\t\t}\n\t}\n\twg.Wait()\n}", "func UpdateAll(ui UI, data *UIData) {\n\tvar wg sync.WaitGroup\n\tfor _, service := range data.Services {\n\t\twg.Add(1)\n\t\tgo service.Update(&wg)\n\t}\n\twg.Wait()\n\n\tdata.LastTimestamp = time.Now()\n\n\tui.Update(*data)\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mu.Lock()\n\tdefer e.mu.Unlock()\n\n\tfor _, cc := range e.collectors {\n\t\tcc.Collect(ch)\n\t}\n}", "func (c *metricbeatCollector) Collect(ch chan<- prometheus.Metric) {\n\n\tfor _, i := range c.metrics {\n\t\tch <- prometheus.MustNewConstMetric(i.desc, i.valType, i.eval(c.stats))\n\t}\n\n}", "func (c *ComputeCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.Instances\n\tch <- c.ForwardingRules\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock()\n\tdefer e.mutex.Unlock()\n\n\tfor _, vec := range e.gauges {\n\t\tvec.Reset()\n\t}\n\n\tdefer func() { ch <- e.up }()\n\n\t// If we fail at any point in retrieving GPU status, we fail 0\n\te.up.Set(1)\n\n\te.GetTelemetryFromNVML()\n\n\tfor _, vec := range e.gauges {\n\t\tvec.Collect(ch)\n\t}\n}", "func (c *beatCollector) Collect(ch chan<- prometheus.Metric) {\n\n\tfor _, i := range c.metrics {\n\t\tch <- prometheus.MustNewConstMetric(i.desc, i.valType, i.eval(c.stats))\n\t}\n\n}", "func (x *fastReflection_Evidence) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {\n\tswitch fd.FullName() {\n\tcase \"tendermint.types.Evidence.duplicate_vote_evidence\":\n\t\tif x.Sum == nil {\n\t\t\tvalue := &DuplicateVoteEvidence{}\n\t\t\toneofValue := &Evidence_DuplicateVoteEvidence{DuplicateVoteEvidence: value}\n\t\t\tx.Sum = oneofValue\n\t\t\treturn protoreflect.ValueOfMessage(value.ProtoReflect())\n\t\t}\n\t\tswitch m := x.Sum.(type) {\n\t\tcase *Evidence_DuplicateVoteEvidence:\n\t\t\treturn protoreflect.ValueOfMessage(m.DuplicateVoteEvidence.ProtoReflect())\n\t\tdefault:\n\t\t\tvalue := &DuplicateVoteEvidence{}\n\t\t\toneofValue := &Evidence_DuplicateVoteEvidence{DuplicateVoteEvidence: value}\n\t\t\tx.Sum = oneofValue\n\t\t\treturn protoreflect.ValueOfMessage(value.ProtoReflect())\n\t\t}\n\tcase \"tendermint.types.Evidence.light_client_attack_evidence\":\n\t\tif x.Sum == nil {\n\t\t\tvalue := &LightClientAttackEvidence{}\n\t\t\toneofValue := &Evidence_LightClientAttackEvidence{LightClientAttackEvidence: value}\n\t\t\tx.Sum = oneofValue\n\t\t\treturn protoreflect.ValueOfMessage(value.ProtoReflect())\n\t\t}\n\t\tswitch m := x.Sum.(type) {\n\t\tcase 
*Evidence_LightClientAttackEvidence:\n\t\t\treturn protoreflect.ValueOfMessage(m.LightClientAttackEvidence.ProtoReflect())\n\t\tdefault:\n\t\t\tvalue := &LightClientAttackEvidence{}\n\t\t\toneofValue := &Evidence_LightClientAttackEvidence{LightClientAttackEvidence: value}\n\t\t\tx.Sum = oneofValue\n\t\t\treturn protoreflect.ValueOfMessage(value.ProtoReflect())\n\t\t}\n\tdefault:\n\t\tif fd.IsExtension() {\n\t\t\tpanic(fmt.Errorf(\"proto3 declared messages do not support extensions: tendermint.types.Evidence\"))\n\t\t}\n\t\tpanic(fmt.Errorf(\"message tendermint.types.Evidence does not contain field %s\", fd.FullName()))\n\t}\n}", "func (q peerQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {\n\tqueries.SetUpdate(q.Query, cols)\n\n\tresult, err := q.Query.ExecContext(ctx, exec)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"model: unable to update all for peers\")\n\t}\n\n\trowsAff, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"model: unable to retrieve rows affected for peers\")\n\t}\n\n\treturn rowsAff, nil\n}", "func (c *typeReflectCache) update(updates reflectCacheMap) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tcurrentCacheMap := c.value.Load().(reflectCacheMap)\n\n\thasNewEntries := false\n\tfor t := range updates {\n\t\tif _, ok := currentCacheMap[t]; !ok {\n\t\t\thasNewEntries = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !hasNewEntries {\n\t\t// Bail if the updates have been set while waiting for lock acquisition.\n\t\t// This is safe since setting entries is idempotent.\n\t\treturn\n\t}\n\n\tnewCacheMap := make(reflectCacheMap, len(currentCacheMap)+len(updates))\n\tfor k, v := range currentCacheMap {\n\t\tnewCacheMap[k] = v\n\t}\n\tfor t, update := range updates {\n\t\tnewCacheMap[t] = update\n\t}\n\tc.value.Store(newCacheMap)\n}", "func (o ChannelSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {\n\tln := int64(len(o))\n\tif ln == 0 {\n\t\treturn 0, nil\n\t}\n\n\tif len(cols) == 0 {\n\t\treturn 0, errors.New(\"models: update all requires at least one column argument\")\n\t}\n\n\tcolNames := make([]string, len(cols))\n\targs := make([]interface{}, len(cols))\n\n\ti := 0\n\tfor name, value := range cols {\n\t\tcolNames[i] = name\n\t\targs[i] = value\n\t\ti++\n\t}\n\n\t// Append all of the primary key values for each column\n\tfor _, obj := range o {\n\t\tpkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), channelPrimaryKeyMapping)\n\t\targs = append(args, pkeyArgs...)\n\t}\n\n\tsql := fmt.Sprintf(\"UPDATE \\\"channels\\\" SET %s WHERE %s\",\n\t\tstrmangle.SetParamNames(\"\\\"\", \"\\\"\", 1, colNames),\n\t\tstrmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), len(colNames)+1, channelPrimaryKeyColumns, len(o)))\n\n\tif boil.IsDebug(ctx) {\n\t\twriter := boil.DebugWriterFrom(ctx)\n\t\tfmt.Fprintln(writer, sql)\n\t\tfmt.Fprintln(writer, args...)\n\t}\n\tresult, err := exec.ExecContext(ctx, sql, args...)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: unable to update all in channel slice\")\n\t}\n\n\trowsAff, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: unable to retrieve rows affected all in update all channel\")\n\t}\n\treturn rowsAff, nil\n}", "func (r *reflectorStore) Replace(list []interface{}, _ string) error {\n\tentities := make([]entityUid, 0, len(list))\n\n\tfor _, obj := range list {\n\t\tmetaObj := obj.(metav1.Object)\n\t\tentities = append(entities, 
entityUid{r.parser.Parse(obj), metaObj.GetUID()})\n\t}\n\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\tvar events []workloadmeta.CollectorEvent\n\n\tseenNow := make(map[string]workloadmeta.EntityID)\n\tseenBefore := r.seen\n\n\tfor _, entityuid := range entities {\n\t\tentity := entityuid.entity\n\t\tuid := string(entityuid.uid)\n\n\t\tevents = append(events, workloadmeta.CollectorEvent{\n\t\t\tType: workloadmeta.EventTypeSet,\n\t\t\tSource: collectorID,\n\t\t\tEntity: entity,\n\t\t})\n\n\t\tdelete(seenBefore, uid)\n\n\t\tseenNow[uid] = entity.GetID()\n\t}\n\n\tfor _, entityID := range seenBefore {\n\t\tevents = append(events, workloadmeta.CollectorEvent{\n\t\t\tType: workloadmeta.EventTypeUnset,\n\t\t\tSource: collectorID,\n\t\t\tEntity: &workloadmeta.KubernetesPod{\n\t\t\t\tEntityID: entityID,\n\t\t\t},\n\t\t})\n\t}\n\n\tr.wlmetaStore.Notify(events)\n\tr.seen = seenNow\n\tr.hasSynced = true\n\n\treturn nil\n}", "func (v *verifiableMetric) update(data *Data, fieldValues string, packer *numberPacker) {\n\tswitch v.wantMetric.Type {\n\tcase TypeCounter:\n\t\tv.lastCounterValue[v.verifier.internMap.Intern(fieldValues)] = packer.pack(data.Number)\n\tcase TypeHistogram:\n\t\tlastDistributionSnapshot := v.lastDistributionSnapshot[v.verifier.internMap.Intern(fieldValues)]\n\t\tlastBucketSamples := lastDistributionSnapshot.numSamples\n\t\tvar count uint64\n\t\tfor i, b := range data.HistogramValue.Buckets {\n\t\t\tlastBucketSamples[i] = packer.packInt(int64(b.Samples))\n\t\t\tcount += b.Samples\n\t\t}\n\t\tlastDistributionSnapshot.sum = packer.pack(&data.HistogramValue.Total)\n\t\tlastDistributionSnapshot.count = packer.packInt(int64(count))\n\t\tlastDistributionSnapshot.min = packer.pack(&data.HistogramValue.Min)\n\t\tlastDistributionSnapshot.max = packer.pack(&data.HistogramValue.Max)\n\t\tlastDistributionSnapshot.ssd = packer.pack(&data.HistogramValue.SumOfSquaredDeviations)\n\t}\n}", "func (c *collector) Collect(ch chan<- prometheus.Metric) {\n\tc.mu.Lock()\n\t// Get the last views\n\tviews := c.views\n\t// Now clear them out for the next accumulation\n\tc.views = c.views[:0]\n\tc.mu.Unlock()\n\n\tif len(views) == 0 {\n\t\treturn\n\t}\n\n\t// seen is necessary because within each Collect cycle\n\t// if a Metric is sent to Prometheus with the same make up\n\t// that is \"name\" and \"labels\", it will error out.\n\tseen := make(map[prometheus.Metric]bool)\n\n\tfor _, vd := range views {\n\t\tfor _, row := range vd.Rows {\n\t\t\tmetric := c.toMetric(vd.View, row)\n\t\t\tif _, ok := seen[metric]; !ok && metric != nil {\n\t\t\t\tch <- metric\n\t\t\t\tseen[metric] = true\n\t\t\t}\n\t\t}\n\t}\n}", "func (c *VMCollector) Collect(ch chan<- prometheus.Metric) {\n\tfor _, m := range c.getMetrics() {\n\t\tch <- m\n\t}\n}", "func (*UpdateCollectionRequest) Descriptor() ([]byte, []int) {\n\treturn file_threads_proto_rawDescGZIP(), []int{15}\n}", "func (d data) update(ps list) error {\n\td.src.PreUpdate()\n\tdefer d.src.PostUpdate()\n\n\tisNew := false\n\tv := reflect.ValueOf(d.src).Elem().FieldByName(ps.Name)\n\tvar val reflect.Value\n\tfor _, i := range ps.updateIndexes {\n\t\tswitch v.Kind() {\n\t\tcase reflect.Slice:\n\t\t\tif i == v.Len() {\n\t\t\t\t// Append slice element.\n\t\t\t\tisNew = true\n\t\t\t\tnewElement := reflect.Zero(v.Type().Elem())\n\t\t\t\tnewSlice := reflect.Append(v, newElement)\n\t\t\t\tv.Set(newSlice)\n\t\t\t} else if i < 0 || i > v.Len() {\n\t\t\t\treturn fmt.Errorf(\"%s[%d]: index is out of range\", ps.Name, i)\n\t\t\t}\n\t\t\tval = v.Index(i)\n\t\tcase reflect.Struct:\n\t\t\tif i 
!= 0 {\n\t\t\t\treturn fmt.Errorf(\"property: %s is not a slice (element %d requested)\", ps.Name, i)\n\t\t\t}\n\t\t\tval = v\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"property: unknown kind: %s: %v\", ps.Name, v.Kind())\n\t\t}\n\n\t\tif val.CanSet() == false {\n\t\t\treturn fmt.Errorf(\"property: %s[%d] is not settable\", ps.Name, i)\n\t\t}\n\n\t\tfor _, p := range ps.Fields {\n\t\t\tif p.IsUpdated {\n\t\t\t\tfieldValue := val.FieldByName(p.FieldName)\n\t\t\t\tif fieldValue == (reflect.Value{}) {\n\t\t\t\t\treturn fmt.Errorf(\"%s[%d].%s: field does not exist\", ps.Name, i, p.FieldName)\n\t\t\t\t}\n\t\t\t\tif p.FieldName == \"Name\" {\n\t\t\t\t\tif len(p.Values) != 1 {\n\t\t\t\t\t\treturn fmt.Errorf(\"property: Name field must have a single value\")\n\t\t\t\t\t} else if p.Values[0] == \"\" {\n\t\t\t\t\t\treturn fmt.Errorf(\"property: Name cannot be empty\")\n\t\t\t\t\t}\n\t\t\t\t\tnewName := p.Values[0]\n\t\t\t\t\tif d.isUniqueName(ps.Name, i, newName) == false {\n\t\t\t\t\t\treturn fmt.Errorf(\"property: Name (%s) already exists\", newName)\n\t\t\t\t\t}\n\t\t\t\t\t// Don't rename new fields, otherwise this renames an existing empty id.\n\t\t\t\t\t// We need to make a copy before renaming.\n\t\t\t\t\tif isNew == false {\n\t\t\t\t\t\told := reflect.New(fieldValue.Type()).Elem()\n\t\t\t\t\t\told.Set(fieldValue)\n\t\t\t\t\t\tif err := d.src.RenameID(old, newName); err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif err := p.set(fieldValue); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"%s[%d].%s: %s\", ps.Name, i, p.FieldName, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (c *grpcClientManagerCollector) Collect(ch chan<- prometheus.Metric) {\n\tfor _, con := range c.cm.Metrics().Connections {\n\t\tl := []string{con.Target}\n\t\tch <- prometheus.MustNewConstMetric(connectionStateDesc, prometheus.GaugeValue, float64(con.State), l...)\n\t}\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tfor _, db := range e.dbs {\n\t\t// logger.Log(\"Scraping\", db.String())\n\t\tgo e.scrapeDatabase(db)\n\t}\n\te.mutex.Lock()\n\tdefer e.mutex.Unlock()\n\te.cpuPercent.Collect(ch)\n\te.dataIO.Collect(ch)\n\te.logIO.Collect(ch)\n\te.memoryPercent.Collect(ch)\n\te.workPercent.Collect(ch)\n\te.sessionPercent.Collect(ch)\n\te.storagePercent.Collect(ch)\n\te.dbUp.Collect(ch)\n\te.up.Set(1)\n}", "func (c *collector) Collect(ch chan<- prometheus.Metric) {\n\tc.m.Lock()\n\tfor _, m := range c.metrics {\n\t\tch <- m.metric\n\t}\n\tc.m.Unlock()\n}", "func (x *fastReflection_EvidenceList) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {\n\tswitch fd.FullName() {\n\tcase \"tendermint.types.EvidenceList.evidence\":\n\t\tif x.Evidence == nil {\n\t\t\tx.Evidence = []*Evidence{}\n\t\t}\n\t\tvalue := &_EvidenceList_1_list{list: &x.Evidence}\n\t\treturn protoreflect.ValueOfList(value)\n\tdefault:\n\t\tif fd.IsExtension() {\n\t\t\tpanic(fmt.Errorf(\"proto3 declared messages do not support extensions: tendermint.types.EvidenceList\"))\n\t\t}\n\t\tpanic(fmt.Errorf(\"message tendermint.types.EvidenceList does not contain field %s\", fd.FullName()))\n\t}\n}", "func (t *File_summary_by_instance) Collect(dbh *sql.DB) {\n\tstart := time.Now()\n\t// UPDATE current from db handle\n\tt.current = merge_by_table_name(select_fsbi_rows(dbh), t.global_variables)\n\n\t// copy in initial data if it was not there\n\tif len(t.initial) == 0 && len(t.current) > 0 {\n\t\tt.initial = make(file_summary_by_instance_rows, len(t.current))\n\t\tcopy(t.initial, 
t.current)\n\t}\n\n\t// check for reload initial characteristics\n\tif t.initial.needs_refresh(t.current) {\n\t\tt.initial = make(file_summary_by_instance_rows, len(t.current))\n\t\tcopy(t.initial, t.current)\n\t}\n\n\t// update results to current value\n\tt.results = make(file_summary_by_instance_rows, len(t.current))\n\tcopy(t.results, t.current)\n\n\t// make relative if need be\n\tif t.WantRelativeStats() {\n\t\tt.results.subtract(t.initial)\n\t}\n\n\t// sort the results\n\tt.results.sort()\n\n\t// setup the totals\n\tt.totals = t.results.totals()\n\tlib.Logger.Println(\"File_summary_by_instance.Collect() took:\", time.Duration(time.Since(start)).String())\n}", "func (c *MetricsCollector) Collect(ch chan<- prometheus.Metric) {\n\tfor _, s := range c.status {\n\t\ts.RLock()\n\t\tdefer s.RUnlock()\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.verify,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(s.VerifyRestore),\n\t\t\t\"verify_restore\",\n\t\t\ts.BackupService,\n\t\t\ts.StorageService,\n\t\t)\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.verify,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(s.VerifyDiff),\n\t\t\t\"verify_diff\",\n\t\t\ts.BackupService,\n\t\t\ts.StorageService,\n\t\t)\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.verify,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(s.VerifyChecksum),\n\t\t\t\"verify_checksum\",\n\t\t\ts.BackupService,\n\t\t\ts.StorageService,\n\t\t)\n\t}\n\n}", "func (d *Deployment) update() error {\n\tselect {\n\tcase event, ok := <-d.events:\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"service stream closed unexpectedly: %s\", d.stream.Err())\n\t\t}\n\t\tif event.Kind == discoverd.EventKindServiceMeta {\n\t\t\td.meta = event.ServiceMeta\n\t\t}\n\tdefault:\n\t}\n\treturn nil\n}", "func (x *fastReflection_ServiceCommandDescriptor) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {\n\tswitch fd.FullName() {\n\tcase \"cosmos.autocli.v1.ServiceCommandDescriptor.rpc_command_options\":\n\t\tif x.RpcCommandOptions == nil {\n\t\t\tx.RpcCommandOptions = []*RpcCommandOptions{}\n\t\t}\n\t\tvalue := &_ServiceCommandDescriptor_2_list{list: &x.RpcCommandOptions}\n\t\treturn protoreflect.ValueOfList(value)\n\tcase \"cosmos.autocli.v1.ServiceCommandDescriptor.sub_commands\":\n\t\tif x.SubCommands == nil {\n\t\t\tx.SubCommands = make(map[string]*ServiceCommandDescriptor)\n\t\t}\n\t\tvalue := &_ServiceCommandDescriptor_3_map{m: &x.SubCommands}\n\t\treturn protoreflect.ValueOfMap(value)\n\tcase \"cosmos.autocli.v1.ServiceCommandDescriptor.service\":\n\t\tpanic(fmt.Errorf(\"field service of message cosmos.autocli.v1.ServiceCommandDescriptor is not mutable\"))\n\tcase \"cosmos.autocli.v1.ServiceCommandDescriptor.enhance_custom_command\":\n\t\tpanic(fmt.Errorf(\"field enhance_custom_command of message cosmos.autocli.v1.ServiceCommandDescriptor is not mutable\"))\n\tdefault:\n\t\tif fd.IsExtension() {\n\t\t\tpanic(fmt.Errorf(\"proto3 declared messages do not support extensions: cosmos.autocli.v1.ServiceCommandDescriptor\"))\n\t\t}\n\t\tpanic(fmt.Errorf(\"message cosmos.autocli.v1.ServiceCommandDescriptor does not contain field %s\", fd.FullName()))\n\t}\n}", "func (o FeatureCvtermDbxrefSlice) UpdateAllP(exec boil.Executor, cols M) {\n\tif err := o.UpdateAll(exec, cols); err != nil {\n\t\tpanic(boil.WrapErr(err))\n\t}\n}", "func (o InstrumentClassSlice) UpdateAllP(exec boil.Executor, cols M) {\n\tif err := o.UpdateAll(exec, cols); err != nil {\n\t\tpanic(boil.WrapErr(err))\n\t}\n}", "func (o StockCvtermSlice) UpdateAllP(exec boil.Executor, cols M) {\n\tif 
err := o.UpdateAll(exec, cols); err != nil {\n\t\tpanic(boil.WrapErr(err))\n\t}\n}", "func (ctr *ContainerTaskRels) update(as agentState) {\n\trels := map[string]*TaskInfo{}\n\tfor _, f := range as.Frameworks {\n\t\tfor _, e := range f.Executors {\n\t\t\tfor _, t := range e.Tasks {\n\t\t\t\tfor _, s := range t.Statuses {\n\t\t\t\t\trels[s.ContainerStatusInfo.ID.Value] = &TaskInfo{\n\t\t\t\t\t\tID: t.ID,\n\t\t\t\t\t\tName: t.Name,\n\t\t\t\t\t\tLabels: t.Labels,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tctr.Lock()\n\tctr.rels = rels\n\tctr.Unlock()\n}", "func (o FeatureRelationshipSlice) UpdateAllP(exec boil.Executor, cols M) {\n\tif err := o.UpdateAll(exec, cols); err != nil {\n\t\tpanic(boil.WrapErr(err))\n\t}\n}", "func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {\n\tch <- up\n\tch <- clusterServers\n\tch <- clusterLeader\n\tch <- nodeCount\n\tch <- memberStatus\n\tch <- memberWanStatus\n\tch <- serviceCount\n\tch <- serviceNodesHealthy\n\tch <- nodeChecks\n\tch <- serviceChecks\n\tch <- keyValues\n\tch <- serviceTag\n\tch <- serviceCheckNames\n}", "func (mb *MutableBag) update(dictionary dictionary, attrs *mixerpb.Attributes) error {\n\t// check preconditions up front and bail if there are any\n\t// errors without mutating the bag.\n\tif err := checkPreconditions(dictionary, attrs); err != nil {\n\t\treturn err\n\t}\n\n\tvar log *bytes.Buffer\n\tif glog.V(2) {\n\t\tlog = pool.GetBuffer()\n\t}\n\n\tif attrs.ResetContext {\n\t\tif log != nil {\n\t\t\tlog.WriteString(\" resetting bag to empty state\\n\")\n\t\t}\n\t\tmb.Reset()\n\t}\n\n\t// delete requested attributes\n\tfor _, d := range attrs.DeletedAttributes {\n\t\tif name, present := dictionary[d]; present {\n\t\t\tif log != nil {\n\t\t\t\tlog.WriteString(fmt.Sprintf(\" attempting to delete attribute %s\\n\", name))\n\t\t\t}\n\n\t\t\tdelete(mb.values, name)\n\t\t}\n\t}\n\n\t// apply all attributes\n\tfor k, v := range attrs.StringAttributes {\n\t\tif log != nil {\n\t\t\tlog.WriteString(fmt.Sprintf(\" updating string attribute %s from '%v' to '%v'\\n\", dictionary[k], mb.values[dictionary[k]], v))\n\t\t}\n\t\tmb.values[dictionary[k]] = v\n\t}\n\n\tfor k, v := range attrs.Int64Attributes {\n\t\tif log != nil {\n\t\t\tlog.WriteString(fmt.Sprintf(\" updating int64 attribute %s from '%v' to '%v'\\n\", dictionary[k], mb.values[dictionary[k]], v))\n\t\t}\n\t\tmb.values[dictionary[k]] = v\n\t}\n\n\tfor k, v := range attrs.DoubleAttributes {\n\t\tif log != nil {\n\t\t\tlog.WriteString(fmt.Sprintf(\" updating double attribute %s from '%v' to '%v'\\n\", dictionary[k], mb.values[dictionary[k]], v))\n\t\t}\n\t\tmb.values[dictionary[k]] = v\n\t}\n\n\tfor k, v := range attrs.BoolAttributes {\n\t\tif log != nil {\n\t\t\tlog.WriteString(fmt.Sprintf(\" updating bool attribute %s from '%v' to '%v'\\n\", dictionary[k], mb.values[dictionary[k]], v))\n\t\t}\n\t\tmb.values[dictionary[k]] = v\n\t}\n\n\tfor k, v := range attrs.TimestampAttributes {\n\t\tif log != nil {\n\t\t\tlog.WriteString(fmt.Sprintf(\" updating time attribute %s from '%v' to '%v'\\n\", dictionary[k], mb.values[dictionary[k]], v))\n\t\t}\n\t\tmb.values[dictionary[k]] = v\n\t}\n\n\tfor k, v := range attrs.DurationAttributes {\n\t\tif log != nil {\n\t\t\tlog.WriteString(fmt.Sprintf(\" updating duration attribute %s from '%v' to '%v'\\n\", dictionary[k], mb.values[dictionary[k]], v))\n\t\t}\n\t\tmb.values[dictionary[k]] = v\n\t}\n\n\tfor k, v := range attrs.BytesAttributes {\n\t\tif log != nil {\n\t\t\tlog.WriteString(fmt.Sprintf(\" updating bytes attribute %s from '%v' to 
'%v'\\n\", dictionary[k], mb.values[dictionary[k]], v))\n\t\t}\n\t\tmb.values[dictionary[k]] = v\n\t}\n\n\tfor k, v := range attrs.StringMapAttributes {\n\t\tm, ok := mb.values[dictionary[k]].(map[string]string)\n\t\tif !ok {\n\t\t\tm = make(map[string]string)\n\t\t\tmb.values[dictionary[k]] = m\n\t\t}\n\n\t\tif log != nil {\n\t\t\tlog.WriteString(fmt.Sprintf(\" updating stringmap attribute %s from\\n\", dictionary[k]))\n\n\t\t\tif len(m) > 0 {\n\t\t\t\tfor k2, v2 := range m {\n\t\t\t\t\tlog.WriteString(fmt.Sprintf(\" %s:%s\\n\", k2, v2))\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.WriteString(\" <empty>\\n\")\n\t\t\t}\n\n\t\t\tlog.WriteString(\" to\\n\")\n\t\t}\n\n\t\tfor k2, v2 := range v.Map {\n\t\t\tm[dictionary[k2]] = v2\n\t\t}\n\n\t\tif log != nil {\n\t\t\tif len(m) > 0 {\n\t\t\t\tfor k2, v2 := range m {\n\t\t\t\t\tlog.WriteString(fmt.Sprintf(\" %s:%s\\n\", k2, v2))\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.WriteString(\" <empty>\\n\")\n\t\t\t}\n\t\t}\n\t}\n\n\tif log != nil {\n\t\tif log.Len() > 0 {\n\t\t\tglog.Infof(\"Updating attribute bag %d:\\n%s\", mb.id, log.String())\n\t\t}\n\t}\n\n\treturn nil\n}", "func (gw *gRPCWatcher) Next() ([]*naming.Update, error) {\n\tif gw.wch == nil {\n\t\t// first Next() returns all addresses\n\t\treturn gw.firstNext()\n\t}\n\tif gw.err != nil {\n\t\treturn nil, gw.err\n\t}\n\n\t// process new events on target/*\n\twr, ok := <-gw.wch\n\tif !ok {\n\t\tgw.err = status.Error(codes.Unavailable, ErrWatcherClosed.Error())\n\t\treturn nil, gw.err\n\t}\n\tif gw.err = wr.Err(); gw.err != nil {\n\t\treturn nil, gw.err\n\t}\n\n\tupdates := make([]*naming.Update, 0, len(wr.Events))\n\tfor _, e := range wr.Events {\n\t\tvar jupdate naming.Update\n\t\tvar err error\n\t\tswitch e.Type {\n\t\tcase etcd.EventTypePut:\n\t\t\terr = json.Unmarshal(e.Kv.Value, &jupdate)\n\t\t\tjupdate.Op = naming.Add\n\t\tcase etcd.EventTypeDelete:\n\t\t\terr = json.Unmarshal(e.PrevKv.Value, &jupdate)\n\t\t\tjupdate.Op = naming.Delete\n\t\t}\n\t\tif err == nil {\n\t\t\tupdates = append(updates, &jupdate)\n\t\t}\n\t}\n\treturn updates, nil\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tok := e.collectPeersMetric(ch)\n\tok = e.collectLeaderMetric(ch) && ok\n\tok = e.collectNodesMetric(ch) && ok\n\tok = e.collectMembersMetric(ch) && ok\n\tok = e.collectMembersWanMetric(ch) && ok\n\tok = e.collectServicesMetric(ch) && ok\n\tok = e.collectHealthStateMetric(ch) && ok\n\tok = e.collectKeyValues(ch) && ok\n\n\tif ok {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tup, prometheus.GaugeValue, 1.0,\n\t\t)\n\t} else {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tup, prometheus.GaugeValue, 0.0,\n\t\t)\n\t}\n}", "func (rc *RecordCollection) updateStoredFields(compPairs []recomputePair) {\n\tfor _, rp := range compPairs {\n\t\tif rp.recs.IsEmpty() {\n\t\t\t// recs have been fetched in retrieveComputeData\n\t\t\t// if it is empty now, it must be because the records have been unlinked in between\n\t\t\tcontinue\n\t\t}\n\t\trp.recs.applyMethod(rp.method)\n\t}\n}", "func (o PeerPropertySlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {\n\tln := int64(len(o))\n\tif ln == 0 {\n\t\treturn 0, nil\n\t}\n\n\tif len(cols) == 0 {\n\t\treturn 0, errors.New(\"models: update all requires at least one column argument\")\n\t}\n\n\tcolNames := make([]string, len(cols))\n\targs := make([]interface{}, len(cols))\n\n\ti := 0\n\tfor name, value := range cols {\n\t\tcolNames[i] = name\n\t\targs[i] = value\n\t\ti++\n\t}\n\n\t// Append all of the primary key values 
for each column\n\tfor _, obj := range o {\n\t\tpkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), peerPropertyPrimaryKeyMapping)\n\t\targs = append(args, pkeyArgs...)\n\t}\n\n\tsql := fmt.Sprintf(\"UPDATE \\\"peer_properties\\\" SET %s WHERE %s\",\n\t\tstrmangle.SetParamNames(\"\\\"\", \"\\\"\", 1, colNames),\n\t\tstrmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), len(colNames)+1, peerPropertyPrimaryKeyColumns, len(o)))\n\n\tif boil.IsDebug(ctx) {\n\t\twriter := boil.DebugWriterFrom(ctx)\n\t\tfmt.Fprintln(writer, sql)\n\t\tfmt.Fprintln(writer, args...)\n\t}\n\tresult, err := exec.ExecContext(ctx, sql, args...)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: unable to update all in peerProperty slice\")\n\t}\n\n\trowsAff, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: unable to retrieve rows affected all in update all peerProperty\")\n\t}\n\treturn rowsAff, nil\n}", "func (*Update) Descriptor() ([]byte, []int) {\n\treturn file_proto_gnmi_gnmi_proto_rawDescGZIP(), []int{1}\n}", "func (e *exporter) Collect(ch chan<- prometheus.Metric) {\n\twg := sync.WaitGroup{}\n\twg.Add(len(e.Collectors))\n\tfor name, c := range e.Collectors {\n\t\tgo func(name string, c Collector) {\n\t\t\texecute(name, c, ch)\n\t\t\twg.Done()\n\t\t}(name, c)\n\t}\n\twg.Wait()\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tvar (\n\t\tdata *Data\n\t\terr error\n\t)\n\n\te.mutex.Lock() // To protect metrics from concurrent collects.\n\tdefer e.mutex.Unlock()\n\n\te.resetGaugeVecs() // Clean starting point\n\n\tvar endpointOfAPI []string\n\tif strings.HasSuffix(rancherURL, \"v3\") || strings.HasSuffix(rancherURL, \"v3/\") {\n\t\tendpointOfAPI = endpointsV3\n\t} else {\n\t\tendpointOfAPI = endpoints\n\t}\n\n\tcacheExpired := e.IsCacheExpired()\n\n\t// Range over the pre-configured endpoints array\n\tfor _, p := range endpointOfAPI {\n\t\tif cacheExpired {\n\t\t\tdata, err = e.gatherData(e.rancherURL, e.resourceLimit, e.accessKey, e.secretKey, p, ch)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"Error getting JSON from URL %s\", p)\n\t\t\t\treturn\n\t\t\t}\n\t\t\te.cache[p] = data\n\t\t} else {\n\t\t\td, ok := e.cache[p]\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tdata = d\n\t\t}\n\n\t\tif err := e.processMetrics(data, p, e.hideSys, ch); err != nil {\n\t\t\tlog.Errorf(\"Error scraping rancher url: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tlog.Infof(\"Metrics successfully processed for %s\", p)\n\t}\n\n\tif cacheExpired {\n\t\te.RenewCache()\n\t}\n\n\tfor _, m := range e.gaugeVecs {\n\t\tm.Collect(ch)\n\t}\n}", "func (x *fastReflection_Supply) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {\n\tswitch fd.FullName() {\n\tcase \"cosmos.bank.v1beta1.Supply.total\":\n\t\tif x.Total == nil {\n\t\t\tx.Total = []*v1beta1.Coin{}\n\t\t}\n\t\tvalue := &_Supply_1_list{list: &x.Total}\n\t\treturn protoreflect.ValueOfList(value)\n\tdefault:\n\t\tif fd.IsExtension() {\n\t\t\tpanic(fmt.Errorf(\"proto3 declared messages do not support extensions: cosmos.bank.v1beta1.Supply\"))\n\t\t}\n\t\tpanic(fmt.Errorf(\"message cosmos.bank.v1beta1.Supply does not contain field %s\", fd.FullName()))\n\t}\n}", "func (o PhenotypepropSlice) UpdateAllP(exec boil.Executor, cols M) {\n\tif err := o.UpdateAll(exec, cols); err != nil {\n\t\tpanic(boil.WrapErr(err))\n\t}\n}", "func addValuesToChannel(c chan<- int) {\n\tfor i := 0; i < 100; i++ {\n\t\tc <- i\n\t}\n\tclose(c)\n}", "func (collector *atlassianUPMCollector) Describe(ch 
chan<- *prometheus.Desc) {\n\tch <- collector.atlassianUPMTimeMetric\n\tch <- collector.atlassianUPMUpMetric\n\tch <- collector.atlassianUPMPlugins\n\tch <- collector.atlassianUPMVersionsMetric\n}", "func (o *OSDCollector) Collect(ch chan<- prometheus.Metric) {\n\tif err := o.collectOSDPerf(); err != nil {\n\t\tlog.Println(\"failed collecting osd perf stats:\", err)\n\t}\n\n\tif err := o.collectOSDDump(); err != nil {\n\t\tlog.Println(\"failed collecting osd dump:\", err)\n\t}\n\n\tif err := o.collectOSDDF(); err != nil {\n\t\tlog.Println(\"failed collecting osd metrics:\", err)\n\t}\n\n\tif err := o.collectOSDTreeDown(ch); err != nil {\n\t\tlog.Println(\"failed collecting osd metrics:\", err)\n\t}\n\n\tfor _, metric := range o.collectorList() {\n\t\tmetric.Collect(ch)\n\t}\n\n\tif err := o.collectOSDScrubState(ch); err != nil {\n\t\tlog.Println(\"failed collecting osd scrub state:\", err)\n\t}\n}", "func (*MsgSetValidatorUpdates) Descriptor() ([]byte, []int) {\n\treturn file_core_abci_v1alpha1_abci_proto_rawDescGZIP(), []int{13}\n}", "func (s PowerSupplyConSumByEach) Collect(ch chan<- prometheus.Metric) {\n\tvar (\n\t\tvalue float64\n\t\tok bool\n\t)\n\tmetric := config.GOFISH.Service\n\n\tchass, _ := metric.Chassis()\n\n\tfor _, v := range chass {\n\t\tpowers, _ := v.Power()\n\t\tif powers != nil {\n\t\t\tfor _, p := range powers.PowerSupplies {\n\t\t\t\tch <- prometheus.MustNewConstMetric(config.C_powerconsumedbyeach, prometheus.GaugeValue, float64(p.PowerOutputWatts),\n\t\t\t\t\tfmt.Sprintf(\"%v\", p.PowerCapacityWatts),\n\t\t\t\t\tfmt.Sprintf(\"%v\", p.MemberID),\n\t\t\t\t\tfmt.Sprintf(\"%v\", p.Model),\n\t\t\t\t\tfmt.Sprintf(\"%v\", \"\"),\n\t\t\t\t)\n\n\t\t\t\tif value, ok = config.Status[string(p.Status.Health)]; !ok {\n\t\t\t\t\tvalue = float64(1)\n\t\t\t\t}\n\n\t\t\t\tch <- prometheus.MustNewConstMetric(config.C_powersupplystatus, prometheus.GaugeValue, float64(value),\n\t\t\t\t\tfmt.Sprintf(\"%v\", p.PowerCapacityWatts),\n\t\t\t\t\tfmt.Sprintf(\"%v\", p.MemberID),\n\t\t\t\t\tfmt.Sprintf(\"%v\", p.Model),\n\t\t\t\t\tfmt.Sprintf(\"%v\", \"\"),\n\t\t\t\t\tfmt.Sprintf(\"%v\", p.HotPluggable),\n\t\t\t\t\tfmt.Sprintf(\"%v\", p.Status.Health),\n\t\t\t\t\tfmt.Sprintf(\"%v\", \"\"),\n\t\t\t\t\tfmt.Sprintf(\"%v\", p.SparePartNumber),\n\t\t\t\t\tfmt.Sprintf(\"%v\", p.FirmwareVersion),\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\t}\n}", "func (c *goCollector) Describe(ch chan<- *Desc) {\n\tc.base.Describe(ch)\n\tfor _, i := range c.msMetrics {\n\t\tch <- i.desc\n\t}\n\tfor _, m := range c.rmExposedMetrics {\n\t\tch <- m.Desc()\n\t}\n}", "func (o DescriptionSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {\n\tln := int64(len(o))\n\tif ln == 0 {\n\t\treturn 0, nil\n\t}\n\n\tif len(cols) == 0 {\n\t\treturn 0, errors.New(\"models: update all requires at least one column argument\")\n\t}\n\n\tcolNames := make([]string, len(cols))\n\targs := make([]interface{}, len(cols))\n\n\ti := 0\n\tfor name, value := range cols {\n\t\tcolNames[i] = name\n\t\targs[i] = value\n\t\ti++\n\t}\n\n\t// Append all of the primary key values for each column\n\tfor _, obj := range o {\n\t\tpkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), descriptionPrimaryKeyMapping)\n\t\targs = append(args, pkeyArgs...)\n\t}\n\n\tsql := fmt.Sprintf(\"UPDATE `descriptions` SET %s WHERE %s\",\n\t\tstrmangle.SetParamNames(\"`\", \"`\", 0, colNames),\n\t\tstrmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, descriptionPrimaryKeyColumns, len(o)))\n\n\tif 
boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, sql)\n\t\tfmt.Fprintln(boil.DebugWriter, args...)\n\t}\n\n\tresult, err := exec.ExecContext(ctx, sql, args...)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: unable to update all in description slice\")\n\t}\n\n\trowsAff, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: unable to retrieve rows affected all in update all description\")\n\t}\n\treturn rowsAff, nil\n}", "func (c *NFSCollector) Collect(ch chan<- prometheus.Metric) {\n\tnfss, err := c.client.Find(c.ctx)\n\tif err != nil {\n\t\tc.errors.WithLabelValues(\"nfs\").Add(1)\n\t\tc.logger.Warn(\n\t\t\t\"can't list nfs\",\n\t\t\tslog.Any(\"err\", err),\n\t\t)\n\t}\n\n\tvar wg sync.WaitGroup\n\twg.Add(len(nfss))\n\n\tfor i := range nfss {\n\t\tfunc(nfs *platform.NFS) {\n\t\t\tdefer wg.Done()\n\n\t\t\tnfsLabels := c.nfsLabels(nfs)\n\n\t\t\tvar up float64\n\t\t\tif nfs.InstanceStatus.IsUp() {\n\t\t\t\tup = 1.0\n\t\t\t}\n\t\t\tch <- prometheus.MustNewConstMetric(\n\t\t\t\tc.Up,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tup,\n\t\t\t\tnfsLabels...,\n\t\t\t)\n\t\t\tch <- prometheus.MustNewConstMetric(\n\t\t\t\tc.NFSInfo,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tfloat64(1.0),\n\t\t\t\tc.nfsInfoLabels(nfs)...,\n\t\t\t)\n\n\t\t\tch <- prometheus.MustNewConstMetric(\n\t\t\t\tc.NICInfo,\n\t\t\t\tprometheus.GaugeValue,\n\t\t\t\tfloat64(1.0),\n\t\t\t\tc.nicInfoLabels(nfs)...,\n\t\t\t)\n\n\t\t\tif nfs.Availability.IsAvailable() && nfs.InstanceStatus.IsUp() {\n\t\t\t\tnow := time.Now()\n\t\t\t\t// Free disk size\n\t\t\t\twg.Add(1)\n\t\t\t\tgo func() {\n\t\t\t\t\tc.collectFreeDiskSize(ch, nfs, now)\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\n\t\t\t\t// NICs\n\t\t\t\twg.Add(1)\n\t\t\t\tgo func() {\n\t\t\t\t\tc.collectNICMetrics(ch, nfs, now)\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\n\t\t\t\t// maintenance info\n\t\t\t\tvar maintenanceScheduled float64\n\t\t\t\tif nfs.InstanceHostInfoURL != \"\" {\n\t\t\t\t\tmaintenanceScheduled = 1.0\n\t\t\t\t\twg.Add(1)\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\tc.collectMaintenanceInfo(ch, nfs)\n\t\t\t\t\t\twg.Done()\n\t\t\t\t\t}()\n\t\t\t\t}\n\t\t\t\tch <- prometheus.MustNewConstMetric(\n\t\t\t\t\tc.MaintenanceScheduled,\n\t\t\t\t\tprometheus.GaugeValue,\n\t\t\t\t\tmaintenanceScheduled,\n\t\t\t\t\tnfsLabels...,\n\t\t\t\t)\n\t\t\t}\n\t\t}(nfss[i])\n\t}\n\n\twg.Wait()\n}", "func (x *fastReflection_MsgUpdateParams) Descriptor() protoreflect.MessageDescriptor {\n\treturn md_MsgUpdateParams\n}", "func (m *metricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) }", "func (c *isisCollector) Collect(client collector.Client, ch chan<- prometheus.Metric, labelValues []string) error {\n\tadjancies, err := c.isisAdjancies(client)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tch <- prometheus.MustNewConstMetric(upCount, prometheus.GaugeValue, adjancies.Up, labelValues...)\n\tch <- prometheus.MustNewConstMetric(totalCount, prometheus.GaugeValue, adjancies.Total, labelValues...)\n\n\tif adjancies.Adjacencies != nil {\n\t\tfor _, adj := range adjancies.Adjacencies {\n\t\t\tlocalLabelvalues := append(labelValues, adj.InterfaceName, adj.SystemName, strconv.Itoa(int(adj.Level)))\n\t\t\tstate := 0.0\n\t\t\tswitch adj.AdjacencyState {\n\t\t\tcase \"Down\":\n\t\t\t\tstate = 0.0\n\t\t\tcase \"Up\":\n\t\t\t\tstate = 1.0\n\t\t\tcase \"New\":\n\t\t\t\tstate = 2.0\n\t\t\tcase \"One-way\":\n\t\t\t\tstate = 3.0\n\t\t\tcase \"Initializing\":\n\t\t\t\tstate = 4.0\n\t\t\tcase \"Rejected\":\n\t\t\t\tstate = 5.0\n\t\t\t}\n\n\t\t\tch <- 
prometheus.MustNewConstMetric(adjState, prometheus.GaugeValue, state, localLabelvalues...)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\tif ch == nil {\n\t\tglog.Info(\"Prometheus channel is closed. Skipping\")\n\t\treturn\n\t}\n\n\te.mutex.Lock()\n\tdefer func() {\n\t\te.mutex.Unlock()\n\t\te.cleanup.Range(func(key, value interface{}) bool {\n\t\t\tswitch chiName := key.(type) {\n\t\t\tcase string:\n\t\t\t\te.cleanup.Delete(key)\n\t\t\t\te.removeInstallationReference(chiName)\n\t\t\t}\n\t\t\treturn true\n\t\t})\n\t}()\n\n\tglog.Info(\"Starting Collect\")\n\tvar wg = sync.WaitGroup{}\n\t// Getting hostnames of Pods and requesting the metrics data from ClickHouse instances within\n\tfor chiName := range e.chInstallations {\n\t\t// Loop over all hostnames of this installation\n\t\tglog.Infof(\"Collecting metrics for %s\\n\", chiName)\n\t\tfor _, hostname := range e.chInstallations[chiName].hostnames {\n\t\t\twg.Add(1)\n\t\t\tgo func(name, hostname string, c chan<- prometheus.Metric) {\n\t\t\t\tdefer wg.Done()\n\n\t\t\t\tglog.Infof(\"Querying metrics for %s\\n\", hostname)\n\t\t\t\tmetricsData := make([][]string, 0)\n\t\t\t\tfetcher := e.newFetcher(hostname)\n\t\t\t\tif err := fetcher.clickHouseQueryMetrics(&metricsData); err != nil {\n\t\t\t\t\t// In case of an error fetching data from clickhouse store CHI name in e.cleanup\n\t\t\t\t\tglog.Infof(\"Error querying metrics for %s: %s\\n\", hostname, err)\n\t\t\t\t\te.cleanup.Store(name, struct{}{})\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tglog.Infof(\"Extracted %d metrics for %s\\n\", len(metricsData), hostname)\n\t\t\t\twriteMetricsDataToPrometheus(c, metricsData, name, hostname)\n\n\t\t\t\tglog.Infof(\"Querying table sizes for %s\\n\", hostname)\n\t\t\t\ttableSizes := make([][]string, 0)\n\t\t\t\tif err := fetcher.clickHouseQueryTableSizes(&tableSizes); err != nil {\n\t\t\t\t\t// In case of an error fetching data from clickhouse store CHI name in e.cleanup\n\t\t\t\t\tglog.Infof(\"Error querying table sizes for %s: %s\\n\", hostname, err)\n\t\t\t\t\te.cleanup.Store(name, struct{}{})\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tglog.Infof(\"Extracted %d table sizes for %s\\n\", len(tableSizes), hostname)\n\t\t\t\twriteTableSizesDataToPrometheus(c, tableSizes, name, hostname)\n\n\t\t\t}(chiName, hostname, ch)\n\t\t}\n\t}\n\twg.Wait()\n\tglog.Info(\"Finished Collect\")\n}", "func (c *PCPCounterVector) SetAll(val int64) {\n\tfor ins := range c.indom.instances {\n\t\tc.MustSet(val, ins)\n\t}\n}", "func (s *Set) Update(vals []interface{}) {\n\tfor _, val := range vals {\n\t\ts.set[val] = true\n\t}\n}", "func (c *InterfacesCollector) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, m := range c.collectors() {\n\t\tm.Describe(ch)\n\t}\n}", "func (q peerPropertyQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {\n\tqueries.SetUpdate(q.Query, cols)\n\n\tresult, err := q.Query.ExecContext(ctx, exec)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: unable to update all for peer_properties\")\n\t}\n\n\trowsAff, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: unable to retrieve rows affected for peer_properties\")\n\t}\n\n\treturn rowsAff, nil\n}", "func pvCollect(ch chan<- prometheus.Metric, pvs []map[string]string, vgName string) {\n for _, pv := range pvs {\n pvSizeF, err := strconv.ParseFloat(strings.Trim(pv[\"pv_size\"], \"B\"), 64)\n if err != nil {\n log.Print(err)\n return\n }\n ch <- 
prometheus.MustNewConstMetric(pvSizeMetric, prometheus.GaugeValue, pvSizeF, pv[\"pv_name\"], pv[\"pv_uuid\"], vgName)\n\n pvFreeF, err := strconv.ParseFloat(strings.Trim(pv[\"pv_free\"], \"B\"), 64)\n if err != nil {\n log.Print(err)\n return\n }\n ch <- prometheus.MustNewConstMetric(pvFreeMetric, prometheus.GaugeValue, pvFreeF, pv[\"pv_name\"], pv[\"pv_uuid\"], vgName)\n\n pvUsedF, err := strconv.ParseFloat(strings.Trim(pv[\"pv_used\"], \"B\"), 64)\n if err != nil {\n log.Print(err)\n return\n }\n ch <- prometheus.MustNewConstMetric(pvUsedMetric, prometheus.GaugeValue, pvUsedF, pv[\"pv_name\"], pv[\"pv_uuid\"], vgName)\n }\n}", "func (c *Characteristic) SetDescriptors(descs []*Descriptor) { c.descs = descs }", "func (*UpdateDataAttributeBindingRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_dataplex_v1_data_taxonomy_proto_rawDescGZIP(), []int{16}\n}", "func (m httpReferenceDiscoveryMetrics) Collect(metrics chan<- prometheus.Metric) {\n\tm.firstPacket.Collect(metrics)\n\tm.totalTime.Collect(metrics)\n\tm.advertisedRefs.Collect(metrics)\n}", "func (s *EntityStorage) update() {\n\ts.outdated = false\n\ts.occupied = s.occupied[:0]\n\tl := len(s.vec)\n\tfor i := 0; i < l; i++ {\n\t\tif s.vec[i].occupied {\n\t\t\ts.occupied = append(s.occupied, i)\n\t\t}\n\t}\n}", "func (e *Exporter) Collect(ch chan<- prometheus.Metric) {\n\te.mutex.Lock()\n\tdefer e.mutex.Unlock()\n\n\te.scrape()\n\n\te.up.Collect(ch)\n\te.totalScrapes.Collect(ch)\n\te.exchangeStatus.Collect(ch)\n\te.ltp.Collect(ch)\n\te.bestBid.Collect(ch)\n\te.bestAsk.Collect(ch)\n\te.bestBidSize.Collect(ch)\n\te.bestAskSize.Collect(ch)\n\te.totalBidDepth.Collect(ch)\n\te.totalAskDepth.Collect(ch)\n\te.volume.Collect(ch)\n\te.volumeByProduct.Collect(ch)\n}" ]
[ "0.58088684", "0.5562258", "0.55334586", "0.55037814", "0.5347168", "0.53121257", "0.5310797", "0.5289114", "0.5254953", "0.52527744", "0.52251166", "0.52201605", "0.52137506", "0.52137166", "0.5206045", "0.52008325", "0.5198924", "0.5187874", "0.5168878", "0.516446", "0.51290613", "0.5123346", "0.51011056", "0.5093007", "0.5077647", "0.50681156", "0.5059179", "0.50553733", "0.5049291", "0.5030075", "0.5025074", "0.5017162", "0.5001562", "0.49951145", "0.4992267", "0.49911627", "0.49911627", "0.499036", "0.49878943", "0.49870616", "0.49859664", "0.49835482", "0.49759", "0.49757472", "0.49697182", "0.4959413", "0.49510664", "0.49498925", "0.49489602", "0.49471077", "0.49464178", "0.4941526", "0.49398237", "0.49322447", "0.49279314", "0.49255764", "0.4924567", "0.49191716", "0.49188235", "0.4912022", "0.4908654", "0.49041602", "0.49015263", "0.48994848", "0.48914585", "0.4890951", "0.48846617", "0.48827052", "0.48721668", "0.48647106", "0.48631823", "0.48577398", "0.48472747", "0.48454216", "0.48331782", "0.4832318", "0.48275164", "0.482306", "0.4814459", "0.48127478", "0.481149", "0.48082775", "0.4806696", "0.48046935", "0.48015934", "0.4800502", "0.47994456", "0.47978643", "0.47960344", "0.47946006", "0.47918227", "0.47908115", "0.47804332", "0.4778649", "0.47715047", "0.4771343", "0.47713146", "0.4770241", "0.47696117", "0.47694737" ]
0.48819733
68
This implements Stringer interface from package fmt.
func (p Person) String() string { return fmt.Sprintf("%v (%v years)", p.Name, p.Age) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (this *Name) String() string {\n\tif this.DoubleValue != nil {\n\t\treturn this.Before.String() + strconv.FormatFloat(this.GetDoubleValue(), 'f', -1, 64)\n\t}\n\tif this.IntValue != nil {\n\t\treturn this.Before.String() + strconv.FormatInt(this.GetIntValue(), 10)\n\t}\n\tif this.UintValue != nil {\n\t\treturn this.Before.String() + \"uint(\" + strconv.FormatUint(this.GetUintValue(), 10) + \")\"\n\t}\n\tif this.BoolValue != nil {\n\t\treturn this.Before.String() + strconv.FormatBool(this.GetBoolValue())\n\t}\n\tif this.StringValue != nil {\n\t\tif isId(this.GetStringValue()) {\n\t\t\treturn this.Before.String() + this.GetStringValue()\n\t\t}\n\t\treturn this.Before.String() + strconv.Quote(this.GetStringValue())\n\t}\n\tif this.BytesValue != nil {\n\t\treturn this.Before.String() + fmt.Sprintf(\"%#v\", this.GetBytesValue())\n\t}\n\tpanic(\"unreachable\")\n}", "func (self StringEqualer) String() string {\n\treturn fmt.Sprintf(\"%s\", self.S)\n}", "func (s SequencerData) String() string {\n\treturn fmt.Sprintf(\"%T len %v\", s, s.Len())\n}", "func (e Str) String() string {\n\treturn fmt.Sprintf(\"%v\", e)\n}", "func (iface *Interface) String() string {\n\tiface.lock.RLock()\n\tdefer iface.lock.RUnlock()\n\n\tvar str string\n\tstr = fmt.Sprintf(\"Name: %s\", iface.Name)\n\tstr = fmt.Sprintf(\"Type: %d\", iface.Type)\n\tfor _, subiface := range iface.Subinterfaces {\n\t\tstr = fmt.Sprintf(\"%s, Interfaces(%s): {%s}\", str, subiface.Name, subiface.String())\n\t}\n\n\treturn str\n}", "func (obj *ExtFormat) String() string {\n\treturn fmt.Sprintf(`%s(0x%02x): type=%d`, obj.TypeName, obj.FirstByte, obj.ExtType)\n}", "func (t DataFormat) String() string {\n\treturn dataFormatToString[t]\n}", "func (bc ByteCount) String() string {\n\treturn fmt.Sprintf(\"% .1s\", bc)\n}", "func (s *Siegfried) String() string {\n\tstr := fmt.Sprintf(\n\t\t\"%s (%v)\\nidentifiers: \\n\",\n\t\tconfig.Signature(),\n\t\ts.C.Format(time.RFC3339))\n\tfor _, id := range s.ids {\n\t\td := id.Describe()\n\t\tstr += fmt.Sprintf(\" - %v: %v\\n\", d[0], d[1])\n\t}\n\treturn str\n}", "func (a *AddrInfo) String() string {\n\tvar aFmt string\n\tif a.Addr.EndpointType() == layers.EndpointMAC {\n\t\taFmt = \"MAC: %-39s (age: %.f, pkts: %d)\"\n\t} else {\n\t\taFmt = \"IP: %-40s (age: %.f, pkts: %d)\"\n\t}\n\n\treturn fmt.Sprintf(aFmt, a.Addr, a.Age(), a.Packets)\n}", "func (s *S) String() string {\n\treturn fmt.Sprintf(\"%s\", s) // Sprintf will call s.String()\n}", "func (s PathFormat) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r *Record) String() string {\n\tend := r.End()\n\treturn fmt.Sprintf(\"%s %v %v %d %s:%d..%d (%d) %d %s:%d %d %s %v %v\",\n\t\tr.Name,\n\t\tr.Flags,\n\t\tr.Cigar,\n\t\tr.MapQ,\n\t\tr.Ref.Name(),\n\t\tr.Pos,\n\t\tend,\n\t\tr.Bin(),\n\t\tend-r.Pos,\n\t\tr.MateRef.Name(),\n\t\tr.MatePos,\n\t\tr.TempLen,\n\t\tr.Seq.Expand(),\n\t\tr.Qual,\n\t\tr.AuxFields,\n\t)\n}", "func (sid *Shortid) String() string {\n\treturn fmt.Sprintf(\"Shortid(worker=%v, epoch=%v, abc=%v)\", sid.worker, sid.epoch, sid.abc)\n}", "func (op relOp) StringFormat() string {\n\tswitch op {\n\tcase relOpEqual:\n\t\treturn \"equals %v\"\n\tcase relOpNotEqual:\n\t\treturn \"not equal to %v\"\n\tcase relOpLess:\n\t\treturn \"less than %v\"\n\tcase relOpLessOrEqual:\n\t\treturn \"less than or equal to %v\"\n\tcase relOpGreater:\n\t\treturn \"greater than %v\"\n\tcase relOpGreaterOrEqual:\n\t\treturn \"greater than or equal to %v\"\n\t}\n\treturn \"(%v)\"\n}", "func (s RecordsIngested) String() string {\n\treturn 
awsutil.Prettify(s)\n}", "func (e Says) String() string {\n\treturn fmt.Sprintf(\"%v\", e)\n}", "func (p *PrefixInfo) String() string {\n\tprefixFmt := \"Prefix: %-34s (age: %.f)\"\n\tpfLen := uint8(p.Prefix.Data[0])\n\tpf := net.IP(p.Prefix.Data[14:])\n\tps := fmt.Sprintf(\"%v/%v\", pf, pfLen)\n\treturn fmt.Sprintf(prefixFmt, ps, p.Age())\n}", "func (sk StringKey) String() string {\n\treturn fmt.Sprintf(\"StringKey{%s, str:%q}\", sk.Base.String(), sk.str)\n}", "func (c ComponentIdentifier) String() string {\n\tvar buf bytes.Buffer\n\n\tbuf.WriteString(c.Format)\n\tbuf.WriteString(\":\")\n\tbuf.WriteString(c.Coordinates.String())\n\n\treturn buf.String()\n}", "func (id SoldierID) String() string {\n\treturn fmt.Sprintf(\"%s (%s)\", id.Name, id.Faction)\n}", "func (a Align) String() string {\n\tswitch a {\n\tcase AlignLeft:\n\t\treturn \"Left\"\n\tcase AlignRight:\n\t\treturn \"Right\"\n\tcase AlignCenter:\n\t\treturn \"Center\"\n\t}\n\treturn fmt.Sprintf(\"Align(%d)\", a)\n}", "func (f colFmt) String() string {\n\tout := \"name: \" + f.name\n\n\tif f.DB != \"\" {\n\t\tout += \"\\ndb: \" + f.DB\n\t}\n\tif f.owner != \"\" {\n\t\tout += \"\\nowner: \" + f.owner\n\t}\n\tif f.table != \"\" {\n\t\tout += \"\\ntable: \" + f.table\n\t}\n\tif f.realName != \"\" {\n\t\tout += \"\\nrealName: \" + f.realName\n\t}\n\tout += \"\\n\" + f.colType.String()\n\n\tflags := \"\"\n\tfor _, flag := range [...]columnFlag{hidden, key, writable, nullable, identity} {\n\t\tif f.flags&uint32(flag) != 0 {\n\t\t\tif flags == \"\" {\n\t\t\t\tflags = fmt.Sprint(flag)\n\t\t\t} else {\n\t\t\t\tflags += \",\" + fmt.Sprint(flag)\n\t\t\t}\n\t\t}\n\t}\n\tif flags == \"\" {\n\t\tflags = \"none\"\n\t}\n\tout += \"\\nflags: \" + fmt.Sprintf(\"%#x\", byte(f.flags)) + \" (\" + flags + \")\"\n\treturn out\n}", "func (id ID) String() string {\n\treturn fmt.Sprintf(\"ID{EthAddr: %v, NetAddr: %v}\", id.EthAddress, id.NetworkAddress)\n}", "func (v Validator) String() string {\n\tbechConsPubKey, err := Bech32ifyConsPub(v.ConsPubKey)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn fmt.Sprintf(`Validator\n Operator Address: %s\n Validator Consensus Pubkey: %s\n Jailed: %v\n Status: %s\n Tokens: %s\n Delegator Shares: %s\n Description: %s\n Unbonding Height: %d\n Unbonding Completion Time: %v\n Minimum Self Delegation: %v\n Commission: %s`,\n\t\tv.OperatorAddress, bechConsPubKey,\n\t\tv.Jailed, v.Status, v.Tokens,\n\t\tv.DelegatorShares, v.Description,\n\t\tv.UnbondingHeight, v.UnbondingCompletionTime, v.MinSelfDelegation,\n\t\tv.Commission)\n}", "func (q MetricTicks) String() string {\n\treturn fmt.Sprintf(\"%v MetricTicks\", q.Ticks4th())\n}", "func (id ResourceGroupProviderId) String() string {\n\tcomponents := []string{\n\t\tfmt.Sprintf(\"Subscription: %q\", id.SubscriptionId),\n\t\tfmt.Sprintf(\"Resource Group Name: %q\", id.ResourceGroupName),\n\t\tfmt.Sprintf(\"Provider Name: %q\", id.ProviderName),\n\t\tfmt.Sprintf(\"Resource Parent Type: %q\", id.ResourceParentType),\n\t\tfmt.Sprintf(\"Resource Parent Name: %q\", id.ResourceParentName),\n\t\tfmt.Sprintf(\"Resource Type: %q\", id.ResourceType),\n\t\tfmt.Sprintf(\"Resource Name: %q\", id.ResourceName),\n\t}\n\treturn fmt.Sprintf(\"Resource Group Provider (%s)\", strings.Join(components, \"\\n\"))\n}", "func (sg shufflingGroupsT) String() string {\n\treturn fmt.Sprintf(\"orig %02v -> shuff %02v - G%v Sd%v strt%02v seq%v\", sg.Orig, sg.Shuffled, sg.GroupID, sg.RandomizationSeed, sg.Start, sg.Idx)\n}", "func (p *ubDebugPayload) String() string {\n\treturn 
fmt.Sprintf(\"[%s][%s][%s][%s]\", formatFlags(p.flags), p.suffix, p.original, p.list)\n}", "func (s UsageRecord) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (i NotSpecific) String() string { return toString(i) }", "func (n NamespacedName) String() string {\n\treturn fmt.Sprintf(\"%s%c%s\", n.Namespace, Separator, n.Name)\n}", "func (k Key) String() string {\n\treturn fmt.Sprintf(keyFormat, k.Name, k.Version)\n}", "func (a Amount) String() string {\n\tvar n float64\n\tvar suffix string\n\tif a.Quantity >= 1000000 {\n\t\tn = a.Quantity / 1000000\n\t\tsuffix = \"M\"\n\t} else if a.Quantity >= 1000 {\n\t\tn = a.Quantity / 1000\n\t\tsuffix = \"K\"\n\t} else {\n\t\tn = a.Quantity\n\t}\n\tnum := trimDecimal(n)\n\tif a.Currency != \"\" {\n\t\treturn fmt.Sprintf(\"%s%s %s\", num, suffix, a.Currency)\n\t}\n\treturn fmt.Sprint(num, suffix)\n}", "func (s Subject) String() string {\n\treturn fmt.Sprintf(\"%s, %s, %s\", s.AuthenticationInfo, s.AuthorizationInfo, s.Session)\n}", "func String() string {\n\treturn fmt.Sprintf(`Version: \"%s\", BuildTime: \"%s\", Commit: \"%s\" `, Version, BuildTime, Commit)\n}", "func (this *Terminal) String() string {\n\tif this.DoubleValue != nil {\n\t\treturn this.Before.String() + strconv.FormatFloat(this.GetDoubleValue(), 'f', -1, 64)\n\t}\n\tif this.IntValue != nil {\n\t\treturn this.Before.String() + strconv.FormatInt(this.GetIntValue(), 10)\n\t}\n\tif this.UintValue != nil {\n\t\treturn this.Before.String() + \"uint(\" + strconv.FormatUint(this.GetUintValue(), 10) + \")\"\n\t}\n\tif this.BoolValue != nil {\n\t\treturn this.Before.String() + strconv.FormatBool(this.GetBoolValue())\n\t}\n\tif this.StringValue != nil {\n\t\treturn this.Before.String() + strconv.Quote(this.GetStringValue())\n\t}\n\tif this.BytesValue != nil {\n\t\treturn this.Before.String() + fmt.Sprintf(\"%#v\", this.GetBytesValue())\n\t}\n\tif this.Variable != nil {\n\t\treturn this.Before.String() + this.Variable.String()\n\t}\n\tpanic(\"unreachable\")\n}", "func (m MetricDto) String() string {\n\treturn fmt.Sprintf(\"Metric--> key: %s - value: %d\", m.Key, m.Value)\n}", "func (s MeterStats) String() string {\n\treturn fmt.Sprintf(\"Meter <%s>: %d since %v, rate: %.3f current, %.3f average\\n\",\n\t\ts.Name, s.TotalCount, s.Start.Format(timeFormat), s.IntervalRatePerS, s.TotalRatePerS)\n}", "func (d InterfaceBandwidthSampleDelta) String() string {\n\tvar rxtx string = \"tx\"\n\tif d.IsRx {\n\t\trxtx = \"rx\"\n\t}\n\tlog.Printf(\"%#v\\n\", d)\n\treturn fmt.Sprintf(\"%s-%s %10s\",\n\t\td.Interface.FullName(),\n\t\trxtx,\n\t\td.BitsString(),\n\t)\n}", "func (i *Info) String() string {\n\treturn fmt.Sprintf(\n\t\t\"net (%d NICs)\",\n\t\tlen(i.NICs),\n\t)\n}", "func (e Speaksfor) String() string {\n\treturn fmt.Sprintf(\"%v\", e)\n}", "func (m Meter) String() string {\n\treturn fmt.Sprint(\"Meter[name=\", m.name,\n\t\t\", snapshotInterval=\", m.printInterval,\n\t\t\", start=\", m.start.Format(timeFormat),\n\t\t\", totalCount=\", m.totalCount,\n\t\t\", lastIntervalStart=\", m.lastIntervalStart.Format(timeFormat),\n\t\t\", lastCount=\", m.lastCount,\n\t\t\", lastStats=\", m.lastStats, \"]\")\n}", "func (i *IE) String() string {\n\tif i == nil {\n\t\treturn \"nil\"\n\t}\n\treturn fmt.Sprintf(\"{%s: {Type: %d, Length: %d, Payload: %#v}}\",\n\t\ti.Name(),\n\t\ti.Type,\n\t\ti.Length,\n\t\ti.Payload,\n\t)\n}", "func (p person) String() string {\n\treturn fmt.Sprintf(\"Object %s: %d\", p.Name, p.Age)\n}", "func (pfx Prefix) String() string {\n\treturn fmt.Sprintf(\"%s/%d\", 
net.IP(convert.Uint32Byte(pfx.addr)), pfx.pfxlen)\n}", "func (s NetworkPathComponent) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (t *SplitAttemptInfo) String() string {\n\tcandidates := make([]string, 0, 4)\n\tfor _, ct := range t.Candidates {\n\t\tif ct.Merit == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\ts := fmt.Sprintf(\"%s=%.2f\", ct.Feature, ct.Merit)\n\t\tif candidates = append(candidates, s); len(candidates) == 4 {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn fmt.Sprintf(\"Weight: %.1f, Success: %v, MeritGain: %.2f, HBound: %.2f, Candidates: [%s]\",\n\t\tt.Weight, t.Success, t.MeritGain, t.HoeffdingBound, strings.Join(candidates, \" \"))\n}", "func (f *FieldHeaderNames) String() string {\n\treturn fmt.Sprintf(\"%+v\", *f)\n}", "func (g *Generic) String() string {\n\tparamStr := make([]string, len(g.Params))\n\tfor i, pr := range g.Params {\n\t\tparamStr[i] = pr.String()\n\t}\n\n\treturn fmt.Sprintf(\"{Header: %s, Params: %s}\",\n\t\tg.Header.String(),\n\t\tparamStr,\n\t)\n}", "func (c class) String() string {\n\treturn fmt.Sprintf(\"%c00\", c)\n}", "func (e entry) String() string {\n\treturn fmt.Sprintf(`\nid %v\nday %v\nreceived %v\ndispatched %v\narrived %v\ncleared %v\ncall_type %v\ngrid_location %v\nas_observed %v\naddress %v\nclearance_code %v\nresp_officer %v\nunits %v\ndescription %v\ncall_comments %v\n`,\n\t\te.id, e.day, e.received, e.dispatched, e.arrived, e.cleared, e.call_type, e.grid_location, e.as_observed, e.address,\n\t\te.clearance_code, e.resp_officer, e.units, e.description, e.call_comments)\n\n}", "func (proto IPProto) String() string {\n\tswitch proto {\n\tcase syscall.IPPROTO_TCP:\n\t\treturn \"TCP\"\n\tcase syscall.IPPROTO_UDP:\n\t\treturn \"UDP\"\n\t}\n\treturn fmt.Sprintf(\"IP(%d)\", proto)\n}", "func (e Endpoint) String() string {\n\treturn fmt.Sprintf(\"%s -h %s -p %d -t %d -d %s\", e.Proto, e.Host, e.Port, e.Timeout, e.Container)\n}", "func (t TestSpec) String() string {\n\treturn fmt.Sprintf(\"L3:%s L4:%s L7:%s Destination:%s\",\n\t\tt.l3.name, t.l4.name, t.l7.name, t.Destination.Kind)\n}", "func String(prefix interface{}, values map[string]interface{}, wrapped error) string {\n\tmsgs := []string{}\n\n\tmsg := \"\"\n\tif prefix != nil {\n\t\tswitch p := prefix.(type) {\n\t\tcase string:\n\t\t\tmsg = p\n\t\tcase []byte:\n\t\t\tmsg = string(p)\n\t\tdefault:\n\t\t\tmsg = strings.TrimPrefix(reflect.TypeOf(prefix).String(), \"*\")\n\t\t}\n\t}\n\n\tif msg != \"\" {\n\t\tmsgs = append(msgs, msg)\n\t}\n\n\tif len(values) > 0 {\n\t\tkeys := []string{}\n\t\tfor k := range values {\n\t\t\tkeys = append(keys, k)\n\t\t}\n\t\tsort.Strings(keys)\n\n\t\tkv := []string{}\n\t\tfor _, k := range keys {\n\t\t\tkv = append(kv, fmt.Sprintf(\"%s=%v\", k, values[k]))\n\t\t}\n\n\t\tmsgs = append(msgs, \"(\"+strings.Join(kv, \" \")+\")\")\n\n\t}\n\n\tmsg = strings.Join(msgs, \" \")\n\terrMsg := \"\"\n\tif wrapped != nil {\n\t\terrMsg = wrapped.Error()\n\t}\n\n\tswitch {\n\tcase msg == \"\" && errMsg == \"\":\n\t\treturn \"unspecified error\"\n\tcase msg == \"\" && errMsg != \"\":\n\t\treturn errMsg\n\tcase msg != \"\" && errMsg == \"\":\n\t\treturn msg\n\t}\n\n\treturn fmt.Sprintf(\"%s: %s\", msg, errMsg)\n}", "func (p paramHeader) String() string {\n\treturn fmt.Sprintf(\"%s (%d): %s\", p.typ, p.len, p.raw)\n}", "func (s *SSN) String() string {\n\treturn s.Formatted\n}", "func (id SubscriptionId) String() string {\n\tcomponents := []string{\n\t\tfmt.Sprintf(\"Group: %q\", id.GroupId),\n\t\tfmt.Sprintf(\"Subscription: %q\", id.SubscriptionId),\n\t}\n\treturn 
fmt.Sprintf(\"Subscription (%s)\", strings.Join(components, \"\\n\"))\n}", "func (r *Registry) String() string {\n\tout := make([]string, 0, len(r.nameToObject))\n\tfor name, object := range r.nameToObject {\n\t\tout = append(out, fmt.Sprintf(\"* %s:\\n%s\", name, object.serialization))\n\t}\n\treturn strings.Join(out, \"\\n\\n\")\n}", "func (ps *PrjnStru) String() string {\n\tstr := \"\"\n\tif ps.Recv == nil {\n\t\tstr += \"recv=nil; \"\n\t} else {\n\t\tstr += ps.Recv.Name() + \" <- \"\n\t}\n\tif ps.Send == nil {\n\t\tstr += \"send=nil\"\n\t} else {\n\t\tstr += ps.Send.Name()\n\t}\n\tif ps.Pat == nil {\n\t\tstr += \" Pat=nil\"\n\t} else {\n\t\tstr += \" Pat=\" + ps.Pat.Name()\n\t}\n\treturn str\n}", "func (r Label) String() string {\n\treturn fmt.Sprintf(\"%v:%v:%v:%v\",\n\t\tr.User, r.Role, r.Type, r.SecurityRange)\n}", "func (id ID) String() string {\n\tname, ok := registry[id]\n\tif !ok {\n\t\treturn fmt.Sprintf(\"%d\", id)\n\t}\n\treturn fmt.Sprintf(\"%s\", name)\n}", "func (i ExtraLeadingSpace) String() string { return toString(i) }", "func (k Key) String() string {\n\tswitch k {\n\tcase Jade:\n\t\treturn \"jade\"\n\tcase Copper:\n\t\treturn \"copper\"\n\tcase Crystal:\n\t\treturn \"crystal\"\n\t}\n\n\treturn fmt.Sprintf(\"<Key %d>\", k)\n}", "func (ms MyString) String() string {\n\treturn fmt.Sprintf(\"%s (%d)\", ms.str, ms.age)\n}", "func (t *Timeslot) String() string {\n\treturn fmt.Sprintf(\"%d:%02d - %d:%02d\",\n\t\tt.From/100, t.From%100,\n\t\tt.To/100, t.To%100)\n}", "func (f DataFormat) String() string {\n\ts, ok := map[DataFormat]string{\n\t\tUnknownDataFormat: \"\",\n\t\tCSVDataFormat: \"csv\",\n\t\tJSONDataFormat: \"json\",\n\t\tXMLDataFormat: \"xml\",\n\t\tXLSXDataFormat: \"xlsx\",\n\t\tCBORDataFormat: \"cbor\",\n\t\tNDJSONDataFormat: \"ndjson\",\n\t}[f]\n\n\tif !ok {\n\t\treturn \"\"\n\t}\n\n\treturn s\n}", "func (m *Metric) String() string {\n\treturn fmt.Sprintf(\"%#v\", m)\n}", "func (vl VerbLevel) String() string {\n\treturn fmt.Sprintf(\"%d=%s\", vl, vl.Name())\n}", "func (i Insets) String() string {\n\treturn fmt.Sprintf(\"%v, %v, %v, %v\", i.Top, i.Left, i.Bottom, i.Right)\n}", "func (p *Pool) String() string {\n\ts := fmt.Sprintf(\"%+v\", p.Stats)\n\ts = strings.Replace(s, \":\", \"=\", -1)\n\ts = strings.Trim(s, \"{}\")\n\treturn s\n}", "func (s FormatOptions) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (id ScheduleID) String() string {\n\treturn fmt.Sprintf(\"%d.%d.%d\", id.Shard, id.Realm, id.Schedule)\n}", "func (t *Trace) String() string {\n\treturn fmt.Sprintf(\"[Total=%+q, Start=%+q, End=%+q]\", t.EndTime.Sub(t.StartTime), t.StartTime, t.EndTime)\n}", "func (t Track) String() string {\n\tvar buf bytes.Buffer\n\tfmt.Fprintf(&buf, \"(%d) %s\\t\", t.ID, t.Name)\n\tfor i := 0; i < 16; i++ {\n\t\tif i%4 == 0 {\n\t\t\tfmt.Fprintf(&buf, \"|\")\n\t\t}\n\t\tif t.Steps[i] {\n\t\t\tfmt.Fprintf(&buf, \"x\")\n\t\t} else {\n\t\t\tfmt.Fprintf(&buf, \"-\")\n\t\t}\n\t}\n\tfmt.Fprintf(&buf, \"|\")\n\treturn buf.String()\n}", "func (s Steps) String() string {\n\tvar ss string\n\tfor i, b := range s {\n\t\tif i%4 == 0 {\n\t\t\tss += \"|\"\n\t\t}\n\n\t\tswitch b {\n\t\tcase 0x00:\n\t\t\tss += \"-\"\n\t\tcase 0x01:\n\t\t\tss += \"x\"\n\t\t}\n\t}\n\tss += \"|\"\n\n\treturn ss\n}", "func (info Info) String() string {\n\treturn fmt.Sprintf(\"\")\n}", "func (n name) String() string {\n\treturn fmt.Sprintf(n.Name)\n}", "func (counter *Counter) String() string {\n\treturn fmt.Sprintf(\"{count: %d}\", counter.count)\n}", "func (self *T) String() string {\n\treturn 
fmt.Sprintf(\"%f %f %f %f\", self[0], self[1], self[2], self[3])\n}", "func (k Key) String() string {\n\treturn fmt.Sprintf(\"Identity=%d,DestPort=%d,Nexthdr=%d,TrafficDirection=%d\", k.Identity, k.DestPort, k.Nexthdr, k.TrafficDirection)\n}", "func (e SubPrin) String() string {\n\treturn fmt.Sprintf(\"%v\", e)\n}", "func (e Not) String() string {\n\treturn fmt.Sprintf(\"%v\", e)\n}", "func (f *Field) String() string {\n\treturn fmt.Sprintf(\"Name: %s, ID: %d\", f.Name, f.ID)\n}", "func (s *sliceFlag) String() string {\n\treturn (\"Implementation of the String interface\")\n}", "func (addr GroupAddr) String() string {\n\treturn fmt.Sprintf(\"%d/%d/%d\", uint8(addr>>11)&31, uint8(addr>>8)&7, uint8(addr))\n}", "func (addr GroupAddr) String() string {\n\treturn fmt.Sprintf(\"%d/%d/%d\", uint8(addr>>11)&0x1F, uint8(addr>>8)&0x7, uint8(addr))\n}", "func (i ID) String() string {\n\treturn fmt.Sprintf(\"<round:%d,category:%s>\", i.Round(), i.Category().String())\n}", "func (id TopicID) String() string {\n\treturn fmt.Sprintf(\"%d.%d.%d\", id.Shard, id.Realm, id.Topic)\n}", "func (r WriteCounter) String() string {\n\treturn fmt.Sprintf(\"There were %v write operations totaling %v bytes\", r.numWrites, r.numBytes)\n}", "func (e *Entry) String() string {\n\treturn fmt.Sprintf(\"%v(%v)\", e.Vector, e.Value)\n}", "func (q *UnsafeQueue64) String() string {\n\treturn fmt.Sprintf(\"Queue{capacity: %v, capMod: %v, putPos: %v, getPos: %v}\",\n\t\tq.capacity, q.capMod, q.getPos, q.getPos)\n}", "func (p *ubPayload) String() string {\n\treturn fmt.Sprintf(\"[%s][%s]\", formatFlags(p.flags), p.suffix)\n}", "func (g Grade) String() string {\n\tswitch g {\n\tcase Bad:\n\t\treturn \"Bad\"\n\tcase Legacy:\n\t\treturn \"Legacy\"\n\tcase Good:\n\t\treturn \"Good\"\n\tcase Skipped:\n\t\treturn \"Skipped\"\n\tdefault:\n\t\treturn \"Invalid\"\n\t}\n}", "func (ls LevelSpec) String() string {\n\treturn fmt.Sprint(\"LevelSpec[Level=\", ls.Level, \", Name=\", ls.Name, \", Abbreviated=\", ls.Abbreviated, \"]\")\n}", "func (i Info) String() string {\n\treturn fmt.Sprintf(\"%v.%v.%v+%v\", i.Major, i.Minor, i.Patch, i.GitCommit)\n}", "func (bbw *Writer) String() string {\n\treturn fmt.Sprintf(\"{len(buf)=%d, clsdPos=%d, offs=%d, noExt=%t}\", len(bbw.buf), bbw.clsdPos, bbw.offs, bbw.noExt)\n}", "func (k Key) String() string {\n\treturn fmt.Sprintf(\"%s:%s\", k.Digest, k.Kind)\n}", "func (r Release) String() string {\n\ts := fmt.Sprintf(\"%04d.%d.%d\", r.Year, r.Month, r.Iteration)\n\tif r.Extra != \"\" {\n\t\ts += \"-\" + r.Extra\n\t}\n\n\treturn s\n}", "func (c *Curator) String() string {\n\treturn fmt.Sprintf(\"%s [%s]\", c.Name(), c.serviceAddr)\n}" ]
[ "0.6654755", "0.66311586", "0.65776306", "0.6569977", "0.65669614", "0.65365326", "0.6522435", "0.651368", "0.6511166", "0.64904517", "0.6490063", "0.64900064", "0.647282", "0.64517486", "0.64481103", "0.6431935", "0.643003", "0.64241385", "0.6418602", "0.6406918", "0.6406384", "0.6405143", "0.6396608", "0.63869065", "0.6381248", "0.6377072", "0.6377005", "0.63700265", "0.6364716", "0.63609374", "0.6351552", "0.6340593", "0.633561", "0.6327004", "0.6326638", "0.6324273", "0.6310545", "0.62939197", "0.6292112", "0.62876046", "0.6283975", "0.62795806", "0.62757164", "0.62748563", "0.62714165", "0.62679225", "0.626557", "0.62631136", "0.6262959", "0.62627035", "0.6260403", "0.6260394", "0.62567943", "0.6254965", "0.6253306", "0.6252692", "0.62518686", "0.62504214", "0.6248534", "0.6248475", "0.6247876", "0.62476975", "0.6246383", "0.624461", "0.6243275", "0.62410444", "0.6239988", "0.6238573", "0.62384367", "0.62380445", "0.62343186", "0.6234066", "0.62313366", "0.62278235", "0.62273246", "0.6223993", "0.6220134", "0.62186706", "0.6217491", "0.6216697", "0.6213808", "0.6211382", "0.621137", "0.6208704", "0.6207418", "0.6207211", "0.6206049", "0.6205898", "0.62042737", "0.620227", "0.6194967", "0.61948884", "0.61944646", "0.6193306", "0.6192265", "0.61922485", "0.6190948", "0.6190482", "0.61879927", "0.6186246", "0.61858034" ]
0.0
-1
This is a general function collection; you can use it whenever you need to get the offset and limit for paging.
func Calcpage(page int) (int, int) {
	page -= 1
	limit := 10
	offset := (page * limit) + 1
	return offset, limit
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *MGOStore) PaginateOffset(limit, offset int64) (int64, error) {\n\tq := m.collection.Find(m.filter)\n\n\trecordCount, _ := q.Count()\n\tc := int64(recordCount)\n\n\tq.Limit(int(limit))\n\tq.Skip(int(offset))\n\treturn c, q.All(m.items)\n}", "func (d *mongoDB) mongoPagination(pagination *crud.Pagination) (skip int64, limit int64) {\n\tskip = int64(pagination.StartIndex - 1)\n\tlimit = int64(pagination.Count)\n\treturn\n}", "func (m *MGOStore) PaginateCursor(limit int64, cursor interface{}, fieldName string, reverse bool) error {\n\toperator := \"$gt\"\n\tif reverse {\n\t\toperator = \"$lt\"\n\t}\n\n\tm.filter[fieldName] = bson.M{operator: cursor}\n\tq := m.collection.Find(m.filter)\n\tq.Limit(int(limit))\n\n\treturn q.All(m.items)\n}", "func (d *Database) Paginate(query PaginateQuery) (uint64, error) {\n\tvar total uint64\n\tvar err error\n\ttotal = 0\n\n\tif query.Order == \"\" {\n\t\tquery.Order = \"id desc\"\n\t}\n\n\tif query.Where != \"\" {\n\t\tif len(query.Preloads) == 0 {\n\t\t\terr = DB.Reader.Model(query.Model).Where(query.Where).Select(query.Fields).Count(&total).Error\n\t\t\terr = DB.Reader.Model(query.Model).Where(query.Where).Select(query.Fields).Limit(query.Limit).Offset(query.Offset).Order(query.Order).Group(query.Group).Find(query.Result).Error\n\t\t} else {\n\t\t\terr = DB.Reader.Model(query.Model).Where(query.Where).Select(query.Fields).Count(&total).Error\n\t\t\tqu := DB.Reader.Model(query.Model)\n\t\t\tfor key, fun := range query.Preloads {\n\t\t\t\tqu = qu.Preload(key, fun)\n\t\t\t}\n\t\t\terr = qu.Where(query.Where).Select(query.Fields).Limit(query.Limit).Offset(query.Offset).Order(query.Order).Group(query.Group).Find(query.Result).Error\n\t\t}\n\t\t\n\t} else {\n\t\tif len(query.Preloads) == 0 {\n\t\t\terr = DB.Reader.Model(query.Model).Select(query.Fields).Count(&total).Error\n\t\t\terr = DB.Reader.Model(query.Model).Select(query.Fields).Limit(query.Limit).Offset(query.Offset).Order(query.Order).Group(query.Group).Find(query.Result).Error\n\t\t} else {\n\t\t\terr = DB.Reader.Model(query.Model).Where(query.Where).Select(query.Fields).Count(&total).Error\n\t\t\tqu := DB.Reader.Model(query.Model)\n\t\t\tfor key, fun := range query.Preloads {\n\t\t\t\tqu = qu.Preload(key, fun)\n\t\t\t}\n\t\t\terr = qu.Where(query.Where).Select(query.Fields).Limit(query.Limit).Offset(query.Offset).Order(query.Order).Group(query.Group).Find(query.Result).Error\n\t\t}\n\t}\n\treturn total, err\n}", "func GetNewsPaginated(pageId int, pageSize int) ([]News,int) {\n\tvar news []News\n\tvar count int64\n\tpageId-=1\n\tdb.Model(&news).Count(&count)\n\n\tdb.Order(\"CreatedTime desc\").Offset(pageSize*pageId).Limit(pageSize).Find(&news)\n\n\tpageCount:=int(count)/pageSize\n\tif int(count)%pageSize!=0 {\n\t\tpageCount++\n\t}\n\n\treturn news,pageCount\n}", "func (s *GORMStore) PaginateOffset(limit, offset int64) (int64, error) {\n\tq := s.db\n\tq = q.Limit(int(limit))\n\tq = q.Offset(int(offset))\n\tq = q.Find(s.items)\n\tq = q.Limit(-1)\n\tq = q.Offset(-1)\n\n\tvar count int64\n\tif err := q.Count(&count).Error; err != nil {\n\t\treturn count, err\n\t}\n\n\treturn count, nil\n}", "func (p *PaginatedList) Offset() int {\r\n\treturn (p.Page - 1) * p.PerPage\r\n}", "func pageOffset(pos, pageSize int64) int64 {\n\trem := pos % pageSize\n\tif rem != 0 {\n\t\treturn pos - rem\n\t}\n\treturn pos\n}", "func pageOffset(pos, pageSize int64) int64 {\n\trem := pos % pageSize\n\tif rem != 0 {\n\t\treturn pos - rem\n\t}\n\treturn pos\n}", "func paginateResult(results []Document, page int) (resSlice 
[]Document) {\n\tif len(results) >= (page - 1) * ResultsPerPage {\n\t\tresSlice = results[(page - 1) * ResultsPerPage : min(page * ResultsPerPage, len(results))]\n\t}\n\treturn\n}", "func Paginate(limit int, offset int, count *int64) QueryProcessor {\n\treturn func(db *gorm.DB, out interface{}) (*gorm.DB, microappError.DatabaseError) {\n\t\tif out != nil && count != nil {\n\t\t\tif err := db.Model(out).Count(count).Error; err != nil {\n\t\t\t\treturn db, microappError.NewDatabaseError(err)\n\t\t\t}\n\t\t}\n\t\tif limit != -1 {\n\t\t\tdb = db.Limit(limit)\n\t\t}\n\t\tif offset > 0 {\n\t\t\tdb = db.Offset(offset)\n\t\t}\n\t\treturn db, nil\n\t}\n}", "func (p PaginationParameters) Offset() int {\n\treturn (p.Page - 1) * p.ItemsPerPage\n}", "func (o OffsetPaginator) Page() uint {\n\tif o.Count == 0 {\n\t\treturn 1\n\t}\n\n\treturn uint(math.Ceil(float64(o.Offset)/float64(o.Limit))) + 1\n}", "func Paginate(ctx *context.Context, next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {\n\n\t\tpagingQuery := common.NewPagingQuery().WithLimit(20)\n\n\t\theader := req.Header.Get(\"X-Plik-Paging\")\n\t\tif header != \"\" {\n\t\t\terr := json.Unmarshal([]byte(header), &pagingQuery)\n\t\t\tif err != nil {\n\t\t\t\tctx.InvalidParameter(\"paging header\")\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tlimitStr := req.URL.Query().Get(\"limit\")\n\t\t\tif limitStr != \"\" {\n\t\t\t\tlimit, err := strconv.Atoi(limitStr)\n\t\t\t\tif err != nil {\n\t\t\t\t\tctx.InvalidParameter(\"limit : %s\", err)\n\t\t\t\t}\n\t\t\t\tpagingQuery.WithLimit(limit)\n\t\t\t}\n\n\t\t\torder := req.URL.Query().Get(\"order\")\n\t\t\tif order != \"\" {\n\t\t\t\tpagingQuery.WithOrder(order)\n\t\t\t}\n\n\t\t\tbefore := req.URL.Query().Get(\"before\")\n\t\t\tif before != \"\" {\n\t\t\t\tpagingQuery.WithBeforeCursor(before)\n\t\t\t}\n\n\t\t\tafter := req.URL.Query().Get(\"after\")\n\t\t\tif after != \"\" {\n\t\t\t\tpagingQuery.WithAfterCursor(after)\n\t\t\t}\n\t\t}\n\n\t\tif pagingQuery.Limit != nil && *pagingQuery.Limit <= 0 {\n\t\t\tctx.InvalidParameter(\"limit\")\n\t\t}\n\n\t\tif pagingQuery.Order != nil && !(*pagingQuery.Order == \"asc\" || *pagingQuery.Order == \"desc\") {\n\t\t\tctx.InvalidParameter(\"order\")\n\t\t}\n\n\t\tif pagingQuery.Before != nil && pagingQuery.After != nil {\n\t\t\tctx.BadRequest(\"both before and after cursors set\")\n\t\t}\n\n\t\tctx.SetPagingQuery(pagingQuery)\n\n\t\tnext.ServeHTTP(resp, req)\n\t})\n}", "func (a *API) Paginate(ctx *fasthttp.RequestCtx, orderFields ...string) (model.Pagination, map[string]string, error) {\n\tvar err error\n\terrs := make(map[string]string)\n\tpagination := model.NewPagination()\n\tqueryParams := a.ParseQuery(ctx)\n\n\tif val, ok := queryParams[\"limit\"]; ok {\n\t\tpagination.Limit, err = strconv.Atoi(val)\n\t\tif err != nil {\n\t\t\terrs[\"limit\"] = \"is not valid\"\n\t\t}\n\t}\n\n\tif val, ok := queryParams[\"offset\"]; ok {\n\t\tpagination.Offset, err = strconv.ParseInt(val, 10, 64)\n\t\tif err != nil {\n\t\t\terrs[\"offset\"] = \"is not valid\"\n\t\t}\n\t}\n\n\tif val, ok := queryParams[\"order_by\"]; ok {\n\t\tpagination.OrderBy = val\n\t}\n\n\tif val, ok := queryParams[\"order_field\"]; ok {\n\t\tpagination.OrderField = val\n\t}\n\n\tif err != nil {\n\t\tpanic(pluggableError.New(fasthttp.StatusMessage(fasthttp.StatusBadRequest),\n\t\t\tfasthttp.StatusBadRequest,\n\t\t\t\"paginate params not valid\",\n\t\t\terrs))\n\t}\n\n\terrs, err = pagination.Validate(orderFields...)\n\n\tif err != nil 
{\n\t\tpanic(pluggableError.New(fasthttp.StatusMessage(fasthttp.StatusBadRequest),\n\t\t\tfasthttp.StatusBadRequest,\n\t\t\t\"paginate params not valid\",\n\t\t\terrs))\n\t}\n\n\treturn pagination, errs, err\n}", "func (q *Query) Page(page, page_size int) *Query {\n\tif page < 1 || page_size < 1 {\n\t\tlogrus.Warn(\"illegal page or page_size: \", page, \", \", page_size)\n\t\treturn q\n\t}\n\tq.offset = (page - 1) * page_size\n\tq.limit = page_size\n\treturn q\n}", "func getOffset(page, limit int64) int64 {\n\toffset := (page - 1) * limit\n\tif offset < 0 {\n\t\treturn 0\n\t}\n\treturn offset\n}", "func (qs *QuoteServer) paginationHelper(q string, count, page, total int) map[string]interface{} {\n\tout := make(map[string]interface{})\n\n\ttype element struct {\n\t\tText string\n\t\tLink string\n\t\tActive bool\n\t}\n\n\tif page > 1 {\n\t\tout[\"Prev\"] = path.Join(\"/search\", q, strconv.Itoa(page-1), strconv.Itoa(count))\n\t}\n\n\tif page*count < total {\n\t\tout[\"Next\"] = path.Join(\"/search\", q, strconv.Itoa(page+1), strconv.Itoa(count))\n\t}\n\n\tmaxPage := int(math.Ceil(float64(total) / float64(count)))\n\tqs.log.Trace(\"Pagination should have a max pages\", \"max\", maxPage)\n\n\telements := []element{}\n\tstart := 0\n\tif page-3 >= 0 {\n\t\tstart = page - 3\n\t}\n\tend := maxPage\n\tif page+2 < maxPage {\n\t\tend = page + 2\n\t}\n\tif start+5 > end && start+5 < maxPage {\n\t\tend = start + 5\n\t}\n\tif end-5 > 0 {\n\t\tstart = end - 5\n\t}\n\tfor i := start; i < end; i++ {\n\t\telement := element{}\n\t\telement.Text = strconv.Itoa(i + 1)\n\t\telement.Link = path.Join(\"/search\", q, strconv.Itoa(i+1), strconv.Itoa(count))\n\t\tif i+1 == page {\n\t\t\telement.Active = true\n\t\t}\n\t\telements = append(elements, element)\n\t}\n\tout[\"Elements\"] = elements\n\treturn out\n}", "func (r *Repository) pagination(page int) ([]*domain.UserInfoModel, error) {\n\n\tusers := make([]*domain.UserInfoModel, 0)\n\n\tlimit := 10\n\toffset := limit * (page - 1)\n\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancel()\n\n\tquery := `SELECT id,namee,email,password FROM users ORDER BY id LIMIT $2 OFFSET $1`\n\n\tqueryStart := time.Now().Nanosecond() / 1000\n\trows, err := r.db.QueryContext(ctx, query, offset, limit)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tqueryEnd := time.Now().Nanosecond() / 1000\n\texecutionTime := queryEnd - queryStart\n\tr.insertTimeSpent(\"Pagination\"+strconv.Itoa(page), executionTime)\n\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\tp := &domain.UserInfoModel{}\n\t\terr = rows.Scan(&p.ID, &p.Name, &p.Email, &p.PassWord)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tusers = append(users, p)\n\t}\n\n\treturn nil, nil\n}", "func (w *Wrapper) Paginate(pageCount int, columns ...string) (err error) {\n\terr = w.WithTotalCount().Limit(w.PageLimit*(pageCount-1), w.PageLimit).Get(columns...)\n\tw.TotalPage = w.TotalCount / w.PageLimit\n\treturn\n}", "func (m Model) ItemsOnPage(totalItems int) int {\n\tif totalItems < 1 {\n\t\treturn 0\n\t}\n\tstart, end := m.GetSliceBounds(totalItems)\n\treturn end - start\n}", "func (s *GORMStore) PaginateCursor(limit int64, cursor interface{}, fieldName string, reverse bool) error {\n\tq := s.db\n\n\tq = q.Limit(int(limit))\n\n\tif reverse {\n\t\tq = q.Where(fmt.Sprintf(\"%s < ?\", fieldName), cursor)\n\t} else {\n\t\tq = q.Where(fmt.Sprintf(\"%s > ?\", fieldName), cursor)\n\t}\n\n\tq = q.Find(s.items)\n\treturn q.Error\n}", "func paging(start, length, totalCount int) (int, int) {\n\tif 
length == 0 {\n\t\tlength = 20\n\t}\n\n\tstart = totalCount - start - length\n\tif start < 0 {\n\t\tlength += start\n\t\tstart = 0\n\t}\n\treturn start, length\n}", "func (c *serviceContext) Paginate(ctx context.Context, limit, offset int, asc bool, status *Status) ([]Batch, error) {\n\tfilter := bson.M{}\n\tif status != nil {\n\t\tfilter[\"status\"] = *status\n\t}\n\n\tsort := -1\n\tif asc {\n\t\tsort = 1\n\t}\n\topt := options.Find().SetSort(bson.M{\"createdDate\": sort}).SetSkip(int64(offset)).SetLimit(int64(limit))\n\n\tcursor, err := c.mongo.Find(ctx, _collectionName, filter, opt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() { _ = cursor.Close(ctx) }()\n\n\tvar batches []Batch\n\tif err := cursor.All(ctx, &batches); err != nil {\n\t\treturn nil, err\n\t}\n\tif batches == nil {\n\t\tbatches = make([]Batch, 0)\n\t}\n\treturn batches, nil\n}", "func (p *Paginator) Offset() int {\n\treturn (p.Page() - 1) * p.PerPageNums\n}", "func Paging(p *Param, result interface{}) (*Paged, error) {\n\tdb := p.DB\n\tif p.ShowSQL {\n\t\tdb = db.Debug()\n\t}\n\tif p.Page < 1 {\n\t\tp.Page = 1\n\t}\n\tif p.Limit == 0 {\n\t\tp.Limit = 10\n\t}\n\tif len(p.OrderBy) > 0 {\n\t\tfor _, o := range p.OrderBy {\n\t\t\tdb = db.Order(o)\n\t\t}\n\t}\n\tvar (\n\t\tpaged = new(Paged)\n\t\tcount, offset int\n\t)\n\tif err := db.Model(result).Count(&count).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif p.Page == 1 {\n\t\toffset = 0\n\t} else {\n\t\toffset = (p.Page - 1) * p.Limit\n\t}\n\tif err := db.Limit(p.Limit).Offset(offset).Find(result).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tpaged.TotalRecord = count\n\tpaged.Records = result\n\tpaged.Page = p.Page\n\tpaged.Offset = offset\n\tpaged.Limit = p.Limit\n\tpaged.TotalPage = int(math.Ceil(float64(count) / float64(p.Limit)))\n\tif p.Page > 1 {\n\t\tpaged.PrevPage = p.Page - 1\n\t} else {\n\t\tpaged.PrevPage = p.Page\n\t}\n\tif p.Page == paged.TotalPage {\n\t\tpaged.NextPage = p.Page\n\t} else {\n\t\tpaged.NextPage = p.Page + 1\n\t}\n\treturn paged, nil\n}", "func (stmt *statement) Paginate(page, pageSize int) Statement {\n\tif page < 1 {\n\t\tpage = 1\n\t}\n\tif pageSize < 1 {\n\t\tpageSize = 1\n\t}\n\tif page > 1 {\n\t\tstmt.Offset((page - 1) * pageSize)\n\t}\n\tstmt.Limit(pageSize)\n\treturn stmt\n}", "func (p *Paginator) Offset() int {\n\treturn (p.CurrentPage() - 1) * p.ItemsPerPage()\n}", "func (o OffsetPaginator) Paginate(input sq.SelectBuilder) sq.SelectBuilder {\n\treturn input.OrderBy(fmt.Sprintf(\"%s %s\", o.OrderBy, o.Order)).Limit(o.Limit).Offset(o.Offset)\n}", "func (p *PaginatedList) Limit() int {\r\n\treturn p.PerPage\r\n}", "func getPaginationInfo(n int, pageSize uint32, f func(int) bool) (*buildqueuestate.PaginationInfo, int) {\n\tstartIndex := uint32(sort.Search(n, f))\n\tendIndex := uint32(n)\n\tif endIndex-startIndex > pageSize {\n\t\tendIndex = startIndex + pageSize\n\t}\n\treturn &buildqueuestate.PaginationInfo{\n\t\tStartIndex: startIndex,\n\t\tTotalEntries: uint32(n),\n\t}, int(endIndex)\n}", "func FilteredPaginate(\n\tprefixStore types.KVStore,\n\tpageRequest *PageRequest,\n\tonResult func(key []byte, value []byte, accumulate bool) (bool, error),\n) (*PageResponse, error) {\n\n\t// if the PageRequest is nil, use default PageRequest\n\tif pageRequest == nil {\n\t\tpageRequest = &PageRequest{}\n\t}\n\n\toffset := pageRequest.Offset\n\tkey := pageRequest.Key\n\tlimit := pageRequest.Limit\n\tcountTotal := pageRequest.CountTotal\n\treverse := pageRequest.Reverse\n\n\tif offset > 0 && key != nil {\n\t\treturn nil, 
fmt.Errorf(\"invalid request, either offset or key is expected, got both\")\n\t}\n\n\tif limit == 0 {\n\t\tlimit = DefaultLimit\n\n\t\t// count total results when the limit is zero/not supplied\n\t\tcountTotal = true\n\t}\n\n\tif len(key) != 0 {\n\t\titerator := getIterator(prefixStore, key, reverse)\n\t\tdefer iterator.Close()\n\n\t\tvar numHits uint64\n\t\tvar nextKey []byte\n\n\t\tfor ; iterator.Valid(); iterator.Next() {\n\t\t\tif numHits == limit {\n\t\t\t\tnextKey = iterator.Key()\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif iterator.Error() != nil {\n\t\t\t\treturn nil, iterator.Error()\n\t\t\t}\n\n\t\t\thit, err := onResult(iterator.Key(), iterator.Value(), true)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif hit {\n\t\t\t\tnumHits++\n\t\t\t}\n\t\t}\n\n\t\treturn &PageResponse{\n\t\t\tNextKey: nextKey,\n\t\t}, nil\n\t}\n\n\titerator := getIterator(prefixStore, nil, reverse)\n\tdefer iterator.Close()\n\n\tend := offset + limit\n\n\tvar numHits uint64\n\tvar nextKey []byte\n\n\tfor ; iterator.Valid(); iterator.Next() {\n\t\tif iterator.Error() != nil {\n\t\t\treturn nil, iterator.Error()\n\t\t}\n\n\t\taccumulate := numHits >= offset && numHits < end\n\t\thit, err := onResult(iterator.Key(), iterator.Value(), accumulate)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif hit {\n\t\t\tnumHits++\n\t\t}\n\n\t\tif numHits == end+1 {\n\t\t\tif nextKey == nil {\n\t\t\t\tnextKey = iterator.Key()\n\t\t\t}\n\n\t\t\tif !countTotal {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tres := &PageResponse{NextKey: nextKey}\n\tif countTotal {\n\t\tres.Total = numHits\n\t}\n\n\treturn res, nil\n}", "func (tt *TransactionTransformer) transformCollection(request *http.Request, data interface{}, totalData int, limit string) *TransactionResponse {\n\ttransactionResponse := &TransactionResponse{}\n\ttransactionResponse.TotalCount = totalData\n\ttransactionResponse.Data = data\n\n\t// Convert limit to int if not empty\n\tif limit != \"\" {\n\t\tlimitInt, _ := strconv.Atoi(limit)\n\n\t\tif limitInt != -1 && limitInt != 0 {\n\t\t\t// If limit not euqal to `-1` and `0' set transaction links. 
Transaction links contain link to current page,\n\t\t\t// first page, next page and last page.\n\t\t\ttransactionResponse.Links = PaginationReponse.BuildPaginationLinks(request, totalData)\n\t\t}\n\t}\n\n\treturn transactionResponse\n}", "func paginate(size int, limit, page uint32) (uint32, uint32) {\n\tif page == 0 {\n\t\tpage = 1\n\t}\n\toffset := (page - 1) * limit\n\tstart, end := offset, uint32(size)\n\tif start >= end {\n\t\treturn 0, 0\n\t}\n\tif limit > 0 && start+limit < end {\n\t\tend = start + limit\n\t}\n\treturn start, end\n}", "func find(findStruct *Find) ([]interface{}, error) {\n\n\tvar records []interface{}\n\tvar err error\n\n\tlimit, isLimit := findStruct.Options[\"limit\"]\n\tskip, isSkip := findStruct.Options[\"isSkip\"]\n\n\tif isLimit && isSkip {\n\t\terr = findStruct.Collection.Find(findStruct.Query).Skip(skip).Limit(limit).All(&records)\n\t} else if isLimit {\n\t\terr = findStruct.Collection.Find(findStruct.Query).Limit(limit).All(&records)\n\t} else if isSkip {\n\t\terr = findStruct.Collection.Find(findStruct.Query).Skip(skip).All(&records)\n\t} else {\n\t\terr = findStruct.Collection.Find(findStruct.Query).All(&records)\n\t}\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn records, err\n}", "func findPageEx(ctx context.Context, col *mongo.Collection, maxPageSize int, filter bson.M, pa core.PageArgs, results interface{}) (core.PageInfo, int, error) {\n\t// Ensure page options are valid.\n\tf, l, a, b, err := parsePageOpts(maxPageSize, pa)\n\tif err != nil {\n\t\treturn core.PageInfo{}, 0, err\n\t}\n\n\t// Run aggregation pipeline.\n\tcur, err := col.Aggregate(ctx, getPipeline(filter, f, l, a, b))\n\tif err != nil {\n\t\treturn core.PageInfo{}, 0, err\n\t}\n\tdefer cur.Close(ctx)\n\n\t// Advance cursor to first (only) document.\n\tif ok := cur.Next(ctx); !ok {\n\t\treturn core.PageInfo{}, 0, cur.Err()\n\t}\n\n\t// Unmarshal document.\n\tpr := pageResult{}\n\tif err := cur.Decode(&pr); err != nil {\n\t\treturn core.PageInfo{}, 0, err\n\t}\n\n\t// Populate page info.\n\trvs, pi, err := getPageInfo(f, l, pr)\n\tif err != nil {\n\t\treturn core.PageInfo{}, 0, err\n\t}\n\n\t// Unmarshal results.\n\tif err := unmarshal(rvs, results); err != nil {\n\t\treturn core.PageInfo{}, 0, err\n\t}\n\treturn pi, pr.Count, nil\n}", "func (conn *Connection) GetLimitedRecords(q interface{}, n int, doc interface{}, fields ...string) error {\n\tif len(fields) == 0 {\n\t\treturn conn.collection.Find(q).Limit(n).All(doc)\n\t}\n\n\treturn conn.collection.Find(q).Sort(fields...).Limit(n).All(doc)\n}", "func getPageInfo(first, last int, pr pageResult) ([]bson.RawValue, core.PageInfo, error) {\n\trvs := pr.Results\n\tvar pi core.PageInfo\n\n\t// Determine whether there is a next page.\n\tif first != 0 && len(rvs) > first {\n\t\tif last <= 0 {\n\t\t\tpi.HasNextPage = true\n\t\t}\n\t\trvs = rvs[:first]\n\t}\n\n\t// Determine whether there is a previous page.\n\tif last != 0 && len(rvs) > last {\n\t\tif first <= 0 {\n\t\t\tpi.HasPreviousPage = true\n\t\t}\n\t\trvs = rvs[len(rvs)-last:]\n\t}\n\n\tif len(rvs) > 0 {\n\t\t// Get cursor value of first result.\n\t\tsc, err := getCursor(rvs[0])\n\t\tif err != nil {\n\t\t\treturn nil, core.PageInfo{}, err\n\t\t}\n\t\tpi.StartCursor = &sc\n\n\t\t// Get cursor value of last result.\n\t\tec, err := getCursor(rvs[len(rvs)-1])\n\t\tif err != nil {\n\t\t\treturn nil, core.PageInfo{}, err\n\t\t}\n\t\tpi.EndCursor = &ec\n\t}\n\n\treturn rvs, pi, nil\n}", "func (o OffsetPaginator) Next() OffsetPaginator {\n\tnext := o\n\tnext.Offset = next.Offset + 
next.Limit\n\treturn next\n}", "func Paginate(\n\tprefixStore types.KVStore,\n\tpageRequest *PageRequest,\n\tonResult func(key, value []byte) error,\n) (*PageResponse, error) {\n\tpageRequest = initPageRequestDefaults(pageRequest)\n\n\tif pageRequest.Offset > 0 && pageRequest.Key != nil {\n\t\treturn nil, fmt.Errorf(\"invalid request, either offset or key is expected, got both\")\n\t}\n\n\titerator := getIterator(prefixStore, pageRequest.Key, pageRequest.Reverse)\n\tdefer iterator.Close()\n\n\tvar count uint64\n\tvar nextKey []byte\n\n\tif len(pageRequest.Key) != 0 {\n\t\tfor ; iterator.Valid(); iterator.Next() {\n\t\t\tif count == pageRequest.Limit {\n\t\t\t\tnextKey = iterator.Key()\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif iterator.Error() != nil {\n\t\t\t\treturn nil, iterator.Error()\n\t\t\t}\n\t\t\terr := onResult(iterator.Key(), iterator.Value())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tcount++\n\t\t}\n\n\t\treturn &PageResponse{\n\t\t\tNextKey: nextKey,\n\t\t}, nil\n\t}\n\n\tend := pageRequest.Offset + pageRequest.Limit\n\n\tfor ; iterator.Valid(); iterator.Next() {\n\t\tcount++\n\n\t\tif count <= pageRequest.Offset {\n\t\t\tcontinue\n\t\t}\n\t\tif count <= end {\n\t\t\terr := onResult(iterator.Key(), iterator.Value())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t} else if count == end+1 {\n\t\t\tnextKey = iterator.Key()\n\n\t\t\tif !pageRequest.CountTotal {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif iterator.Error() != nil {\n\t\t\treturn nil, iterator.Error()\n\t\t}\n\t}\n\n\tres := &PageResponse{NextKey: nextKey}\n\tif pageRequest.CountTotal {\n\t\tres.Total = count\n\t}\n\n\treturn res, nil\n}", "func (p *Pagination) Offset() int {\n\treturn (p.CurrentPage - 1) * p.Limit\n}", "func (b *Bucket) Page(page int, perPage int) ([]brazier.Item, error) {\n\tvar skip int\n\n\tif page <= 0 {\n\t\treturn nil, nil\n\t}\n\n\tif perPage >= 0 {\n\t\tskip = (page - 1) * perPage\n\t}\n\n\tvar items []brazier.Item\n\terr := b.node.Select().Bucket(\"items\").Skip(skip).Limit(perPage).RawEach(func(k, v []byte) error {\n\t\titems = append(items, brazier.Item{\n\t\t\tKey: string(k),\n\t\t\tData: v,\n\t\t})\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"boltdb.bucket.Page failed to fetch items\")\n\t}\n\n\treturn items, nil\n}", "func (d *Demo) GetAllWithSkipLimit(g *gom.Gom) int64 {\n\ttoolkit.Println(\"===== Get All With Skip Limit =====\")\n\tres := []models.Hero{}\n\n\tvar cTotal int64\n\tvar err error\n\tif d.useParams {\n\t\tcTotal, err = g.Set(&gom.SetParams{\n\t\t\tTableName: \"hero\",\n\t\t\tResult: &res,\n\t\t\tTimeout: 10,\n\t\t\tSkip: 2,\n\t\t\tLimit: 1,\n\t\t}).Cmd().Get()\n\t} else {\n\t\tcTotal, err = g.Set(nil).Timeout(10).Table(\"hero\").Skip(2).Limit(1).Result(&res).Cmd().Get()\n\t}\n\n\tif err != nil {\n\t\ttoolkit.Println(err.Error())\n\t\treturn 0\n\t}\n\n\ttoolkit.Println(len(res), \"of\", cTotal)\n\n\tfor _, h := range res {\n\t\ttoolkit.Println(h)\n\t}\n\n\treturn int64(len(res))\n}", "func (this *Sorter) Limit(offset, count int) *Sorter {\n\tthis.limit = &sortLimit{\n\t\toffset: offset,\n\t\tcount: count,\n\t}\n\treturn this\n}", "func (it *ModelIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }", "func createQuery(iterOptions *iterOptions) func(coll *mgo.Collection) error {\n\treturn func(coll *mgo.Collection) error {\n\t\t// find the total count\n\t\tquery := coll.Find(iterOptions.Filter)\n\t\ttotalCount, err := query.Count()\n\t\tif err != nil {\n\t\t\titerOptions.Log.Error(\"While getting count, 
exiting: %v\", err)\n\t\t\treturn err\n\t\t}\n\t\titerOptions.Log.Info(\"Totaly we have %v items for operation\", totalCount)\n\n\t\tskip := iterOptions.Skip // this is a starting point\n\t\tindex := skip // this is the item count to be processed\n\t\tlimit := iterOptions.Limit // this will be the ending point\n\t\tcount := index + limit // total count\n\t\tsort := iterOptions.Sort\n\n\t\tif len(sort) == 0 {\n\t\t\tsort = []string{\"$natural\"}\n\t\t}\n\n\t\titeration := 0\n\t\tfor {\n\t\t\t// if we reach to the end of the all collection, exit\n\t\t\tif index >= totalCount {\n\t\t\t\titerOptions.Log.Info(\"All items are processed, exiting\")\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// this is the max re-iterating count\n\t\t\tif iteration == iterOptions.RetryCount {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// if we processed all items then exit\n\t\t\tif index == count {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\titer := query.Sort(sort...).Skip(index).Limit(count - index).Iter()\n\n\t\t\tfor iter.Next(iterOptions.Result) {\n\t\t\t\tif err := iterOptions.F(iterOptions.Result); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tindex++\n\t\t\t\titerOptions.Log.Debug(\"Index: %v\", index)\n\t\t\t}\n\n\t\t\tif err := iter.Close(); err != nil {\n\t\t\t\titerOptions.Log.Error(\"Iteration failed: %v\", err)\n\t\t\t}\n\n\t\t\tif iter.Timeout() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\titerOptions.Log.Info(\"iter existed, starting over from %v -- %v item(s) are processsed on this iter\", index+1, index-skip)\n\t\t\titeration++\n\t\t}\n\n\t\tif iteration == iterOptions.RetryCount {\n\t\t\titerOptions.Log.Info(\"Max iteration count %v reached, exiting\", iteration)\n\t\t}\n\t\titerOptions.Log.Info(\"Deleted %v items on this process\", index-skip)\n\n\t\treturn nil\n\t}\n}", "func (s UserResource) PaginatedFindAll(r api2go.Request) (uint, api2go.Responder, error) {\n\tusers, err := s.UserStorage.GetAll()\n\tif err != nil {\n\t\treturn 0, &Response{}, err\n\t}\n\n\tvar (\n\t\tresult []model.User\n\t\tkeys []string\n\t\tnumber, size string\n\t)\n\n\tfor k := range users {\n\t\ti := k\n\t\tif err != nil {\n\t\t\treturn 0, &Response{}, err\n\t\t}\n\n\t\tkeys = append(keys, i)\n\t}\n\t//sort.Ints(keys)\n\n\tnumberQuery, ok := r.QueryParams[\"page[number]\"]\n\tif ok {\n\t\tnumber = numberQuery[0]\n\t}\n\tsizeQuery, ok := r.QueryParams[\"page[size]\"]\n\tif ok {\n\t\tsize = sizeQuery[0]\n\t}\n\n\tif size != \"\" {\n\t\tsizeI, err := strconv.ParseUint(size, 10, 64)\n\t\tif err != nil {\n\t\t\treturn 0, &Response{}, err\n\t\t}\n\n\t\tnumberI, err := strconv.ParseUint(number, 10, 64)\n\t\tif err != nil {\n\t\t\treturn 0, &Response{}, err\n\t\t}\n\n\t\tstart := sizeI * (numberI - 1)\n\t\tfor i := start; i < start + sizeI; i++ {\n\t\t\tif i >= uint64(len(users)) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tresult = append(result, *users[keys[i]])\n\t\t}\n\t}\n\n\treturn uint(len(users)), &Response{Res: result}, nil\n}", "func (p *Pagination) Offset() (res int) {\n\treturn p.offset\n}", "func getPagedBunQuery(\n\tctx context.Context, query *bun.SelectQuery, offset, limit int,\n) (*apiv1.Pagination, *bun.SelectQuery, error) {\n\t// Count number of items without any limits or offsets.\n\ttotal, err := query.Count(ctx)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// Calculate end and start indexes.\n\tstartIndex := offset\n\tif offset > total || offset < -total {\n\t\tstartIndex = total\n\t} else if offset < 0 {\n\t\tstartIndex = total + offset\n\t}\n\n\tendIndex := startIndex + limit\n\tswitch {\n\tcase limit == -2:\n\t\tendIndex = 
startIndex\n\tcase limit == -1:\n\t\tendIndex = total\n\tcase limit == 0:\n\t\tendIndex = 100 + startIndex\n\t\tif total < endIndex {\n\t\t\tendIndex = total\n\t\t}\n\tcase startIndex+limit > total:\n\t\tendIndex = total\n\t}\n\n\t// Add start and end index to query.\n\tquery.Offset(startIndex)\n\tquery.Limit(endIndex - startIndex)\n\n\treturn &apiv1.Pagination{\n\t\tOffset: int32(offset),\n\t\tLimit: int32(limit),\n\t\tTotal: int32(total),\n\t\tStartIndex: int32(startIndex),\n\t\tEndIndex: int32(endIndex),\n\t}, query, nil\n}", "func (p *Pagination) OffsetLimit(total int) (offset int, limit int) {\n\tfrom, to := p.Page(total)\n\tif to == 0 || from == 0 {\n\t\treturn 0, 0\n\t}\n\treturn from - 1, to - from + 1\n}", "func fn_get__amount_pages(_c_amount_pages chan int) {\n\n\tres := fn_get__http()\n\n\tdefer res.Body.Close()\n\n\tdoc, err := goquery.NewDocumentFromReader(res.Body)\n\n\tfn_check__error(err)\n\n\t// first page + extra pages\n\tint_pages := doc.Find(\".tplPagination > ul a\").Length() + 1\n\n\t_c_amount_pages <- int_pages\n}", "func (p *Pagination) Offset() int {\n\treturn (p.CurrentPage() - 1) * p.ItemsPerPage()\n}", "func (current OffsetPageBase) getPageSize() (int, error) {\n\tvar pageSize int\n\n\tswitch pb := current.Body.(type) {\n\tcase map[string]interface{}:\n\t\tfor k, v := range pb {\n\t\t\t// ignore xxx_links\n\t\t\tif !strings.HasSuffix(k, \"links\") {\n\t\t\t\t// check the field's type. we only want []interface{} (which is really []map[string]interface{})\n\t\t\t\tswitch vt := v.(type) {\n\t\t\t\tcase []interface{}:\n\t\t\t\t\tpageSize = len(vt)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\tcase []interface{}:\n\t\tpageSize = len(pb)\n\tdefault:\n\t\terr := golangsdk.ErrUnexpectedType{}\n\t\terr.Expected = \"map[string]interface{}/[]interface{}\"\n\t\terr.Actual = fmt.Sprintf(\"%T\", pb)\n\t\treturn 0, err\n\t}\n\n\treturn pageSize, nil\n}", "func ReadPostsLimit(Offset int, Limit int) []models.PostsModel {\n\tdb, err := driver.Connect()\n\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\treturn nil\n\t}\n\n\tdefer db.Close()\n\n\tvar result []models.PostsModel\n\n\titems, err := db.Query(\"select title, content, category, status from posts Limit ?, ?\", Offset, Limit)\n\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\treturn nil\n\t}\n\n\tfmt.Printf(\"%T\\n\", items)\n\n\tfor items.Next() {\n\t\tvar each = models.PostsModel{}\n\t\tvar err = items.Scan(&each.Title, &each.Content, &each.Category, &each.Status)\n\n\t\tif err != nil {\n\t\t\tfmt.Println(err.Error())\n\t\t\treturn nil\n\t\t}\n\n\t\tresult = append(result, each)\n\n\t}\n\n\tif err = items.Err(); err != nil {\n\t\tfmt.Println(err.Error())\n\t\treturn nil\n\t}\n\n\treturn result\n}", "func getPagination(r *http.Request) (int, int) {\n\tstart := 0\n\tlimit := 25\n\tif val := strings.TrimSpace(r.URL.Query().Get(\"start\")); len(val) > 0 {\n\t\tif intVal, err := strconv.Atoi(val); err == nil || intVal > 0 {\n\t\t\tstart = intVal\n\t\t}\n\t}\n\tif val := strings.TrimSpace(r.URL.Query().Get(\"limit\")); len(val) > 0 {\n\t\tif intVal, err := strconv.Atoi(val); err == nil || intVal > 0 {\n\t\t\tlimit = intVal\n\t\t}\n\t}\n\treturn start, limit\n}", "func PaginateUserSlice(items []*models.User, page, pageSize int) []*models.User {\n\tif page != 0 {\n\t\tpage--\n\t}\n\n\tif page*pageSize >= len(items) {\n\t\treturn items[len(items):]\n\t}\n\n\titems = items[page*pageSize:]\n\n\tif len(items) > pageSize {\n\t\treturn items[:pageSize]\n\t}\n\treturn items\n}", "func (q *Query) Offset(offset, limit int) 
*Query {\n\tif limit < 1 || offset < 0 {\n\t\tlogrus.Warn(\"illegal offset or limit: \", offset, \", \", limit)\n\t\treturn q\n\t}\n\tq.offset = offset\n\tq.limit = limit\n\treturn q\n}", "func getArticles(p int) {\n\tdb, err := bolt.Open(\"../.db\", 0600, nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer db.Close()\n\n\t//display 10 articles per page\n\tIdIndex := (p-1)*10 + 1\n\tvar articles ArticlesResponse\n\tvar article Article\n\terr = db.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(\"Article\"))\n\t\tif b != nil {\n\t\t\tc := b.Cursor()\n\t\t\tk, v := c.Seek(itob(IdIndex))\n\t\t\tif k == nil {\n\t\t\t\tfmt.Println(\"Page is out of index\")\n\t\t\t\treturn errors.New(\"Page is out of index\")\n\t\t\t}\n\t\t\tkey := binary.BigEndian.Uint64(k)\n\t\t\tfmt.Print(key)\n\t\t\tif int(key) != IdIndex {\n\t\t\t\tfmt.Println(\"Page is out of index\")\n\t\t\t\treturn errors.New(\"Page is out of index\")\n\t\t\t}\n\t\t\tcount := 0\n\t\t\tvar ori_artc Article\n\t\t\tfor ; k != nil && count < 10; k, v = c.Next() {\n\t\t\t\terr = json.Unmarshal(v, &ori_artc)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tarticle.Id = ori_artc.Id\n\t\t\t\tarticle.Name = ori_artc.Name\n\t\t\t\tarticles.Articles = append(articles.Articles, article)\n\t\t\t\tcount = count + 1\n\t\t\t}\n\t\t\treturn nil\n\t\t} else {\n\t\t\treturn errors.New(\"Article Not Exists\")\n\t\t}\n\t})\n\tfor i := 0; i < len(articles.Articles); i++ {\n\t\tfmt.Println(articles.Articles[i])\n\t}\n}", "func (sch *schema) Index(page, count int, selectData []string, filter []byte) []bson.M {\n\tif page < 1 {\n\t\tpage = 1\n\t}\n\tskip := count * (page - 1)\n\tvar filterQuery interface{}\n\tbson.UnmarshalJSON(filter, &filterQuery)\n\tquery := sch.Collection.Find(filterQuery).Sort(\"_id\").Skip(skip).Limit(count)\n\n\tif len(selectData) > 1 {\n\t\tselectQuery := bson.M{}\n\t\tfor _, v := range selectData {\n\t\t\tselectQuery[v] = 1\n\t\t}\n\t\tquery = query.Select(selectQuery)\n\t}\n\n\tr := query.Iter()\n\tvar resultSet []bson.M\n\tvar p interface{}\n\tfor {\n\t\tflag := r.Next(&p)\n\t\tif !flag {\n\t\t\tbreak\n\t\t}\n\t\tresultSet = append(resultSet, p.(bson.M))\n\t}\n\n\treturn resultSet\n}", "func (c *client) readPaginatedResults(path, accept, org string, newObj func() interface{}, accumulate func(interface{})) error {\n\treturn c.readPaginatedResultsWithContext(context.Background(), path, accept, org, newObj, accumulate)\n}", "func ListArticlesWithPage(w http.ResponseWriter, r *http.Request) {\n\tvar err error\n\tvar lim_int int\n\tvar off_int int\n\tvar articles []*models.Posts\n\n\tlimit := chi.URLParam(r, \"limit\")\n\toffset := chi.URLParam(r, \"offset\")\n\n\tlim_int, err = strconv.Atoi(limit)\n\tif err != nil {\n\t\trender.Render(w, r, ErrNotFound)\n\t\treturn\n\t}\n\n\toff_int, err = strconv.Atoi(offset)\n\tif err != nil {\n\t\trender.Render(w, r, ErrNotFound)\n\t\treturn\n\t}\n\n\tarticles, err = dbListArticlePage(&lim_int, &off_int)\n\tif err != nil {\n\t\trender.Render(w, r, ErrNotFound)\n\t\treturn\n\t}\n\n\n\tif err := render.RenderList(w, r, NewArticleListResponse(articles)); err != nil {\n\t\trender.Render(w, r, ErrRender(err))\n\t\treturn\n\t}\n}", "func OffsetPagination(page int, perPage int) gitlab.OptionFunc {\n\treturn func(req *retryablehttp.Request) error {\n\t\tq := req.URL.Query()\n\t\tq.Add(\"page\", strconv.Itoa(page))\n\t\tq.Add(\"per_page\", strconv.Itoa(perPage))\n\t\treq.URL.RawQuery = q.Encode()\n\t\treturn nil\n\t}\n}", "func (p *Pagination) LimitOffset() string {\n\tif 
p.PageSize < 0 {\n\t\treturn \" \"\n\t}\n\tl := strconv.Itoa(int(p.Limit()))\n\to := strconv.Itoa(int(p.Offset()))\n\treturn ` LIMIT ` + l + ` OFFSET ` + o\n}", "func DBPageStdQuery(query *gorm.DB, request *ListStdRequest, count int) *gorm.DB {\n\tif request.Limit == nil {\n\t\trequest.Limit = aws.Int(10)\n\t}\n\tif request.Page == nil {\n\t\trequest.Page = aws.Int(1)\n\t}\n\tquery = query.Limit(*request.Limit)\n\tif *request.Page > 1 {\n\t\toffset := (*request.Page - 1) * *request.Limit\n\t\tif count <= offset {\n\t\t\toffset = int(math.Floor(float64(count) / float64(*request.Limit)))\n\t\t\t*request.Page = int(math.Ceil(float64(count) / float64(*request.Limit)))\n\t\t}\n\t\tquery = query.Offset(offset)\n\t}\n\treturn query\n}", "func (g *NgGrid) Limit() int64 {\n\treturn g.GetPageSize()\n}", "func PaginateForWeb(w http.ResponseWriter, r *http.Request) QueryProcessor {\n\tqueryParams := r.URL.Query()\n\tlimitParam := queryParams[\"limit\"]\n\toffsetParam := queryParams[\"offset\"]\n\n\tvar err error\n\tlimit := -1\n\tif limitParam != nil && len(limitParam) > 0 {\n\t\tlimit, err = strconv.Atoi(limitParam[0])\n\t\tif err != nil {\n\t\t\tlimit = -1\n\t\t}\n\t}\n\toffset := 0\n\tif offsetParam != nil && len(offsetParam) > 0 {\n\t\toffset, err = strconv.Atoi(offsetParam[0])\n\t\tif err != nil {\n\t\t\toffset = 0\n\t\t}\n\t}\n\n\treturn func(db *gorm.DB, out interface{}) (*gorm.DB, microappError.DatabaseError) {\n\n\t\tif out != nil {\n\t\t\tvar totalRecords int64\n\t\t\tif err := db.Model(out).Count(&totalRecords).Error; err != nil {\n\t\t\t\treturn db, microappError.NewDatabaseError(err)\n\t\t\t}\n\t\t\tw.Header().Add(\"Access-Control-Expose-Headers\", \"X-Total-Count\")\n\t\t\tw.Header().Set(\"X-Total-Count\", strconv.Itoa(int(totalRecords)))\n\t\t}\n\n\t\tif limit != -1 {\n\t\t\tdb = db.Limit(limit)\n\t\t}\n\t\tif offset > 0 {\n\t\t\tdb = db.Offset(offset)\n\t\t}\n\n\t\treturn db, nil\n\t}\n}", "func (or *mongoOrderRepository) FetchByRange(skip int, limit int) ([]models.Order, error) {\n\tvar orders []models.Order\n\t//Find documents\n\terr := or.Conn.C(COLLECTION).Find(bson.M{}).Skip(skip).Limit(limit).All(&orders)\n\treturn orders, err\n}", "func (c *ContactService) Scroll(scrollParam string) (ContactList, error) {\n return c.Repository.scroll(scrollParam)\n}", "func Pagination(w http.ResponseWriter, query url.Values, m martini.Context) {\n\tpage, pageErr := strconv.Atoi(query.Get(\"page\"))\n\titemsPerPage, itemsPerPageErr := strconv.Atoi(query.Get(\"items_per_page\"))\n\n\tif pageErr != nil || itemsPerPageErr != nil || page < 1 || itemsPerPage < 1 {\n\t\tm.Map(PaginationParameters{Page: 1, ItemsPerPage: 9})\n\t\treturn\n\t}\n\n\tm.Map(PaginationParameters{Page: page, ItemsPerPage: itemsPerPage})\n}", "func Pagination(limit, offset int) string {\n\tif limit < 0 || offset < 0 {\n\t\treturn \"\"\n\t}\n\treturn fmt.Sprintf(\"LIMIT %d OFFSET %d\", limit, offset)\n}", "func (db *DB) page(id pgid) *page {\n\t// buffer := db.data[]\n\t// offset := id*sizeof(PageSize)\n\t// addr := &db.data[offset]\n\t// p := (*page)addr\n\t//\n\t// return p\n\tpos := id * pgid(db.pageSize)\n\treturn (*page)(unsafe.Pointer(&db.data[pos]))\n}", "func (q *Query) Pager(page, size int) *Query {\n\tif page < 1 {\n\t\tpage = 1\n\t}\n\tif size < 1 {\n\t\tsize = 1\n\t}\n\tq.limit = [2]int{(page - 1) * size, page * size}\n\treturn q\n}", "func calPageList(p, maxPageNum int) []*page {\n\tlistSize := 15\n\thls := listSize / 2\n\tpl := make([]*page, 0, listSize)\n\n\tstart, end := p-hls, p+hls\n\tif p < hls+1 {\n\t\tstart, 
end = 1, listSize\n\t}\n\n\tif end > maxPageNum {\n\t\tend = maxPageNum\n\t}\n\n\tfor i := start; i <= end; i++ {\n\t\tpl = append(pl, &page{\n\t\t\tIsActive: i == p,\n\t\t\tPageNum: i,\n\t\t})\n\t}\n\treturn pl\n}", "func TestPaginateParams(t *testing.T) {\n\ttype TestCase struct {\n\t\treq SearchRequest\n\t\terr error\n\t}\n\n\tcases := []TestCase{\n\t\t{\n\t\t\treq: SearchRequest{Limit: -5, Offset: 3},\n\t\t\terr: fmt.Errorf(\"limit must be > 0\"),\n\t\t},\n\t\t{\n\t\t\treq: SearchRequest{Offset: -3, Limit: 2},\n\t\t\terr: fmt.Errorf(\"offset must be > 0\"),\n\t\t},\n\t}\n\n\tts := NewTestServer(allowedAccessToken)\n\tdefer ts.Close()\n\n\tfor caseNum, item := range cases {\n\t\t_, err := ts.Client.FindUsers(item.req)\n\t\tif err.Error() != item.err.Error() {\n\t\t\tt.Errorf(\"[%d] invalid error, expected %, got %v\", caseNum, item.err, err)\n\t\t}\n\t}\n}", "func (p *Pagination) LimitOffset() string {\n\tif p.PageSize < 0 {\n\t\treturn \"\"\n\t}\n\tl := Int32ToString(p.Limit())\n\to := Int32ToString(p.Offset())\n\treturn `LIMIT ` + l + ` OFFSET ` + o\n}", "func pageRange(p *params, n int) (int, int) {\n\tif p.Count == 0 && p.Offset == 0 {\n\t\treturn 0, n\n\t}\n\tif p.Count < 0 {\n\t\t// Items from the back of the array, like Python arrays. Do a postive mod n.\n\t\treturn (((n + p.Count) % n) + n) % n, n\n\t}\n\tstart := p.Offset\n\tif start < 0 {\n\t\tstart = 0\n\t}\n\tif p.Count == 0 { // No count specified. Just take the offset parameter.\n\t\treturn start, n\n\t}\n\tend := start + p.Count\n\tif end > n {\n\t\tend = n\n\t}\n\treturn start, end\n}", "func (p *Pagination) Limit() (res int) {\n\treturn p.limit\n}", "func (qb *BasePaginatedMongoListQueryBuilder) PaginationQuery(parameters resource.ListQuery) (bson.M, *options.FindOptions) {\n\tquery := bson.M{}\n\tqueryOptions := options.Find()\n\n\t// if params do not match the required format, log and return empty filter\n\tval := reflect.ValueOf(parameters).Elem()\n\tif val.Kind() != reflect.Struct {\n\t\tqb.Logger.Error(\"Unexpected type of parameters for PaginationQuery\")\n\t\treturn query, queryOptions\n\t}\n\tpaginationParameters := val.FieldByName(\"PaginatedListQuery\")\n\thasPaginationParams := paginationParameters.IsValid() && !paginationParameters.IsNil()\n\n\t// Parsing page number\n\tvar page int64\n\tpage = 0\n\tif hasPaginationParams {\n\t\tpageValue := val.FieldByName(\"Page\")\n\t\tif !pageValue.IsValid() || pageValue.Kind() != reflect.Int {\n\t\t\tqb.Logger.Info(\"Page in in invalid format, Using default value\")\n\t\t} else {\n\t\t\tpage = pageValue.Int()\n\t\t}\n\t}\n\n\t// Parsing page size\n\tvar pageSize int64\n\tpageSize = DefaultPageSize\n\tif hasPaginationParams {\n\t\tpageSizeValue := val.FieldByName(\"PageSize\")\n\t\tif !pageSizeValue.IsValid() || pageSizeValue.Kind() != reflect.Int {\n\t\t\tqb.Logger.Info(\"PageSize in in invalid format, Using default value\")\n\t\t} else {\n\t\t\tpageSize = pageSizeValue.Int()\n\t\t}\n\t}\n\n\t// Applying Pagination to query\n\tlimit := pageSize\n\toffset := page * pageSize\n\n\tqueryOptions.SetSkip(offset)\n\tqueryOptions.SetLimit(limit)\n\n\t// Parsing orderBy\n\tvar orderBy = \"\"\n\tif hasPaginationParams {\n\t\torderByValue := val.FieldByName(\"OrderBy\")\n\t\tif orderByValue.IsValid() && orderByValue.Kind() == reflect.String {\n\t\t\torderBy = orderByValue.String()\n\t\t}\n\t}\n\n\t// Return if order by is not present\n\tif orderBy == \"\" {\n\t\treturn query, queryOptions\n\t}\n\n\t// Parsing orderBy descending or ascending\n\tvar orderDesc = false\n\tif 
hasPaginationParams {\n\t\torderDescValue := val.FieldByName(\"OrderDesc\")\n\t\tif !orderDescValue.IsValid() || orderDescValue.Kind() != reflect.Bool {\n\t\t\tqb.Logger.Info(\"OrderDesc in in invalid format, Using default value\")\n\t\t} else {\n\t\t\torderDesc = orderDescValue.Bool()\n\t\t}\n\t}\n\n\t// Adding sort to query\n\tif len(orderBy) > 0 {\n\t\tif orderDesc {\n\t\t\tqueryOptions.SetSort(bson.M{orderBy: -1})\n\t\t} else {\n\t\t\tqueryOptions.SetSort(bson.M{orderBy: 1})\n\t\t}\n\t}\n\n\treturn query, queryOptions\n}", "func (c *Collection) Pages() []Page {\n\treturn c.pages\n}", "func (pagination *Pagination) Limit(limit int) *Pagination {\n\tpagination.defaultLimit = limit\n\treturn pagination\n}", "func (it *DatasetIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }", "func (p PageRequest) Offset() int64 {\n\treturn p.PerPage * (p.Page - 1)\n}", "func (d *Demo) GetAllWithLimit(g *gom.Gom) int64 {\n\ttoolkit.Println(\"===== Get All With Limit =====\")\n\tres := []models.Hero{}\n\n\tvar cTotal int64\n\tvar err error\n\tif d.useParams {\n\t\tcTotal, err = g.Set(&gom.SetParams{\n\t\t\tTableName: \"hero\",\n\t\t\tResult: &res,\n\t\t\tTimeout: 10,\n\t\t\tLimit: 2,\n\t\t}).Cmd().Get()\n\t} else {\n\t\tcTotal, err = g.Set(nil).Timeout(10).Table(\"hero\").Limit(3).Result(&res).Cmd().Get()\n\t}\n\n\tif err != nil {\n\t\ttoolkit.Println(err.Error())\n\t\treturn 0\n\t}\n\n\ttoolkit.Println(len(res), \"of\", cTotal)\n\n\tfor _, h := range res {\n\t\ttoolkit.Println(h)\n\t}\n\n\treturn int64(len(res))\n}", "func getPageRequest(req hasPageRequest) *query.PageRequest {\n\tvar pageRequest *query.PageRequest\n\tif req != nil {\n\t\tpageRequest = req.GetPagination()\n\t}\n\tif pageRequest == nil {\n\t\tpageRequest = &query.PageRequest{}\n\t}\n\tif pageRequest.Limit == 0 {\n\t\tpageRequest.Limit = defaultLimit\n\t}\n\treturn pageRequest\n}", "func (e *linodeTypePaginated) pageNumber() int {\n\treturn e.Page\n}", "func setLimitAndOffset(params dragonfruit.QueryParams) (limit int,\n\toffset int) {\n\n\tlimit, offset = 10, 0\n\n\tl := params.QueryParams.Get(\"limit\")\n\n\tif l != \"\" {\n\t\tswitch l := l.(type) {\n\t\tcase int64:\n\t\t\tlimit = int(l)\n\t\tcase int:\n\t\t\tlimit = l\n\t\t}\n\n\t\tparams.QueryParams.Del(\"limit\")\n\t}\n\n\to := params.QueryParams.Get(\"offset\")\n\tif o != \"\" {\n\t\tswitch o := o.(type) {\n\t\tcase int64:\n\t\t\toffset = int(o)\n\t\tcase int:\n\t\t\toffset = o\n\t\t}\n\t\tparams.QueryParams.Del(\"offset\")\n\t}\n\n\treturn\n}", "func (db *DB) Offset(offset int) (tx *DB) {\n\ttx = db.getInstance()\n\ttx.Statement.AddClause(clause.Limit{Offset: offset})\n\treturn\n}", "func (p *Pagination) SimplePage() (from int, to int) {\n\tif p.Num == 0 || p.Size == 0 {\n\t\tp.Num, p.Size = 1, DefaultSize\n\t}\n\tfrom = (p.Num-1)*p.Size + 1\n\tto = from + p.Size - 1\n\treturn\n}", "func TestGetPageSize(t *testing.T) {\n\tcases := []struct {\n\t\toffset, total, limit, expected int64\n\t\tname string\n\t}{\n\t\t{1, 0, 50, 0, \"no results\"},\n\t\t{1, 1, 50, 1, \"first partial page\"},\n\t\t{1, 50, 50, 50, \"first full page\"},\n\t\t{2, 100, 50, 50, \"second page full\"},\n\t\t{2, 51, 50, 1, \"last partial page\"},\n\t}\n\tfor _, tt := range cases {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tactual := getPageSize(tt.offset, tt.total, tt.limit)\n\t\t\tassert.Equal(t, tt.expected, actual, \"actual didn't match expected\")\n\t\t})\n\t}\n}", "func FetchPage(sqlCountRows, sqlFetchRows string, pageNo, pageSize int, args ...interface{}) (*model.Page, error) {\n\tif pageSize 
<= 0 {\n\t\treturn nil, errors.New(\"pageSize can't greater than 0\")\n\t}\n\n\tstm, err := common.GDBConn.Prepare(sqlCountRows)\n\tdefer stm.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trowCount := 0\n\terr = stm.QueryRow(args...).Scan(&rowCount)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpageCount := rowCount / pageSize\n\tif rowCount > pageSize*pageCount {\n\t\tpageCount++\n\t}\n\n\tif pageNo > pageCount {\n\t\treturn nil, errors.New(\"pageNo can't greater than pageCount\")\n\t}\n\n\tstartRow := (pageNo - 1) * pageSize\n\tselectSQL := fmt.Sprintf(\"%v limit %v , %v\", sqlFetchRows, startRow, pageSize)\n\tsstm, err := common.GDBConn.Prepare(selectSQL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trows, err := sstm.Query(args...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpageItems := make([]interface{}, 0, pageSize)\n\tfor rows.Next() {\n\t\tconfigInfo := &model.ConfigInfo{}\n\t\terr := rows.Scan(&configInfo.ID, &configInfo.DataID, &configInfo.Group, &configInfo.Content, &configInfo.MD5, &configInfo.LastModified)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpageItems = append(pageItems, configInfo)\n\t}\n\n\tpage := &model.Page{PageNO: pageNo, PageAvailable: pageCount, TotalCount: rowCount, PageItems: pageItems}\n\treturn page, nil\n}", "func (s BmUnitResource) PaginatedFindAll(r api2go.Request) (uint, api2go.Responder, error) {\n\tvar (\n\t\tresult []BmModel.Unit\n\t\tnumber, size, offset, limit string\n\t)\n\n\tnumberQuery, ok := r.QueryParams[\"page[number]\"]\n\tif ok {\n\t\tnumber = numberQuery[0]\n\t}\n\tsizeQuery, ok := r.QueryParams[\"page[size]\"]\n\tif ok {\n\t\tsize = sizeQuery[0]\n\t}\n\toffsetQuery, ok := r.QueryParams[\"page[offset]\"]\n\tif ok {\n\t\toffset = offsetQuery[0]\n\t}\n\tlimitQuery, ok := r.QueryParams[\"page[limit]\"]\n\tif ok {\n\t\tlimit = limitQuery[0]\n\t}\n\n\tif size != \"\" {\n\t\tsizeI, err := strconv.ParseInt(size, 10, 64)\n\t\tif err != nil {\n\t\t\treturn 0, &Response{}, err\n\t\t}\n\n\t\tnumberI, err := strconv.ParseInt(number, 10, 64)\n\t\tif err != nil {\n\t\t\treturn 0, &Response{}, err\n\t\t}\n\n\t\tstart := sizeI * (numberI - 1)\n\t\tfor _, iter := range s.BmUnitStorage.GetAll(r, int(start), int(sizeI)) {\n\t\t\tresult = append(result, *iter)\n\t\t}\n\n\t} else {\n\t\tlimitI, err := strconv.ParseUint(limit, 10, 64)\n\t\tif err != nil {\n\t\t\treturn 0, &Response{}, err\n\t\t}\n\n\t\toffsetI, err := strconv.ParseUint(offset, 10, 64)\n\t\tif err != nil {\n\t\t\treturn 0, &Response{}, err\n\t\t}\n\n\t\tfor _, iter := range s.BmUnitStorage.GetAll(r, int(offsetI), int(limitI)) {\n\t\t\tresult = append(result, *iter)\n\t\t}\n\t}\n\n\tin := BmModel.Unit{}\n\tcount := s.BmUnitStorage.Count(in)\n\n\treturn uint(count), &Response{Res: result}, nil\n}", "func RandomPaginationOffset(b *testing.B, numLinks, i int) *store.MapFilter {\n\treturn &store.MapFilter{\n\t\tPagination: store.Pagination{\n\t\t\tOffset: rand.Int() % numLinks,\n\t\t\tLimit: store.DefaultLimit,\n\t\t},\n\t}\n}", "func WithPagination(offset uint64, limit uint64) func(b *sq.SelectBuilder) {\n\treturn func(b *sq.SelectBuilder) {\n\t\t*b = b.Offset(offset).Limit(limit)\n\t}\n}", "func (s *NewsService) Query(rs app.RequestScope, offset, limit int) ([]models.News, error) {\n\treturn s.dao.Query(rs, offset, limit)\n}", "func (s *SearchService) Context(opts map[string]interface{}, page models.Page) ([]map[string]interface{}, error) {\n\tvar err error\n\terr = s.resetESClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\toffsetLabel := 
config.LogOffsetLabel()\n\toffset_, ok := opts[offsetLabel]\n\tif !ok {\n\t\treturn nil, errors.New(\"offset should not be nil\")\n\t}\n\n\tdelete(opts, offsetLabel)\n\toffset, err := strconv.ParseInt(offset_.(string), 10, 64)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar results []map[string]interface{}\n\tvar querys []elastic.Query\n\tfor k, v := range opts {\n\t\tquerys = append(querys, elastic.NewTermQuery(k, v))\n\t}\n\n\tif page.PageFrom == 0 {\n\t\tbquery := elastic.NewBoolQuery().\n\t\t\tFilter(elastic.NewRangeQuery(offsetLabel).Lt(offset)).\n\t\t\tMust(querys...)\n\n\t\tresult, err := s.ESClient.Search().\n\t\t\tIndex(\"dataman-*\").\n\t\t\tQuery(bquery).\n\t\t\tSort(\"logtime.sort\", false).\n\t\t\tFrom(0).\n\t\t\tSize(page.PageSize).\n\t\t\tPretty(true).\n\t\t\tDo()\n\n\t\tif err != nil && err.(*elastic.Error).Status == http.StatusNotFound {\n\t\t\treturn nil, nil\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor i := len(result.Hits.Hits) - 1; i >= 0; i-- {\n\t\t\tdata := make(map[string]interface{})\n\t\t\tjson.Unmarshal(*result.Hits.Hits[i].Source, &data)\n\t\t\tresults = append(results, data)\n\t\t}\n\t}\n\n\tbquery := elastic.NewBoolQuery().\n\t\tFilter(elastic.NewRangeQuery(offsetLabel).Gte(offset)).\n\t\tMust(querys...)\n\n\tresult, err := s.ESClient.Search().\n\t\tIndex(\"dataman-*\").\n\t\tQuery(bquery).\n\t\tSort(\"logtime.sort\", true).\n\t\tFrom(page.PageFrom).\n\t\tSize(page.PageSize).\n\t\tPretty(true).\n\t\tDo()\n\n\tif err != nil && err.(*elastic.Error).Status == http.StatusNotFound {\n\t\treturn nil, nil\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, hit := range result.Hits.Hits {\n\t\tdata := make(map[string]interface{})\n\t\tjson.Unmarshal(*hit.Source, &data)\n\t\tresults = append(results, data)\n\t}\n\n\treturn results, nil\n}", "func GetNews(limit int, offset int) (v []*News, err error) {\n\to := orm.NewOrm()\n\n\tvar news []*News\n\tqs, errorGet := o.QueryTable(\"news\").Limit(limit).Offset(offset).All(&news)\n\n\tfmt.Println(qs, news)\n\n\treturn news, errorGet\n\n}", "func Limit(data []User, limit, offset string) ([]User, error) {\n\tlimitInt, err := strconv.Atoi(limit)\n\tif err != nil || limitInt < 0 {\n\t\treturn []User{}, err\n\t}\n\toffsetInt, err := strconv.Atoi(offset)\n\tif err != nil || offsetInt < 0 {\n\t\treturn []User{}, err\n\t}\n\n\tswitch {\n\tcase offsetInt >= len(data):\n\t\treturn []User{}, nil\n\tcase offsetInt+limitInt > len(data):\n\t\treturn data[offsetInt:], nil\n\tdefault:\n\t\treturn data[offsetInt : offsetInt+limitInt], nil\n\t}\n}", "func Posts(c *gin.Context) {\r\n\tlimit, _ := strconv.Atoi(c.DefaultQuery(\"limit\", \"10\"))\r\n\toffset, _ := strconv.Atoi(c.DefaultQuery(\"offset\", \"0\"))\r\n\r\n\tvar posts []Post\r\n\tdb.Limit(limit).Offset(offset).Find(&posts)\r\n\tc.JSON(http.StatusOK, gin.H{\r\n\t\t\"messege\": \"\",\r\n\t\t\"data\": posts,\r\n\t})\r\n}", "func OffsetToPage(offset, size int) int {\n\treturn (offset / size) + 1\n}", "func getPage(c *gin.Context) {\n\tfmt.Println(\"getPage\")\n\n\t// TODO: check mode\n\t// if local\n\tr := getFilestoreDoc(c.Param(\"id\"))\n\n\t// if firestore\n\t// TODO: add firestore\n\n\tc.JSON(http.StatusOK, r)\n}" ]
[ "0.62947685", "0.6278961", "0.6123229", "0.60996085", "0.60073113", "0.59673357", "0.5958331", "0.5937828", "0.5937828", "0.59175867", "0.5915382", "0.5893339", "0.58894", "0.58830416", "0.58767486", "0.5874895", "0.58610237", "0.5832857", "0.5831853", "0.5828642", "0.5751818", "0.57490444", "0.57410073", "0.5720975", "0.57049066", "0.5683874", "0.56687415", "0.5657714", "0.5646232", "0.5638729", "0.56202775", "0.56197286", "0.5608196", "0.55934024", "0.5585032", "0.5580367", "0.5563028", "0.5562462", "0.5550614", "0.55378455", "0.55304384", "0.5527228", "0.5526705", "0.55085343", "0.55076444", "0.5497048", "0.5484358", "0.5468051", "0.5464807", "0.54415524", "0.54312885", "0.5400727", "0.5386169", "0.53820634", "0.53776944", "0.5367897", "0.5365774", "0.5354748", "0.53543293", "0.5343331", "0.5341252", "0.53096384", "0.528658", "0.5279174", "0.52732253", "0.52714115", "0.525253", "0.52444506", "0.5228126", "0.521615", "0.5212884", "0.5211837", "0.5199349", "0.51991904", "0.51973855", "0.51895255", "0.51872814", "0.51858455", "0.5170058", "0.5168421", "0.51551086", "0.5153838", "0.5150338", "0.51392937", "0.5126604", "0.51262057", "0.5123359", "0.51219726", "0.51174146", "0.51117355", "0.51110876", "0.5109083", "0.510658", "0.5105697", "0.5095111", "0.5094831", "0.50895536", "0.5086485", "0.50835407", "0.5071902" ]
0.6210712
2
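A minimal usage sketch for the Calcpage helper in the record above: it calls the function exactly as defined there and prints the offset/limit pair for the first few pages. The package main scaffolding and the loop are illustrative additions, and note that the helper's offset is 1-based ((page-1)*10 + 1), so a consumer expecting a 0-based offset (e.g. SQL OFFSET) would need to subtract one.

package main

import "fmt"

// Calcpage maps a 1-based page number to a 1-based row offset and a
// fixed page size of 10, exactly as defined in the record above.
func Calcpage(page int) (int, int) {
	page -= 1
	limit := 10
	offset := (page * limit) + 1
	return offset, limit
}

func main() {
	for page := 1; page <= 3; page++ {
		offset, limit := Calcpage(page)
		fmt.Printf("page %d -> offset %d, limit %d\n", page, offset, limit)
	}
}

Running this prints offset 1 for page 1, offset 11 for page 2, and offset 21 for page 3, each with limit 10.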
String returns the string representation
func (s DescribeUserHierarchyStructureInput) String() string { return awsutil.Prettify(s) }
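A self-contained sketch of the same Stringer-via-Prettify pattern, assuming aws-sdk-go v1's awsutil package (github.com/aws/aws-sdk-go/aws/awsutil, whose Prettify reflects over a value and pretty-prints its fields). ExampleInput and its field names are invented purely for illustration; only the String method mirrors the generated SDK code above.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awsutil"
)

// ExampleInput is a stand-in struct invented for this sketch.
type ExampleInput struct {
	HierarchyGroupId *string
	MaxResults       *int64
}

// String returns the string representation, mirroring the generated
// method shown above.
func (s ExampleInput) String() string {
	return awsutil.Prettify(s)
}

func main() {
	id := "example-group-id"
	max := int64(25)
	in := ExampleInput{HierarchyGroupId: &id, MaxResults: &max}
	fmt.Println(in) // fmt picks up the Stringer implementation
}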
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s CreateAlgorithmOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateAlgorithmOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateCanaryOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s Library) String() string {\n\tres := make([]string, 5)\n\tres[0] = \"ID: \" + reform.Inspect(s.ID, true)\n\tres[1] = \"UserID: \" + reform.Inspect(s.UserID, true)\n\tres[2] = \"VolumeID: \" + reform.Inspect(s.VolumeID, true)\n\tres[3] = \"CreatedAt: \" + reform.Inspect(s.CreatedAt, true)\n\tres[4] = \"UpdatedAt: \" + reform.Inspect(s.UpdatedAt, true)\n\treturn strings.Join(res, \", \")\n}", "func (r Info) String() string {\n\tJSON, err := json.Marshal(r)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn string(JSON)\n}", "func (s ReEncryptOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateFHIRDatastoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func String() string {\n\toutput := output{\n\t\tRerun: Rerun,\n\t\tVariables: Variables,\n\t\tItems: Items,\n\t}\n\tvar err error\n\tvar b []byte\n\tif Indent == \"\" {\n\t\tb, err = json.Marshal(output)\n\t} else {\n\t\tb, err = json.MarshalIndent(output, \"\", Indent)\n\t}\n\tif err != nil {\n\t\tmessageErr := Errorf(\"Error in parser. Please report this output to https://github.com/drgrib/alfred/issues: %v\", err)\n\t\tpanic(messageErr)\n\t}\n\ts := string(b)\n\treturn s\n}", "func (s CreateQuickConnectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r *Registry) String() string {\n\tout := make([]string, 0, len(r.nameToObject))\n\tfor name, object := range r.nameToObject {\n\t\tout = append(out, fmt.Sprintf(\"* %s:\\n%s\", name, object.serialization))\n\t}\n\treturn strings.Join(out, \"\\n\\n\")\n}", "func (s CreateSceneOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateSafetyRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateLanguageModelOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o QtreeCreateResponse) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func (r SendAll) String() string {\n\tJSON, err := json.Marshal(r)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn string(JSON)\n}", "func (r ReceiveAll) String() string {\n\tJSON, err := json.Marshal(r)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn string(JSON)\n}", "func (enc *simpleEncoding) String() string {\n\treturn \"simpleEncoding(\" + enc.baseName + \")\"\n}", "func (s CreateDatabaseOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (z Zamowienium) String() string {\n\tjz, _ := json.Marshal(z)\n\treturn string(jz)\n}", "func (s CreateHITTypeOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProgramOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateEntityOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProjectVersionOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Addshifttraderequest) String() string {\n \n \n \n \n o.AcceptableIntervals = []string{\"\"} \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s CreateUseCaseOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r Rooms) String() string {\n\tjr, _ := json.Marshal(r)\n\treturn string(jr)\n}", "func (i Info) String() string {\n\ts, _ := i.toJSON()\n\treturn s\n}", "func (o 
*Botversionsummary) String() string {\n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (e ExternalCfps) String() string {\n\tje, _ := json.Marshal(e)\n\treturn string(je)\n}", "func (s CreateTrustStoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func String() string {\n\treturn fmt.Sprintf(\n\t\t\"AppVersion = %s\\n\"+\n\t\t\t\"VCSRef = %s\\n\"+\n\t\t\t\"BuildVersion = %s\\n\"+\n\t\t\t\"BuildDate = %s\",\n\t\tAppVersion, VCSRef, BuildVersion, Date,\n\t)\n}", "func (s CreateDataLakeOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateSolutionVersionOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s GetSceneOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (i NotMachine) String() string { return toString(i) }", "func (s CreateRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s StartPipelineReprocessingOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateDatastoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateSequenceStoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Adjustablelivespeakerdetection) String() string {\n \n \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s CreateRateBasedRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r Resiliency) String() string {\n\tb, _ := json.Marshal(r)\n\treturn string(b)\n}", "func (s RestoreFromRecoveryPointOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateWaveOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o QtreeCreateResponseResult) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func (s CreateRoomOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateBotLocaleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s DeleteAlgorithmOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (z Zamowienia) String() string {\n\tjz, _ := json.Marshal(z)\n\treturn string(jz)\n}", "func (i *Info) String() string {\n\tb, _ := json.Marshal(i)\n\treturn string(b)\n}", "func (s ProcessingFeatureStoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ExportProjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r RoomOccupancies) String() string {\n\tjr, _ := json.Marshal(r)\n\treturn string(jr)\n}", "func (r *InterRecord) String() string {\n\tbuf := r.Bytes()\n\tdefer ffjson.Pool(buf)\n\n\treturn string(buf)\n}", "func (s CreateResolverRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateResolverRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateResolverRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Coretype) String() string {\n \n \n \n \n \n o.ValidationFields = []string{\"\"} \n \n o.ItemValidationFields = []string{\"\"} \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s CreateLayerOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateModelCardOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o 
*Limitchangerequestdetails) String() string {\n \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s NetworkPathComponentDetails) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (t Terms) String() string {\n\tjt, _ := json.Marshal(t)\n\treturn string(jt)\n}", "func (g GetObjectOutput) String() string {\n\treturn helper.Prettify(g)\n}", "func (s StartContactEvaluationOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Interactionstatsalert) String() string {\n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s CreateModelOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateModelOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Digitalcondition) String() string {\n\tj, _ := json.Marshal(o)\n\tstr, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n\treturn str\n}", "func (r RoomOccupancy) String() string {\n\tjr, _ := json.Marshal(r)\n\treturn string(jr)\n}", "func (d *Diagram) String() string { return toString(d) }", "func (o *Outboundroute) String() string {\n \n \n \n \n o.ClassificationTypes = []string{\"\"} \n \n \n o.ExternalTrunkBases = []Domainentityref{{}} \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s CreateCodeRepositoryOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateActivationOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateBotOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ResolutionTechniques) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateTrialComponentOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (c CourseCode) String() string {\n\tjc, _ := json.Marshal(c)\n\treturn string(jc)\n}", "func (p *Parms) String() string {\n\tout, _ := json.MarshalIndent(p, \"\", \"\\t\")\n\treturn string(out)\n}", "func (p polynomial) String() (str string) {\n\tfor _, m := range p.monomials {\n\t\tstr = str + \" \" + m.String() + \" +\"\n\t}\n\tstr = strings.TrimRight(str, \"+\")\n\treturn \"f(x) = \" + strings.TrimSpace(str)\n}", "func (s CreateThingOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r *RUT) String() string {\n\treturn r.Format(DefaultFormatter)\n}", "func (s CreatePatchBaselineOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Crossplatformpolicycreate) String() string {\n \n \n \n \n \n \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s BotVersionLocaleDetails) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s LifeCycleLastTestInitiated) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s DeleteMultiplexProgramOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s GetObjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s 
LifeCycleLastTestReverted) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateDocumentOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateComponentOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateIntegrationOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Commonruleconditions) String() string {\n o.Clauses = []Commonruleconditions{{}} \n o.Predicates = []Commonrulepredicate{{}} \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (t Test1s) String() string {\n\tjt, _ := json.Marshal(t)\n\treturn string(jt)\n}", "func (o *Directrouting) String() string {\n\tj, _ := json.Marshal(o)\n\tstr, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n\treturn str\n}", "func (s CreateContactFlowOutput) String() string {\n\treturn awsutil.Prettify(s)\n}" ]
[ "0.7215058", "0.7215058", "0.72000957", "0.7199919", "0.7177383", "0.7166947", "0.7118059", "0.7087492", "0.70870787", "0.7079275", "0.70782894", "0.7067719", "0.7031721", "0.70269966", "0.7026298", "0.70251423", "0.7021565", "0.70164025", "0.701059", "0.7010184", "0.70022964", "0.6997043", "0.6996532", "0.6992619", "0.69909185", "0.69900763", "0.69862556", "0.6985364", "0.6975378", "0.69738907", "0.69624275", "0.6961772", "0.69603413", "0.69507927", "0.6946753", "0.69460964", "0.69460964", "0.6944943", "0.694029", "0.69369334", "0.69332623", "0.69287163", "0.692656", "0.6924643", "0.69216746", "0.69213074", "0.69181406", "0.6917802", "0.6911058", "0.69104654", "0.6909528", "0.690845", "0.690454", "0.6899065", "0.6896141", "0.6894107", "0.6894107", "0.6894107", "0.68921995", "0.68920684", "0.689124", "0.68893504", "0.688871", "0.6884391", "0.6882336", "0.6880731", "0.68767136", "0.68766147", "0.68766147", "0.68751997", "0.68735147", "0.68734384", "0.68731403", "0.6871602", "0.6869421", "0.68684965", "0.68677104", "0.68677104", "0.68677104", "0.68677104", "0.68673396", "0.68622416", "0.6862084", "0.6859391", "0.6857645", "0.6853781", "0.68523467", "0.6851581", "0.6846037", "0.6844023", "0.6843859", "0.68434954", "0.68419206", "0.68416274", "0.684033", "0.6839815", "0.68363225", "0.6835165", "0.68334675", "0.68327725", "0.6832733" ]
0.0
-1
Validate inspects the fields of the type to determine if they are valid.
func (s *DescribeUserHierarchyStructureInput) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "DescribeUserHierarchyStructureInput"} if s.InstanceId == nil { invalidParams.Add(aws.NewErrParamRequired("InstanceId")) } if s.InstanceId != nil && len(*s.InstanceId) < 1 { invalidParams.Add(aws.NewErrParamMinLen("InstanceId", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (f *InfoField) Validate() error {\n\tif err := f.BWCls.Validate(); err != nil {\n\t\treturn err\n\t}\n\tif err := f.RLC.Validate(); err != nil {\n\t\treturn err\n\t}\n\tif err := f.Idx.Validate(); err != nil {\n\t\treturn err\n\t}\n\tif err := f.PathType.Validate(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (f FieldSpec) Validate() error {\n\tif f.Name == \"\" {\n\t\treturn errors.New(\"Field name required\")\n\t}\n\n\tif f.Type == \"\" {\n\t\treturn errors.New(\"Field type required\")\n\t}\n\n\treturn nil\n}", "func (p *Pass) FieldsValid() bool {\n\tfmt.Printf(\"validating: \")\n\tvalid := true\n\tfor k, v := range *p {\n\t\tfmt.Printf(\"%v...\", k)\n\t\tv := isFieldValid(k, v)\n\t\tvalid = valid && v\n\t\tif v {\n\t\t\tfmt.Printf(\"VALID \")\n\t\t} else {\n\t\t\tfmt.Printf(\"INVALID \")\n\t\t}\n\t}\n\n\tfmt.Println(\"\")\n\treturn valid\n}", "func (m Type) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func (m *TestFieldsEx2) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateFieldType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateProjectID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func Validate(instance interface{}) string {\n\tval := unwrap(reflect.ValueOf(instance))\n\ttyp := val.Type()\n\n\tif typ.Kind() != reflect.Struct {\n\t\tcore.DefaultLogger.Panic(\"The provided instance is not a struct\")\n\t}\n\n\tvar result []string\n\n\tfor i := 0; i < typ.NumField(); i++ {\n\t\tfield := typ.Field(i)\n\t\tfieldTag := field.Tag\n\t\tif len(fieldTag) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfieldVal := val.Field(i)\n\t\tfieldKind := fieldVal.Kind()\n\t\tif !fieldVal.CanInterface() || fieldKind == reflect.Invalid {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar toEval []evalContext\n\t\tvar requiredCtx *evalContext\n\n\t\tfor _, v := range validators {\n\t\t\tif param, found := fieldTag.Lookup(v.key); found {\n\t\t\t\tctx := evalContext{validator: v, param: param}\n\n\t\t\t\tif v.key == required.key {\n\t\t\t\t\trequiredCtx = &ctx\n\t\t\t\t} else {\n\t\t\t\t\ttoEval = append(toEval, ctx)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif len(toEval) == 0 && requiredCtx == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif requiredCtx == nil {\n\t\t\trequiredCtx = &evalContext{validator: required, param: \"true\"}\n\t\t}\n\n\t\tvar errors []string\n\t\teval := func(ctx evalContext) bool {\n\t\t\tif err := ctx.validator.fn(fieldVal, ctx.param); len(err) > 0 {\n\t\t\t\terrors = append(errors, err)\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn true\n\t\t}\n\n\t\tif eval(*requiredCtx) {\n\t\t\tfor _, ctx := range toEval {\n\t\t\t\teval(ctx)\n\t\t\t}\n\t\t}\n\n\t\tif len(errors) > 0 {\n\t\t\tresult = append(result, fmt.Sprintf(\"%s: %s\", field.Name, strings.Join(errors, \", \")))\n\t\t}\n\t}\n\n\treturn strings.Join(result, \"; \")\n}", "func (info *structInfo) fieldValid(i int, t reflect.Type) bool {\n\treturn info.field(i).isValid(i, t)\n}", "func (v *ClassValue) Valid() bool {\n\tfor _, f := range v.Fields {\n\t\tif !f.Valid() {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (self *StructFieldDef) Validate() error {\n\tif self.Name == \"\" {\n\t\treturn fmt.Errorf(\"StructFieldDef.name is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"Identifier\", self.Name)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"StructFieldDef.name does not contain a valid Identifier (%v)\", 
val.Error)\n\t\t}\n\t}\n\tif self.Type == \"\" {\n\t\treturn fmt.Errorf(\"StructFieldDef.type is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Type)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"StructFieldDef.type does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Comment != \"\" {\n\t\tval := Validate(RdlSchema(), \"String\", self.Comment)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"StructFieldDef.comment does not contain a valid String (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Items != \"\" {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Items)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"StructFieldDef.items does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Keys != \"\" {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Keys)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"StructFieldDef.keys does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\treturn nil\n}", "func (v *Validator) Validate(data interface{}) (bool, []string, error) {\n\t//validate and check for any errors, reading explicit validations errors and returning\n\t//a list of fields that failed or the error\n\terr := v.Validator.Struct(data)\n\tif err != nil {\n\t\tvalidationErrs, ok := err.(validator.ValidationErrors)\n\t\tif !ok {\n\t\t\treturn false, nil, errors.Wrap(err, \"validate\")\n\t\t}\n\t\tfields := make([]string, 0)\n\t\tfor _, validationErr := range validationErrs {\n\t\t\tfields = append(fields, validationErr.Field())\n\t\t}\n\t\treturn false, fields, nil\n\t}\n\treturn true, nil, nil\n}", "func validateFields(req *logical.Request, data *framework.FieldData) error {\n\tvar unknownFields []string\n\tfor k := range req.Data {\n\t\tif _, ok := data.Schema[k]; !ok {\n\t\t\tunknownFields = append(unknownFields, k)\n\t\t}\n\t}\n\n\tif len(unknownFields) > 0 {\n\t\t// Sort since this is a human error\n\t\tsort.Strings(unknownFields)\n\n\t\treturn fmt.Errorf(\"unknown fields: %q\", unknownFields)\n\t}\n\n\treturn nil\n}", "func validateFields(req *logical.Request, data *framework.FieldData) error {\n\tvar unknownFields []string\n\tfor k := range req.Data {\n\t\tif _, ok := data.Schema[k]; !ok {\n\t\t\tunknownFields = append(unknownFields, k)\n\t\t}\n\t}\n\n\tif len(unknownFields) > 0 {\n\t\t// Sort since this is a human error\n\t\tsort.Strings(unknownFields)\n\n\t\treturn fmt.Errorf(\"unknown fields: %q\", unknownFields)\n\t}\n\n\treturn nil\n}", "func (s *RecordSchema) Validate(v reflect.Value) bool {\n\tv = dereference(v)\n\tif v.Kind() != reflect.Struct || !v.CanAddr() || !v.CanInterface() {\n\t\treturn false\n\t}\n\trec, ok := v.Interface().(GenericRecord)\n\tif !ok {\n\t\t// This is not a generic record and is likely a specific record. 
Hence\n\t\t// use the basic check.\n\t\treturn v.Kind() == reflect.Struct\n\t}\n\n\tfieldCount := 0\n\tfor key, val := range rec.fields {\n\t\tfor idx := range s.Fields {\n\t\t\t// key.Name must have rs.Fields[idx].Name as a suffix\n\t\t\tif len(s.Fields[idx].Name) <= len(key) {\n\t\t\t\tlhs := key[len(key)-len(s.Fields[idx].Name):]\n\t\t\t\tif lhs == s.Fields[idx].Name {\n\t\t\t\t\tif !s.Fields[idx].Type.Validate(reflect.ValueOf(val)) {\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t\tfieldCount++\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// All of the fields set must be accounted for in the union.\n\tif fieldCount < len(rec.fields) {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (s StructSpec) Validate() error {\n\tfor _, f := range s.Fields {\n\t\terr := f.Validate()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (cv *CustomValidator) Validate(i interface{}) error {\n\terrorRes, err := cv.Validator.Struct(i).(validator.ValidationErrors)\n\tif !err {\n\t\treturn nil\n\t}\n\terrorFields := []string{}\n\tfor _, k := range errorRes {\n\t\terrorFields = append(errorFields, k.StructField())\n\t}\n\tif len(errorFields) == 1 {\n\t\treturn errors.New(strings.Join(errorFields, \", \") + \" field is invalid or missing.\")\n\t}\n\treturn errors.New(strings.Join(errorFields, \", \") + \" fields are invalid or missing.\")\n}", "func Validate(v interface{}) error {\n\n\t// returns nil or ValidationErrors ( []FieldError )\n\terr := val.Struct(v)\n\tif err != nil {\n\n\t\t// this check is only needed when your code could produce\n\t\t// an invalid value for validation such as interface with nil\n\t\t// value most including myself do not usually have code like this.\n\t\tif _, ok := err.(*validator.InvalidValidationError); ok {\n\t\t\treturn nil\n\t\t}\n\n\t\treturn err\n\t}\n\treturn nil\n}", "func ValidateFields(model interface{}) error {\n\terr := validator.Validate(model)\n\tif err != nil {\n\t\terrs, ok := err.(validator.ErrorMap)\n\t\tif ok {\n\t\t\tfor f, _ := range errs {\n\t\t\t\treturn errors.New(ecodes.ValidateField, constant.ValidateFieldErr+\"-\"+f)\n\t\t\t}\n\t\t} else {\n\t\t\treturn errors.New(ecodes.ValidationUnknown, constant.ValidationUnknownErr)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (v *Validator) ValidateFields(input map[string]string) {\n\tfor field, value := range input {\n\t\t_, found := find(requiredFields, field)\n\t\tif !found {\n\t\t\tv.errors[\"errors\"] = append(v.errors[field], fmt.Sprintf(\"%+v is not valid, check docs for valid fields\", field))\n\t\t}\n\t\t(v.model)[field] = value\n\t}\n}", "func (self *TypeDef) Validate() error {\n\tif self.Type == \"\" {\n\t\treturn fmt.Errorf(\"TypeDef.type is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Type)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"TypeDef.type does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Name == \"\" {\n\t\treturn fmt.Errorf(\"TypeDef.name is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeName\", self.Name)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"TypeDef.name does not contain a valid TypeName (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Comment != \"\" {\n\t\tval := Validate(RdlSchema(), \"String\", self.Comment)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"TypeDef.comment does not contain a valid String (%v)\", val.Error)\n\t\t}\n\t}\n\treturn nil\n}", "func (mt *EasypostFieldObject) Validate() (err error) {\n\tif mt.Key == \"\" 
{\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"key\"))\n\t}\n\tif mt.Value == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"value\"))\n\t}\n\tif mt.Visibility == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"visibility\"))\n\t}\n\tif mt.Label == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"label\"))\n\t}\n\n\treturn\n}", "func (ti TypeInfo) Validate() error {\n\tif len(ti.Type) == 0 {\n\t\treturn errors.Wrap(ErrValidatingData, \"TypeInfo requires a type\")\n\t}\n\treturn nil\n}", "func ValidateStructFields(in interface{}, requiredFieldIDs []string) (err error) {\n\tvar inAsMap map[string]interface{}\n\ttemp, err := json.Marshal(in)\n\tif err != nil {\n\t\treturn errors.New(\"error validating input struct\")\n\t}\n\terr = json.Unmarshal(temp, &inAsMap)\n\tif err != nil {\n\t\treturn errors.New(\"error validating input struct\")\n\t}\n\n\tfor _, requiredFieldID := range requiredFieldIDs {\n\t\t// Make sure the field is in the data.\n\t\tif val, ok := inAsMap[requiredFieldID]; !ok || len(fmt.Sprintf(\"%v\", val)) == 0 {\n\t\t\treturn errors.New(\"required input field \" + requiredFieldID + \" not specified\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func Validate(value interface{}) error {\n\tv := reflect.Indirect(reflect.ValueOf(value))\n\tt := v.Type()\n\n\t// Look for an IsValid method on value. To check that this IsValid method\n\t// exists, we need to retrieve it with MethodByName, which returns a\n\t// reflect.Value. This reflect.Value, m, has a method that is called\n\t// IsValid as well, which tells us whether v actually represents the\n\t// function we're looking for. But they're two completely different IsValid\n\t// methods. 
Yes, this is confusing.\n\tm := reflect.ValueOf(value).MethodByName(\"IsValid\")\n\tif m.IsValid() {\n\t\te := m.Call([]reflect.Value{})\n\t\terr, ok := e[0].Interface().(error)\n\t\tif ok && err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// For non-struct values, we cannot do much, as there's no associated tags\n\t// to lookup to decide how to validate, so we have to assume they're valid.\n\tif t.Kind() != reflect.Struct {\n\t\treturn nil\n\t}\n\n\t// For struct values, iterate through the fields and use the type of field\n\t// along with its validate tags to decide next steps\n\tfor i := 0; i < v.NumField(); i++ {\n\t\tfield := v.Field(i)\n\n\t\tswitch field.Type().Kind() {\n\t\tcase reflect.Struct:\n\t\t\tdv := field.Interface()\n\t\t\tif err := Validate(dv); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase reflect.Slice:\n\t\t\tdv := reflect.ValueOf(field.Interface())\n\t\t\tif tag, ok := t.Field(i).Tag.Lookup(\"validate\"); ok {\n\t\t\t\tif err := validate(tag, t.Field(i).Name, v, v.Field(i)); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor j := 0; j < dv.Len(); j++ {\n\t\t\t\tif err := Validate(dv.Index(j).Interface()); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\tcase reflect.Map:\n\t\t\tif tag, ok := t.Field(i).Tag.Lookup(\"validate\"); ok {\n\t\t\t\tif err := validate(tag, t.Field(i).Name, v, v.Field(i)); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\tcase reflect.Bool, reflect.Int, reflect.Int64, reflect.Float64, reflect.String:\n\t\t\ttag, ok := t.Field(i).Tag.Lookup(\"validate\")\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := validate(tag, t.Field(i).Name, v, v.Field(i)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase reflect.Chan:\n\t\t\treturn nil\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unimplemented struct field type: %s\", t.Field(i).Name)\n\t\t}\n\t}\n\treturn nil\n}", "func (m *Type1) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (u *Usecase) validFields(d *Device) error {\n\tif d.Name == \"\" {\n\t\treturn &InvalidError{\"attribute `Name` must not be empty\"}\n\t}\n\n\tif d.User == 0 {\n\t\treturn &InvalidError{\"invalid user\"}\n\t}\n\n\treturn nil\n}", "func Validate(schema interface{}, errors []map[string]interface{}) {\n\t/**\n\t * create validator instance\n\t */\n\tvalidate := validator.New()\n\n\tif err := validate.Struct(schema); err != nil {\n\t\tif _, ok := err.(*validator.InvalidValidationError); ok {\n\t\t\terrors = append(errors, map[string]interface{}{\n\t\t\t\t\"message\": fmt.Sprint(err), \"flag\": \"INVALID_BODY\"},\n\t\t\t)\n\t\t}\n\n\t\tfor _, err := range err.(validator.ValidationErrors) {\n\t\t\terrors = append(errors, map[string]interface{}{\n\t\t\t\t\"message\": fmt.Sprint(err), \"flag\": \"INVALID_BODY\"},\n\t\t\t)\n\t\t}\n\t\texception.BadRequest(\"Validation error\", errors)\n\t}\n\tif errors != nil {\n\t\texception.BadRequest(\"Validation error\", errors)\n\t}\n}", "func (s *FieldStatsService) Validate() error {\n\tvar invalid []string\n\tif s.level != \"\" && (s.level != FieldStatsIndicesLevel && s.level != FieldStatsClusterLevel) {\n\t\tinvalid = append(invalid, \"Level\")\n\t}\n\tif len(invalid) != 0 {\n\t\treturn fmt.Errorf(\"missing or invalid required fields: %v\", invalid)\n\t}\n\treturn nil\n}", "func (t Type) Validate() error {\n\tswitch t {\n\tcase git:\n\t\treturn nil\n\tcase nop:\n\t\treturn nil\n\tdefault:\n\t\treturn ErrInvalidType\n\t}\n}", 
"func (time Time) Validate() bool {\n\tret := true\n\tif ret == true && time.hours != (Hours{}) {\n\t\tret = time.hours.Validate()\n\t}\n\n\tif ret == true && time.minutes != (Minutes{}) {\n\t\tret = time.minutes.Validate()\n\t}\n\n\tif ret == true && time.seconds != (Seconds{}) {\n\t\tret = time.seconds.Validate()\n\t}\n\n\tif ret == true && time.delay != (Delay{}) {\n\t\tret = time.delay.Validate()\n\t}\n\n\tif ret != true {\n\t\tlog.Println(\"Failed to validate time '\" + time.value + \"'\")\n\t}\n\treturn ret\n}", "func (p *Publication) IsValidFields() error {\n\tif p.Content != \"\" {\n\t\treturn nil\n\t}\n\treturn errorstatus.ErrorBadInfo\n\n}", "func (a Relayer) Validate() error {\n\treturn validation.ValidateStruct(&a,\n\t\tvalidation.Field(&a.Address, validation.Required),\n\t)\n}", "func (builder *Builder) ValidateFields() error {\n\tvmImageRefFields := []string{\"ImageSku\", \"ImageVersion\"}\n\tcustomVMIMageRefFields := []string{\"Image\", \"ImageResourceGroup\", \"ImageStorageAccount\", \"ImageContainer\"}\n\n\tif !builder.hasMarketplaceVMImageRef() && !builder.hasCustomVMIMageRef() {\n\t\treturn fmt.Errorf(\n\t\t\t\"missing fields: you must provide values for either %s fields or %s fields\",\n\t\t\tstrings.Join(vmImageRefFields, \", \"),\n\t\t\tstrings.Join(customVMIMageRefFields, \", \"),\n\t\t)\n\t}\n\n\tif builder.hasMarketplaceVMImageRef() && builder.hasCustomVMIMageRef() {\n\t\treturn fmt.Errorf(\n\t\t\t\"confilicting fields: you must provide values for either %s fields or %s fields\",\n\t\t\tstrings.Join(vmImageRefFields, \", \"),\n\t\t\tstrings.Join(customVMIMageRefFields, \", \"),\n\t\t)\n\t}\n\n\treturn nil\n}", "func (self *NumberTypeDef) Validate() error {\n\tif self.Type == \"\" {\n\t\treturn fmt.Errorf(\"NumberTypeDef.type is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Type)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"NumberTypeDef.type does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Name == \"\" {\n\t\treturn fmt.Errorf(\"NumberTypeDef.name is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeName\", self.Name)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"NumberTypeDef.name does not contain a valid TypeName (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Comment != \"\" {\n\t\tval := Validate(RdlSchema(), \"String\", self.Comment)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"NumberTypeDef.comment does not contain a valid String (%v)\", val.Error)\n\t\t}\n\t}\n\treturn nil\n}", "func (mt *EasypostCarrierTypes) Validate() (err error) {\n\tif mt.Type == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"type\"))\n\t}\n\tif mt.Object == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"object\"))\n\t}\n\tif mt.Fields == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"fields\"))\n\t}\n\n\tif ok := goa.ValidatePattern(`^CarrierType$`, mt.Object); !ok {\n\t\terr = goa.MergeErrors(err, goa.InvalidPatternError(`response.object`, mt.Object, `^CarrierType$`))\n\t}\n\treturn\n}", "func (t *Transform) Validate() *field.Error {\n\tswitch t.Type {\n\tcase TransformTypeMath:\n\t\tif t.Math == nil {\n\t\t\treturn field.Required(field.NewPath(\"math\"), \"given transform type math requires configuration\")\n\t\t}\n\t\treturn verrors.WrapFieldError(t.Math.Validate(), field.NewPath(\"math\"))\n\tcase TransformTypeMap:\n\t\tif t.Map == nil {\n\t\t\treturn 
field.Required(field.NewPath(\"map\"), \"given transform type map requires configuration\")\n\t\t}\n\t\treturn verrors.WrapFieldError(t.Map.Validate(), field.NewPath(\"map\"))\n\tcase TransformTypeMatch:\n\t\tif t.Match == nil {\n\t\t\treturn field.Required(field.NewPath(\"match\"), \"given transform type match requires configuration\")\n\t\t}\n\t\treturn verrors.WrapFieldError(t.Match.Validate(), field.NewPath(\"match\"))\n\tcase TransformTypeString:\n\t\tif t.String == nil {\n\t\t\treturn field.Required(field.NewPath(\"string\"), \"given transform type string requires configuration\")\n\t\t}\n\t\treturn verrors.WrapFieldError(t.String.Validate(), field.NewPath(\"string\"))\n\tcase TransformTypeConvert:\n\t\tif t.Convert == nil {\n\t\t\treturn field.Required(field.NewPath(\"convert\"), \"given transform type convert requires configuration\")\n\t\t}\n\t\tif err := t.Convert.Validate(); err != nil {\n\t\t\treturn verrors.WrapFieldError(err, field.NewPath(\"convert\"))\n\t\t}\n\tdefault:\n\t\t// Should never happen\n\t\treturn field.Invalid(field.NewPath(\"type\"), t.Type, \"unknown transform type\")\n\t}\n\n\treturn nil\n}", "func (strategy UpdateScatterStrategy) FieldsValidation() error {\n\tif len(strategy) == 0 {\n\t\treturn nil\n\t}\n\n\tm := make(map[string]struct{}, len(strategy))\n\tfor _, term := range strategy {\n\t\tif term.Key == \"\" {\n\t\t\treturn fmt.Errorf(\"key should not be empty\")\n\t\t}\n\t\tid := term.Key + \":\" + term.Value\n\t\tif _, ok := m[id]; !ok {\n\t\t\tm[id] = struct{}{}\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"duplicated key=%v value=%v\", term.Key, term.Value)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (self *StructTypeDef) Validate() error {\n\tif self.Type == \"\" {\n\t\treturn fmt.Errorf(\"StructTypeDef.type is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Type)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"StructTypeDef.type does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Name == \"\" {\n\t\treturn fmt.Errorf(\"StructTypeDef.name is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeName\", self.Name)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"StructTypeDef.name does not contain a valid TypeName (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Comment != \"\" {\n\t\tval := Validate(RdlSchema(), \"String\", self.Comment)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"StructTypeDef.comment does not contain a valid String (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Fields == nil {\n\t\treturn fmt.Errorf(\"StructTypeDef: Missing required field: fields\")\n\t}\n\treturn nil\n}", "func (self *MapTypeDef) Validate() error {\n\tif self.Type == \"\" {\n\t\treturn fmt.Errorf(\"MapTypeDef.type is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Type)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"MapTypeDef.type does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Name == \"\" {\n\t\treturn fmt.Errorf(\"MapTypeDef.name is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeName\", self.Name)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"MapTypeDef.name does not contain a valid TypeName (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Comment != \"\" {\n\t\tval := Validate(RdlSchema(), \"String\", self.Comment)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"MapTypeDef.comment does not contain a valid String (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Keys 
== \"\" {\n\t\treturn fmt.Errorf(\"MapTypeDef.keys is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Keys)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"MapTypeDef.keys does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Items == \"\" {\n\t\treturn fmt.Errorf(\"MapTypeDef.items is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Items)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"MapTypeDef.items does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\treturn nil\n}", "func (s StructInCustom) Validate() []string {\n\tvar errs []string\n\tif s.Name == \"\" {\n\t\terrs = append(errs, \"name::is_required\")\n\t}\n\n\treturn errs\n}", "func (cv Validator) Validate(i interface{}) error {\n\treturn cv.Validator.Struct(i)\n}", "func Validate(v interface{}) (error, bool) {\n\tresult, err := govalidator.ValidateStruct(v)\n\tif err != nil {\n\t\tlog.Println(\"Invalid data\", err)\n\t}\n\treturn err, result\n}", "func validateFieldDurations(fl validator.FieldLevel) bool {\n\tv := fl.Field().Bool()\n\tif v {\n\t\t//read the parameter and extract the other fields that were specified\n\t\tparam := fl.Param()\n\t\tfields := strings.Fields(param)\n\t\tfor _, field := range fields {\n\t\t\t//check if the field is set\n\t\t\tstructField, _, _, ok := fl.GetStructFieldOKAdvanced2(fl.Parent(), field)\n\t\t\tif !ok || structField.IsZero() {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}", "func (h *HazardType) Validate(tx *pop.Connection) (*validate.Errors, error) {\n\terrors := validate.Validate(\n\t\t&validators.StringIsPresent{Name: \"Label\", Field: h.Label, Message: \"A label is required.\"},\n\t\t&validators.StringIsPresent{Name: \"Description\", Field: h.Description, Message: \"Please provide a brief description.\"},\n\t)\n\n\treturn errors, nil\n}", "func (tS *testAInfo) Validate(msg actor.Msg) bool {\n\tswitch m := msg[0].(type) {\n\tcase int:\n\t\tif m > 0 && m < 10 {\n\t\t\treturn true\n\t\t}\n\tdefault:\n\t\tfor _, datum := range tS.allowed {\n\t\t\tif reflect.TypeOf(msg[0]) ==\n\t\t\t\treflect.TypeOf(datum) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\t// Does not match a valid type\n\treturn false\n}", "func (ut *RegisterPayload) Validate() (err error) {\n\tif ut.Email == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"email\"))\n\t}\n\tif ut.Password == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"password\"))\n\t}\n\tif ut.FirstName == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"first_name\"))\n\t}\n\tif ut.LastName == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"last_name\"))\n\t}\n\tif err2 := goa.ValidateFormat(goa.FormatEmail, ut.Email); err2 != nil {\n\t\terr = goa.MergeErrors(err, goa.InvalidFormatError(`type.email`, ut.Email, goa.FormatEmail, err2))\n\t}\n\tif utf8.RuneCountInString(ut.Email) < 6 {\n\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`type.email`, ut.Email, utf8.RuneCountInString(ut.Email), 6, true))\n\t}\n\tif utf8.RuneCountInString(ut.Email) > 150 {\n\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`type.email`, ut.Email, utf8.RuneCountInString(ut.Email), 150, false))\n\t}\n\tif utf8.RuneCountInString(ut.FirstName) < 1 {\n\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`type.first_name`, ut.FirstName, utf8.RuneCountInString(ut.FirstName), 1, true))\n\t}\n\tif 
utf8.RuneCountInString(ut.FirstName) > 200 {\n\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`type.first_name`, ut.FirstName, utf8.RuneCountInString(ut.FirstName), 200, false))\n\t}\n\tif utf8.RuneCountInString(ut.LastName) < 1 {\n\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`type.last_name`, ut.LastName, utf8.RuneCountInString(ut.LastName), 1, true))\n\t}\n\tif utf8.RuneCountInString(ut.LastName) > 200 {\n\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`type.last_name`, ut.LastName, utf8.RuneCountInString(ut.LastName), 200, false))\n\t}\n\tif utf8.RuneCountInString(ut.Password) < 5 {\n\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`type.password`, ut.Password, utf8.RuneCountInString(ut.Password), 5, true))\n\t}\n\tif utf8.RuneCountInString(ut.Password) > 100 {\n\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`type.password`, ut.Password, utf8.RuneCountInString(ut.Password), 100, false))\n\t}\n\treturn\n}", "func (u Phone) Validate() error {\n\treturn nil\n\t// return validation.ValidateStruct(&u,\n\t// \tvalidation.Field(&u.Name, validation.Required),\n\t// \tvalidation.Field(&u.Created, validation.Required))\n}", "func (r *InfoReq) Validate() error {\n\treturn validate.Struct(r)\n}", "func (r *RouteSpecFields) Validate(ctx context.Context) (errs *apis.FieldError) {\n\n\tif r.Domain == \"\" {\n\t\terrs = errs.Also(apis.ErrMissingField(\"domain\"))\n\t}\n\n\tif r.Hostname == \"www\" {\n\t\terrs = errs.Also(apis.ErrInvalidValue(\"hostname\", r.Hostname))\n\t}\n\n\tif _, err := BuildPathRegexp(r.Path); err != nil {\n\t\terrs = errs.Also(apis.ErrInvalidValue(\"path\", r.Path))\n\t}\n\n\treturn errs\n}", "func (mt *EasypostScanform) Validate() (err error) {\n\tif mt.Address != nil {\n\t\tif err2 := mt.Address.Validate(); err2 != nil {\n\t\t\terr = goa.MergeErrors(err, err2)\n\t\t}\n\t}\n\tif mt.ID != nil {\n\t\tif ok := goa.ValidatePattern(`^sf_`, *mt.ID); !ok {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidPatternError(`response.id`, *mt.ID, `^sf_`))\n\t\t}\n\t}\n\tif ok := goa.ValidatePattern(`^ScanForm$`, mt.Object); !ok {\n\t\terr = goa.MergeErrors(err, goa.InvalidPatternError(`response.object`, mt.Object, `^ScanForm$`))\n\t}\n\tif mt.Status != nil {\n\t\tif !(*mt.Status == \"creating\" || *mt.Status == \"created\" || *mt.Status == \"failed\") {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidEnumValueError(`response.status`, *mt.Status, []interface{}{\"creating\", \"created\", \"failed\"}))\n\t\t}\n\t}\n\treturn\n}", "func Validate(schema interface{}) {\n\tvalidate := validator.New()\n\n\tif err := validate.Struct(schema); err != nil {\n\t\tif _, ok := err.(*validator.InvalidValidationError); ok {\n\t\t\texception.BadRequest(fmt.Sprint(err), \"INVALID_BODY\")\n\t\t}\n\n\t\tfor _, err := range err.(validator.ValidationErrors) {\n\t\t\texception.BadRequest(fmt.Sprint(err), \"INVALID_BODY\")\n\t\t}\n\t}\n}", "func (v *Validation) Validate(i interface{}) ValidationErrors {\n\terrs := v.validate.Struct(i)\n\tif errs == nil {\n\t\treturn nil\n\t}\n\n\tvar returnErrs ValidationErrors\n\tfor _, err := range errs.(validator.ValidationErrors) {\n\t\t// cast the FieldError into our ValidationError and append to the slice\n\t\tve := ValidationError{err.(validator.FieldError)}\n\t\treturnErrs = append(returnErrs, ve)\n\t}\n\treturn returnErrs\n}", "func (s *MemberDefinition) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"MemberDefinition\"}\n\tif s.CognitoMemberDefinition != nil {\n\t\tif err := s.CognitoMemberDefinition.Validate(); err != nil 
{\n\t\t\tinvalidParams.AddNested(\"CognitoMemberDefinition\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.OidcMemberDefinition != nil {\n\t\tif err := s.OidcMemberDefinition.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"OidcMemberDefinition\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (m *MeasurementType) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func (s *UnionSchema) Validate(v reflect.Value) bool {\n\tv = dereference(v)\n\tfor i := range s.Types {\n\t\tif t := s.Types[i]; t.Validate(v) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (u *User) Validate() *errors.RestError {\n\tif err := validators.ValidateStruct(u); err != nil {\n\t\treturn err\n\t}\n\t// Sanitize Structure\n\tu.FirstName = strings.TrimSpace(u.FirstName)\n\tu.LastName = strings.TrimSpace(u.LastName)\n\tu.Email = strings.TrimSpace(u.Email)\n\tu.Username = strings.TrimSpace(u.Username)\n\tu.Password = strings.TrimSpace(u.Password)\n\t// Check password\n\tif err := u.validatePassword(); err != nil {\n\t\treturn err\n\t}\n\t// Check uniqueness\n\tif err := u.checkUniqueness(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (DirectorBindStrategy) Validate(ctx request.Context, obj runtime.Object) field.ErrorList {\n\to := obj.(*bind.DirectorBind)\n\tlog.Printf(\"Validating fields for DirectorBind %s\\n\", o.Name)\n\terrors := field.ErrorList{}\n\t// perform validation here and add to errors using field.Invalid\n\treturn errors\n}", "func (t *Test1) Validate(tx *pop.Connection) (*validate.Errors, error) {\n\treturn validate.Validate(\n\t\t&validators.IntIsPresent{Field: t.Field1, Name: \"Field1\"},\n\t), nil\n}", "func (t ConvertTransform) Validate() *field.Error {\n\tif !t.GetFormat().IsValid() {\n\t\treturn field.Invalid(field.NewPath(\"format\"), t.Format, \"invalid format\")\n\t}\n\tif !t.ToType.IsValid() {\n\t\treturn field.Invalid(field.NewPath(\"toType\"), t.ToType, \"invalid type\")\n\t}\n\treturn nil\n}", "func (conf TypeConfig) Validate() error {\n\tfor _, rule := range conf.Rules {\n\t\td, ok := conf.Descriptors[rule.Descriptor]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"rule %s=%s uses descriptor %s that does not exist\", rule.Name, rule.Value, rule.Descriptor)\n\t\t}\n\t\tif !hasField(rule.Name, d) {\n\t\t\treturn fmt.Errorf(\"rule %s refers to field %s that is not present in descriptor\", rule.Descriptor, rule.Name)\n\t\t}\n\n\t}\n\tfor name, desc := range conf.Descriptors {\n\t\tfor i, d := range desc {\n\t\t\tcol, ok := d.(map[string]interface{})\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"descriptor %s has invalid structure in element %d\", name, i)\n\t\t\t}\n\t\t\tif col[\"name\"] == \"ts\" && col[\"type\"] != \"time\" {\n\t\t\t\treturn fmt.Errorf(\"descriptor %s has field ts with wrong type %s\", name, col[\"type\"])\n\t\t\t}\n\t\t}\n\t\tcol := desc[0].(map[string]interface{})\n\t\tif col[\"name\"] != \"_path\" {\n\t\t\treturn fmt.Errorf(\"descriptor %s does not have _path as first column\", name)\n\t\t}\n\t}\n\treturn nil\n}", "func (m APIStepType) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\t// value enum\n\tif err := m.validateAPIStepTypeEnum(\"\", \"body\", m); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (r *Version) Validate() error {\n\n\tR := *r\n\tif len(R) > 4 {\n\t\treturn errors.New(\"Version field may not contain more than 4 
fields\")\n\t}\n\tif len(R) < 3 {\n\t\treturn errors.New(\"Version field must contain at least 3 fields\")\n\t}\n\tfor i, x := range R[:3] {\n\t\tn, ok := x.(int)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Version field %d is not an integer: %d\", i, n)\n\t\t}\n\t\tif n > 99 {\n\t\t\treturn fmt.Errorf(\"Version field %d value is over 99: %d\", i, n)\n\t\t}\n\t}\n\tif len(R) > 3 {\n\t\ts, ok := R[3].(string)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"optional field 4 of Version is not a string\")\n\t\t} else {\n\t\t\tfor i, x := range s {\n\t\t\t\tif !(unicode.IsLetter(x) || unicode.IsDigit(x)) {\n\t\t\t\t\treturn fmt.Errorf(\n\t\t\t\t\t\t\"optional field 4 of Version contains other than letters and numbers at position %d: '%v,\", i, x)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (s *ListAggregatedUtterancesInput) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"ListAggregatedUtterancesInput\"}\n\tif s.AggregationDuration == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"AggregationDuration\"))\n\t}\n\tif s.BotAliasId != nil && len(*s.BotAliasId) < 10 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotAliasId\", 10))\n\t}\n\tif s.BotId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotId\"))\n\t}\n\tif s.BotId != nil && len(*s.BotId) < 10 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotId\", 10))\n\t}\n\tif s.BotVersion != nil && len(*s.BotVersion) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotVersion\", 1))\n\t}\n\tif s.Filters != nil && len(s.Filters) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"Filters\", 1))\n\t}\n\tif s.LocaleId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"LocaleId\"))\n\t}\n\tif s.MaxResults != nil && *s.MaxResults < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinValue(\"MaxResults\", 1))\n\t}\n\tif s.AggregationDuration != nil {\n\t\tif err := s.AggregationDuration.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"AggregationDuration\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.Filters != nil {\n\t\tfor i, v := range s.Filters {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"Filters\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\tif s.SortBy != nil {\n\t\tif err := s.SortBy.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"SortBy\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (s *CreateSlotTypeInput) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"CreateSlotTypeInput\"}\n\tif s.BotId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotId\"))\n\t}\n\tif s.BotId != nil && len(*s.BotId) < 10 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotId\", 10))\n\t}\n\tif s.BotVersion == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotVersion\"))\n\t}\n\tif s.BotVersion != nil && len(*s.BotVersion) < 5 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotVersion\", 5))\n\t}\n\tif s.LocaleId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"LocaleId\"))\n\t}\n\tif s.LocaleId != nil && len(*s.LocaleId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"LocaleId\", 1))\n\t}\n\tif s.SlotTypeName == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"SlotTypeName\"))\n\t}\n\tif s.SlotTypeName != nil && len(*s.SlotTypeName) < 1 
{\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"SlotTypeName\", 1))\n\t}\n\tif s.SlotTypeValues != nil && len(s.SlotTypeValues) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"SlotTypeValues\", 1))\n\t}\n\tif s.CompositeSlotTypeSetting != nil {\n\t\tif err := s.CompositeSlotTypeSetting.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"CompositeSlotTypeSetting\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.ExternalSourceSetting != nil {\n\t\tif err := s.ExternalSourceSetting.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"ExternalSourceSetting\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.SlotTypeValues != nil {\n\t\tfor i, v := range s.SlotTypeValues {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"SlotTypeValues\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\tif s.ValueSelectionSetting != nil {\n\t\tif err := s.ValueSelectionSetting.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"ValueSelectionSetting\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (s *OrderBy) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"OrderBy\"}\n\tif s.PropertyName == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"PropertyName\"))\n\t}\n\tif s.PropertyName != nil && len(*s.PropertyName) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"PropertyName\", 1))\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (m *StripeRefundSpecificFields) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m ModelErrorDatumType) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\t// value enum\n\tif err := m.validateModelErrorDatumTypeEnum(\"\", \"body\", m); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (s *CreateBotAliasInput) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"CreateBotAliasInput\"}\n\tif s.BotAliasLocaleSettings != nil && len(s.BotAliasLocaleSettings) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotAliasLocaleSettings\", 1))\n\t}\n\tif s.BotAliasName == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotAliasName\"))\n\t}\n\tif s.BotAliasName != nil && len(*s.BotAliasName) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotAliasName\", 1))\n\t}\n\tif s.BotId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotId\"))\n\t}\n\tif s.BotId != nil && len(*s.BotId) < 10 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotId\", 10))\n\t}\n\tif s.BotVersion != nil && len(*s.BotVersion) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotVersion\", 1))\n\t}\n\tif s.BotAliasLocaleSettings != nil {\n\t\tfor i, v := range s.BotAliasLocaleSettings {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"BotAliasLocaleSettings\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\tif s.ConversationLogSettings != nil {\n\t\tif err := s.ConversationLogSettings.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"ConversationLogSettings\", 
err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.SentimentAnalysisSettings != nil {\n\t\tif err := s.SentimentAnalysisSettings.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"SentimentAnalysisSettings\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func Validate(ctx http.IContext, vld *validator.Validate, arg interface{}) bool {\n\n\tif err := ctx.GetRequest().GetBodyAs(arg); err != nil {\n\t\thttp.InternalServerException(ctx)\n\t\treturn false\n\t}\n\n\tswitch err := vld.Struct(arg); err.(type) {\n\tcase validator.ValidationErrors:\n\t\thttp.FailedValidationException(ctx, err.(validator.ValidationErrors))\n\t\treturn false\n\n\tcase nil:\n\t\tbreak\n\n\tdefault:\n\t\thttp.InternalServerException(ctx)\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (a *Account) Validate() error {\n\tvalidate := validator.New()\n\treturn validate.Struct(a)\n}", "func (s *CreateMemberInput) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"CreateMemberInput\"}\n\tif s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"ClientRequestToken\", 1))\n\t}\n\tif s.InvitationId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"InvitationId\"))\n\t}\n\tif s.InvitationId != nil && len(*s.InvitationId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"InvitationId\", 1))\n\t}\n\tif s.MemberConfiguration == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"MemberConfiguration\"))\n\t}\n\tif s.NetworkId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"NetworkId\"))\n\t}\n\tif s.NetworkId != nil && len(*s.NetworkId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"NetworkId\", 1))\n\t}\n\tif s.MemberConfiguration != nil {\n\t\tif err := s.MemberConfiguration.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"MemberConfiguration\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (ut *UpdateUserPayload) Validate() (err error) {\n\tif ut.Name == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"name\"))\n\t}\n\tif ut.Email == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"email\"))\n\t}\n\tif ut.Bio == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`type`, \"bio\"))\n\t}\n\tif err2 := goa.ValidateFormat(goa.FormatEmail, ut.Email); err2 != nil {\n\t\terr = goa.MergeErrors(err, goa.InvalidFormatError(`type.email`, ut.Email, goa.FormatEmail, err2))\n\t}\n\tif ok := goa.ValidatePattern(`\\S`, ut.Name); !ok {\n\t\terr = goa.MergeErrors(err, goa.InvalidPatternError(`type.name`, ut.Name, `\\S`))\n\t}\n\tif utf8.RuneCountInString(ut.Name) > 256 {\n\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(`type.name`, ut.Name, utf8.RuneCountInString(ut.Name), 256, false))\n\t}\n\treturn\n}", "func (o *Virtualserver) validate(dbRecord *common.DbRecord) (ok bool, err error) {\n\t////////////////////////////////////////////////////////////////////////////\n\t// Marshal data interface.\n\t////////////////////////////////////////////////////////////////////////////\n\tvar data virtualserver.Data\n\terr = shared.MarshalInterface(dbRecord.Data, &data)\n\tif err != nil {\n\t\treturn\n\t}\n\t////////////////////////////////////////////////////////////////////////////\n\t// Test required 
fields.\n\t////////////////////////////////////////////////////////////////////////////\n\tok = true\n\trequired := make(map[string]bool)\n\trequired[\"ProductCode\"] = false\n\trequired[\"IP\"] = false\n\trequired[\"Port\"] = false\n\trequired[\"LoadBalancerIP\"] = false\n\trequired[\"Name\"] = false\n\t////////////////////////////////////////////////////////////////////////////\n\tif data.ProductCode != 0 {\n\t\trequired[\"ProductCode\"] = true\n\t}\n\tif len(dbRecord.LoadBalancerIP) > 0 {\n\t\trequired[\"LoadBalancerIP\"] = true\n\t}\n\tif len(data.Ports) != 0 {\n\t\trequired[\"Port\"] = true\n\t}\n\tif data.IP != \"\" {\n\t\trequired[\"IP\"] = true\n\t}\n\tif data.Name != \"\" {\n\t\trequired[\"Name\"] = true\n\t}\n\tfor _, val := range required {\n\t\tif val == false {\n\t\t\tok = false\n\t\t}\n\t}\n\tif !ok {\n\t\terr = fmt.Errorf(\"missing required fields - %+v\", required)\n\t}\n\treturn\n}", "func Validate(t interface{}) error {\n\treturn validator.Struct(t)\n}", "func (m *ColumnDetails) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateKeyType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSortOrder(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateValueType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (cv *CustomValidator) Validate(i interface{}) error {\n\treturn cv.Validator.Struct(i)\n}", "func (cv *CustomValidator) Validate(i interface{}) error {\n\treturn cv.Validator.Struct(i)\n}", "func (s *WriteRecordsInput) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"WriteRecordsInput\"}\n\tif s.DatabaseName == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"DatabaseName\"))\n\t}\n\tif s.Records == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"Records\"))\n\t}\n\tif s.Records != nil && len(s.Records) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"Records\", 1))\n\t}\n\tif s.TableName == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"TableName\"))\n\t}\n\tif s.CommonAttributes != nil {\n\t\tif err := s.CommonAttributes.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"CommonAttributes\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.Records != nil {\n\t\tfor i, v := range s.Records {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"Records\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (m *HashType) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateFunction(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMethod(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateModifier(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (s *CognitoMemberDefinition) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"CognitoMemberDefinition\"}\n\tif s.ClientId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"ClientId\"))\n\t}\n\tif s.ClientId != nil && len(*s.ClientId) < 1 
{\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"ClientId\", 1))\n\t}\n\tif s.UserGroup == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"UserGroup\"))\n\t}\n\tif s.UserGroup != nil && len(*s.UserGroup) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"UserGroup\", 1))\n\t}\n\tif s.UserPool == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"UserPool\"))\n\t}\n\tif s.UserPool != nil && len(*s.UserPool) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"UserPool\", 1))\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (recipe *Recipe) Validate() error {\n\tvalidate := validator.New()\n\treturn validate.Struct(recipe)\n}", "func (s *CreateInferenceExperimentInput) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"CreateInferenceExperimentInput\"}\n\tif s.EndpointName == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"EndpointName\"))\n\t}\n\tif s.ModelVariants == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"ModelVariants\"))\n\t}\n\tif s.ModelVariants != nil && len(s.ModelVariants) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"ModelVariants\", 1))\n\t}\n\tif s.Name == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"Name\"))\n\t}\n\tif s.Name != nil && len(*s.Name) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"Name\", 1))\n\t}\n\tif s.RoleArn == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"RoleArn\"))\n\t}\n\tif s.RoleArn != nil && len(*s.RoleArn) < 20 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"RoleArn\", 20))\n\t}\n\tif s.ShadowModeConfig == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"ShadowModeConfig\"))\n\t}\n\tif s.Type == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"Type\"))\n\t}\n\tif s.DataStorageConfig != nil {\n\t\tif err := s.DataStorageConfig.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"DataStorageConfig\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.ModelVariants != nil {\n\t\tfor i, v := range s.ModelVariants {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"ModelVariants\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\tif s.ShadowModeConfig != nil {\n\t\tif err := s.ShadowModeConfig.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"ShadowModeConfig\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.Tags != nil {\n\t\tfor i, v := range s.Tags {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"Tags\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (s *UpdateSlotTypeInput) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"UpdateSlotTypeInput\"}\n\tif s.BotId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotId\"))\n\t}\n\tif s.BotId != nil && len(*s.BotId) < 10 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotId\", 10))\n\t}\n\tif s.BotVersion == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotVersion\"))\n\t}\n\tif s.BotVersion != nil && len(*s.BotVersion) < 5 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotVersion\", 5))\n\t}\n\tif s.LocaleId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"LocaleId\"))\n\t}\n\tif s.LocaleId != nil && 
len(*s.LocaleId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"LocaleId\", 1))\n\t}\n\tif s.SlotTypeId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"SlotTypeId\"))\n\t}\n\tif s.SlotTypeId != nil && len(*s.SlotTypeId) < 10 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"SlotTypeId\", 10))\n\t}\n\tif s.SlotTypeName == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"SlotTypeName\"))\n\t}\n\tif s.SlotTypeName != nil && len(*s.SlotTypeName) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"SlotTypeName\", 1))\n\t}\n\tif s.SlotTypeValues != nil && len(s.SlotTypeValues) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"SlotTypeValues\", 1))\n\t}\n\tif s.CompositeSlotTypeSetting != nil {\n\t\tif err := s.CompositeSlotTypeSetting.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"CompositeSlotTypeSetting\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.ExternalSourceSetting != nil {\n\t\tif err := s.ExternalSourceSetting.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"ExternalSourceSetting\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.SlotTypeValues != nil {\n\t\tfor i, v := range s.SlotTypeValues {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"SlotTypeValues\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\tif s.ValueSelectionSetting != nil {\n\t\tif err := s.ValueSelectionSetting.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"ValueSelectionSetting\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func Validate(obj interface{}) (map[string]interface{}, bool) {\n\n\trules := govalidator.MapData{\n\t\t\"name\": []string{\"required\", \"between:3,150\"},\n\t\t//\"email\": []string{\"required\", \"min:4\", \"max:20\", \"email\"},\n\t\t//\"web\": []string{\"url\"},\n\t\t//\"age\": []string{\"numeric_between:18,56\"},\n\t}\n\n\treturn validate.Validate(rules, obj)\n}", "func (u *User) Validate() ([]app.Invalid, error) {\n\tvar inv []app.Invalid\n\n\tif u.UserType == 0 {\n\t\tinv = append(inv, app.Invalid{Fld: \"UserType\", Err: \"The value of UserType cannot be 0.\"})\n\t}\n\n\tif u.FirstName == \"\" {\n\t\tinv = append(inv, app.Invalid{Fld: \"FirstName\", Err: \"A value of FirstName cannot be empty.\"})\n\t}\n\n\tif u.LastName == \"\" {\n\t\tinv = append(inv, app.Invalid{Fld: \"LastName\", Err: \"A value of LastName cannot be empty.\"})\n\t}\n\n\tif u.Email == \"\" {\n\t\tinv = append(inv, app.Invalid{Fld: \"Email\", Err: \"A value of Email cannot be empty.\"})\n\t}\n\n\tif u.Company == \"\" {\n\t\tinv = append(inv, app.Invalid{Fld: \"Company\", Err: \"A value of Company cannot be empty.\"})\n\t}\n\n\tif len(u.Addresses) == 0 {\n\t\tinv = append(inv, app.Invalid{Fld: \"Addresses\", Err: \"There must be at least one address.\"})\n\t} else {\n\t\tfor _, ua := range u.Addresses {\n\t\t\tif va, err := ua.Validate(); err != nil {\n\t\t\t\tinv = append(inv, va...)\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(inv) > 0 {\n\t\treturn inv, errors.New(\"Validation failures identified\")\n\t}\n\n\treturn nil, nil\n}", "func (s *GetPropertyValueHistoryInput) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"GetPropertyValueHistoryInput\"}\n\tif s.ComponentName != nil && len(*s.ComponentName) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"ComponentName\", 1))\n\t}\n\tif s.ComponentTypeId != nil && 
len(*s.ComponentTypeId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"ComponentTypeId\", 1))\n\t}\n\tif s.EndTime != nil && len(*s.EndTime) < 20 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"EndTime\", 20))\n\t}\n\tif s.EntityId != nil && len(*s.EntityId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"EntityId\", 1))\n\t}\n\tif s.PropertyFilters != nil && len(s.PropertyFilters) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"PropertyFilters\", 1))\n\t}\n\tif s.SelectedProperties == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"SelectedProperties\"))\n\t}\n\tif s.SelectedProperties != nil && len(s.SelectedProperties) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"SelectedProperties\", 1))\n\t}\n\tif s.StartTime != nil && len(*s.StartTime) < 20 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"StartTime\", 20))\n\t}\n\tif s.WorkspaceId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"WorkspaceId\"))\n\t}\n\tif s.WorkspaceId != nil && len(*s.WorkspaceId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"WorkspaceId\", 1))\n\t}\n\tif s.PropertyFilters != nil {\n\t\tfor i, v := range s.PropertyFilters {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"PropertyFilters\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (v *validator) Validate(val interface{}) (bool, *domain.NuxError) {\n\tif l, ok := val.(int); ok {\n\t\treturn v.validateInt(l)\n\t}\n\n\tif l, ok := val.(int64); ok {\n\t\treturn v.validateInt64(l)\n\t}\n\n\tif l, ok := val.(float64); ok {\n\t\treturn v.validateFloat64(l)\n\t}\n\n\tif l, ok := val.(float32); ok {\n\t\treturn v.validateFloat32(l)\n\t}\n\n\treturn true, nil\n}", "func (d *Definition) Validate() (bool, error) {\n\treturn govalidator.ValidateStruct(d)\n}", "func (s *ServiceCatalogProvisioningDetails) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"ServiceCatalogProvisioningDetails\"}\n\tif s.PathId != nil && len(*s.PathId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"PathId\", 1))\n\t}\n\tif s.ProductId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"ProductId\"))\n\t}\n\tif s.ProductId != nil && len(*s.ProductId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"ProductId\", 1))\n\t}\n\tif s.ProvisioningArtifactId != nil && len(*s.ProvisioningArtifactId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"ProvisioningArtifactId\", 1))\n\t}\n\tif s.ProvisioningParameters != nil {\n\t\tfor i, v := range s.ProvisioningParameters {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"ProvisioningParameters\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (self *AliasTypeDef) Validate() error {\n\tif self.Type == \"\" {\n\t\treturn fmt.Errorf(\"AliasTypeDef.type is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Type)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"AliasTypeDef.type does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Name == \"\" {\n\t\treturn fmt.Errorf(\"AliasTypeDef.name is missing but is a required field\")\n\t} else {\n\t\tval := 
Validate(RdlSchema(), \"TypeName\", self.Name)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"AliasTypeDef.name does not contain a valid TypeName (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Comment != \"\" {\n\t\tval := Validate(RdlSchema(), \"String\", self.Comment)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"AliasTypeDef.comment does not contain a valid String (%v)\", val.Error)\n\t\t}\n\t}\n\treturn nil\n}", "func (s *ListSlotTypesInput) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"ListSlotTypesInput\"}\n\tif s.BotId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotId\"))\n\t}\n\tif s.BotId != nil && len(*s.BotId) < 10 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotId\", 10))\n\t}\n\tif s.BotVersion == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotVersion\"))\n\t}\n\tif s.BotVersion != nil && len(*s.BotVersion) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotVersion\", 1))\n\t}\n\tif s.Filters != nil && len(s.Filters) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"Filters\", 1))\n\t}\n\tif s.LocaleId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"LocaleId\"))\n\t}\n\tif s.LocaleId != nil && len(*s.LocaleId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"LocaleId\", 1))\n\t}\n\tif s.MaxResults != nil && *s.MaxResults < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinValue(\"MaxResults\", 1))\n\t}\n\tif s.Filters != nil {\n\t\tfor i, v := range s.Filters {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"Filters\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\tif s.SortBy != nil {\n\t\tif err := s.SortBy.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"SortBy\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (s *UpdateBotAliasInput) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"UpdateBotAliasInput\"}\n\tif s.BotAliasId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotAliasId\"))\n\t}\n\tif s.BotAliasId != nil && len(*s.BotAliasId) < 10 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotAliasId\", 10))\n\t}\n\tif s.BotAliasLocaleSettings != nil && len(s.BotAliasLocaleSettings) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotAliasLocaleSettings\", 1))\n\t}\n\tif s.BotAliasName == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotAliasName\"))\n\t}\n\tif s.BotAliasName != nil && len(*s.BotAliasName) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotAliasName\", 1))\n\t}\n\tif s.BotId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"BotId\"))\n\t}\n\tif s.BotId != nil && len(*s.BotId) < 10 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotId\", 10))\n\t}\n\tif s.BotVersion != nil && len(*s.BotVersion) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"BotVersion\", 1))\n\t}\n\tif s.BotAliasLocaleSettings != nil {\n\t\tfor i, v := range s.BotAliasLocaleSettings {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"BotAliasLocaleSettings\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\tif s.ConversationLogSettings != nil {\n\t\tif err := s.ConversationLogSettings.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"ConversationLogSettings\", 
err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.SentimentAnalysisSettings != nil {\n\t\tif err := s.SentimentAnalysisSettings.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"SentimentAnalysisSettings\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (v *Validator) Validate(i interface{}) error {\n\treturn v.validator.Struct(i)\n}", "func (s *CreateProfileInput) Validate() error {\n\tinvalidParams := aws.ErrInvalidParams{Context: \"CreateProfileInput\"}\n\n\tif s.Address == nil {\n\t\tinvalidParams.Add(aws.NewErrParamRequired(\"Address\"))\n\t}\n\tif s.Address != nil && len(*s.Address) < 1 {\n\t\tinvalidParams.Add(aws.NewErrParamMinLen(\"Address\", 1))\n\t}\n\tif s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 10 {\n\t\tinvalidParams.Add(aws.NewErrParamMinLen(\"ClientRequestToken\", 10))\n\t}\n\tif len(s.DistanceUnit) == 0 {\n\t\tinvalidParams.Add(aws.NewErrParamRequired(\"DistanceUnit\"))\n\t}\n\tif s.Locale != nil && len(*s.Locale) < 1 {\n\t\tinvalidParams.Add(aws.NewErrParamMinLen(\"Locale\", 1))\n\t}\n\n\tif s.ProfileName == nil {\n\t\tinvalidParams.Add(aws.NewErrParamRequired(\"ProfileName\"))\n\t}\n\tif s.ProfileName != nil && len(*s.ProfileName) < 1 {\n\t\tinvalidParams.Add(aws.NewErrParamMinLen(\"ProfileName\", 1))\n\t}\n\tif len(s.TemperatureUnit) == 0 {\n\t\tinvalidParams.Add(aws.NewErrParamRequired(\"TemperatureUnit\"))\n\t}\n\n\tif s.Timezone == nil {\n\t\tinvalidParams.Add(aws.NewErrParamRequired(\"Timezone\"))\n\t}\n\tif s.Timezone != nil && len(*s.Timezone) < 1 {\n\t\tinvalidParams.Add(aws.NewErrParamMinLen(\"Timezone\", 1))\n\t}\n\tif len(s.WakeWord) == 0 {\n\t\tinvalidParams.Add(aws.NewErrParamRequired(\"WakeWord\"))\n\t}\n\tif s.MeetingRoomConfiguration != nil {\n\t\tif err := s.MeetingRoomConfiguration.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"MeetingRoomConfiguration\", err.(aws.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.Tags != nil {\n\t\tfor i, v := range s.Tags {\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"Tags\", i), err.(aws.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (l *logger) Validate() error {\n\tif l == nil {\n\t\treturn nil\n\t}\n\tif err := l.Console.Validate(); err != nil {\n\t\treturn fmt.Errorf(\"`Console` field: %s\", err.Error())\n\t}\n\tif err := l.File.Validate(); err != nil {\n\t\treturn fmt.Errorf(\"`File` field: %s\", err.Error())\n\t}\n\treturn nil\n}", "func (self *ArrayTypeDef) Validate() error {\n\tif self.Type == \"\" {\n\t\treturn fmt.Errorf(\"ArrayTypeDef.type is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Type)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"ArrayTypeDef.type does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Name == \"\" {\n\t\treturn fmt.Errorf(\"ArrayTypeDef.name is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeName\", self.Name)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"ArrayTypeDef.name does not contain a valid TypeName (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Comment != \"\" {\n\t\tval := Validate(RdlSchema(), \"String\", self.Comment)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"ArrayTypeDef.comment does not contain a valid String (%v)\", val.Error)\n\t\t}\n\t}\n\tif self.Items == \"\" {\n\t\treturn 
fmt.Errorf(\"ArrayTypeDef.items is missing but is a required field\")\n\t} else {\n\t\tval := Validate(RdlSchema(), \"TypeRef\", self.Items)\n\t\tif !val.Valid {\n\t\t\treturn fmt.Errorf(\"ArrayTypeDef.items does not contain a valid TypeRef (%v)\", val.Error)\n\t\t}\n\t}\n\treturn nil\n}", "func (s *RegexMatchTuple) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"RegexMatchTuple\"}\n\tif s.FieldToMatch == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"FieldToMatch\"))\n\t}\n\tif s.RegexPatternSetId == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"RegexPatternSetId\"))\n\t}\n\tif s.RegexPatternSetId != nil && len(*s.RegexPatternSetId) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"RegexPatternSetId\", 1))\n\t}\n\tif s.TextTransformation == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"TextTransformation\"))\n\t}\n\tif s.FieldToMatch != nil {\n\t\tif err := s.FieldToMatch.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"FieldToMatch\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (r *RecordValidator) Validate(i interface{}) error {\r\n\treturn r.validator.Struct(i)\r\n}", "func (s *Service) Validate() error {\n\tnonEmptyFields := map[string]checker{\n\t\t\"Name\": checker{s.Name, true},\n\t\t\"Type\": checker{s.Type.String(), false}, // Type is a enum, no need to check\n\t\t\"Owner\": checker{s.Owner, true},\n\t\t\"ClusterType\": checker{s.ClusterType, true},\n\t\t\"InstanceName\": checker{s.InstanceName.String(), true},\n\t}\n\n\tfor label, field := range nonEmptyFields {\n\t\tif field.val == \"\" {\n\t\t\treturn fmt.Errorf(errorTmpl, label+\" is empty\")\n\t\t} else if field.checkSeparator && strings.Contains(field.val, keyPartSeparator) {\n\t\t\treturn fmt.Errorf(errorTmpl, label+separatorErrorMsg)\n\t\t}\n\t}\n\n\tswitch {\n\tcase len([]rune(s.Name)) > maxServiceNameLen:\n\t\treturn fmt.Errorf(errorTmpl, fmt.Sprintf(\"Name %q is too long, max len is %d symbols\", s.Name, maxServiceNameLen))\n\tcase !reRolloutType.MatchString(s.RolloutType):\n\t\treturn fmt.Errorf(errorTmpl, \"RolloutType is invalid\")\n\t}\n\treturn nil\n}", "func (t *Visibility_Visibility) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"Visibility_Visibility\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (s *UpdateWorkteamInput) Validate() error {\n\tinvalidParams := request.ErrInvalidParams{Context: \"UpdateWorkteamInput\"}\n\tif s.Description != nil && len(*s.Description) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"Description\", 1))\n\t}\n\tif s.MemberDefinitions != nil && len(s.MemberDefinitions) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"MemberDefinitions\", 1))\n\t}\n\tif s.WorkteamName == nil {\n\t\tinvalidParams.Add(request.NewErrParamRequired(\"WorkteamName\"))\n\t}\n\tif s.WorkteamName != nil && len(*s.WorkteamName) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"WorkteamName\", 1))\n\t}\n\tif s.MemberDefinitions != nil {\n\t\tfor i, v := range s.MemberDefinitions {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"MemberDefinitions\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}", "func (s *SlotTypeValue) Validate() error {\n\tinvalidParams := 
request.ErrInvalidParams{Context: \"SlotTypeValue\"}\n\tif s.Synonyms != nil && len(s.Synonyms) < 1 {\n\t\tinvalidParams.Add(request.NewErrParamMinLen(\"Synonyms\", 1))\n\t}\n\tif s.SampleValue != nil {\n\t\tif err := s.SampleValue.Validate(); err != nil {\n\t\t\tinvalidParams.AddNested(\"SampleValue\", err.(request.ErrInvalidParams))\n\t\t}\n\t}\n\tif s.Synonyms != nil {\n\t\tfor i, v := range s.Synonyms {\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\tinvalidParams.AddNested(fmt.Sprintf(\"%s[%v]\", \"Synonyms\", i), err.(request.ErrInvalidParams))\n\t\t\t}\n\t\t}\n\t}\n\n\tif invalidParams.Len() > 0 {\n\t\treturn invalidParams\n\t}\n\treturn nil\n}" ]
[ "0.6366166", "0.6255708", "0.62440985", "0.6219268", "0.6205969", "0.6186602", "0.61787015", "0.6151207", "0.6135345", "0.6129121", "0.61265224", "0.61265224", "0.60985357", "0.60598147", "0.60547787", "0.60132855", "0.5993056", "0.5990731", "0.59752667", "0.59422064", "0.59114707", "0.59090024", "0.5889592", "0.58741313", "0.5829609", "0.58170855", "0.58096683", "0.58095896", "0.58095545", "0.58024305", "0.5794755", "0.57862866", "0.57858443", "0.57791334", "0.5764243", "0.57606256", "0.57459706", "0.5732621", "0.5724816", "0.5721725", "0.5710794", "0.57104737", "0.5704633", "0.5703819", "0.5702953", "0.56983054", "0.56940216", "0.5690886", "0.5657812", "0.5649313", "0.56480217", "0.564582", "0.563624", "0.5627615", "0.5625255", "0.5619124", "0.5613144", "0.56088334", "0.5605432", "0.56024873", "0.55947214", "0.55911726", "0.5589795", "0.5585938", "0.55821085", "0.5582017", "0.5581614", "0.55808634", "0.5580246", "0.5574314", "0.5568627", "0.55618674", "0.5560738", "0.55515087", "0.5550786", "0.5550786", "0.5541505", "0.5539938", "0.55395836", "0.5536529", "0.5532453", "0.5530356", "0.55274034", "0.5516386", "0.55141157", "0.551397", "0.5513621", "0.5507534", "0.55044377", "0.5499806", "0.5497794", "0.5496284", "0.5494955", "0.5485755", "0.54851174", "0.5484035", "0.54840046", "0.5483409", "0.5483303", "0.5483193", "0.5481435" ]
0.0
-1
MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s DescribeUserHierarchyStructureInput) MarshalFields(e protocol.FieldEncoder) error {
	e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{})

	if s.InstanceId != nil {
		v := *s.InstanceId

		metadata := protocol.Metadata{}
		e.SetValue(protocol.PathTarget, "InstanceId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
	}
	return nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s CreateApiOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiEndpoint != nil {\n\t\tv := *s.ApiEndpoint\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiEndpoint\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiKeySelectionExpression != nil {\n\t\tv := *s.ApiKeySelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeySelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedDate != nil {\n\t\tv := *s.CreatedDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdDate\", protocol.TimeValue{V: v, Format: protocol.UnixTimeFormat}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DisableSchemaValidation != nil {\n\t\tv := *s.DisableSchemaValidation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"disableSchemaValidation\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ProtocolType) > 0 {\n\t\tv := s.ProtocolType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"protocolType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.RouteSelectionExpression != nil {\n\t\tv := *s.RouteSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Warnings) > 0 {\n\t\tv := s.Warnings\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"warnings\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s OutputService9TestShapeSingleStructure) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Foo != nil {\n\t\tv := *s.Foo\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Foo\", protocol.StringValue(v), metadata)\n\t}\n\treturn nil\n}", "func (s OutputService6TestShapeSingleStructure) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Foo != nil {\n\t\tv := *s.Foo\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"foo\", protocol.StringValue(v), metadata)\n\t}\n\treturn nil\n}", "func (s CreateApiInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiKeySelectionExpression != nil {\n\t\tv := *s.ApiKeySelectionExpression\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeySelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DisableSchemaValidation != nil {\n\t\tv := *s.DisableSchemaValidation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"disableSchemaValidation\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ProtocolType) > 0 {\n\t\tv := s.ProtocolType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"protocolType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.RouteSelectionExpression != nil {\n\t\tv := *s.RouteSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s Api) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiEndpoint != nil {\n\t\tv := *s.ApiEndpoint\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiEndpoint\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiKeySelectionExpression != nil {\n\t\tv := *s.ApiKeySelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeySelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedDate != nil {\n\t\tv := *s.CreatedDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdDate\", protocol.TimeValue{V: v, Format: protocol.UnixTimeFormat}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DisableSchemaValidation != nil {\n\t\tv := *s.DisableSchemaValidation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"disableSchemaValidation\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ProtocolType) > 0 {\n\t\tv := s.ProtocolType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"protocolType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.RouteSelectionExpression != nil {\n\t\tv := *s.RouteSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeSelectionExpression\", 
protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Warnings) > 0 {\n\t\tv := s.Warnings\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"warnings\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s GetApiOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiEndpoint != nil {\n\t\tv := *s.ApiEndpoint\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiEndpoint\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiKeySelectionExpression != nil {\n\t\tv := *s.ApiKeySelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeySelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedDate != nil {\n\t\tv := *s.CreatedDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdDate\", protocol.TimeValue{V: v, Format: protocol.UnixTimeFormat}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DisableSchemaValidation != nil {\n\t\tv := *s.DisableSchemaValidation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"disableSchemaValidation\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ProtocolType) > 0 {\n\t\tv := s.ProtocolType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"protocolType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.RouteSelectionExpression != nil {\n\t\tv := *s.RouteSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Warnings) > 0 {\n\t\tv := s.Warnings\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"warnings\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s UpdateApiOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiEndpoint != nil {\n\t\tv := *s.ApiEndpoint\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiEndpoint\", protocol.QuotedValue{ValueMarshaler: 
protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiKeySelectionExpression != nil {\n\t\tv := *s.ApiKeySelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeySelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedDate != nil {\n\t\tv := *s.CreatedDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdDate\", protocol.TimeValue{V: v, Format: protocol.UnixTimeFormat}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DisableSchemaValidation != nil {\n\t\tv := *s.DisableSchemaValidation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"disableSchemaValidation\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ProtocolType) > 0 {\n\t\tv := s.ProtocolType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"protocolType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.RouteSelectionExpression != nil {\n\t\tv := *s.RouteSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Warnings) > 0 {\n\t\tv := s.Warnings\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"warnings\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s UpdateRestApiOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif len(s.ApiKeySource) > 0 {\n\t\tv := s.ApiKeySource\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeySource\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.BinaryMediaTypes != nil {\n\t\tv := s.BinaryMediaTypes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"binaryMediaTypes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.CreatedDate != nil {\n\t\tv := *s.CreatedDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdDate\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.EndpointConfiguration != nil {\n\t\tv := 
s.EndpointConfiguration\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"endpointConfiguration\", v, metadata)\n\t}\n\tif s.Id != nil {\n\t\tv := *s.Id\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"id\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MinimumCompressionSize != nil {\n\t\tv := *s.MinimumCompressionSize\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"minimumCompressionSize\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Policy != nil {\n\t\tv := *s.Policy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"policy\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Warnings != nil {\n\t\tv := s.Warnings\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"warnings\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s CreateCanaryInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.ArtifactS3Location != nil {\n\t\tv := *s.ArtifactS3Location\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ArtifactS3Location\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Code != nil {\n\t\tv := s.Code\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"Code\", v, metadata)\n\t}\n\tif s.ExecutionRoleArn != nil {\n\t\tv := *s.ExecutionRoleArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ExecutionRoleArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.FailureRetentionPeriodInDays != nil {\n\t\tv := *s.FailureRetentionPeriodInDays\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"FailureRetentionPeriodInDays\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RunConfig != nil {\n\t\tv := s.RunConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"RunConfig\", v, metadata)\n\t}\n\tif s.RuntimeVersion != nil {\n\t\tv := *s.RuntimeVersion\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"RuntimeVersion\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Schedule != nil {\n\t\tv := s.Schedule\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"Schedule\", v, metadata)\n\t}\n\tif s.SuccessRetentionPeriodInDays != nil {\n\t\tv := *s.SuccessRetentionPeriodInDays\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"SuccessRetentionPeriodInDays\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"Tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.VpcConfig != nil {\n\t\tv := s.VpcConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"VpcConfig\", v, metadata)\n\t}\n\treturn nil\n}", "func (s VirtualGatewayRef) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedAt != nil {\n\t\tv := *s.CreatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.LastUpdatedAt != nil {\n\t\tv := *s.LastUpdatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastUpdatedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.MeshName != nil {\n\t\tv := *s.MeshName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MeshOwner != nil {\n\t\tv := *s.MeshOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ResourceOwner != nil {\n\t\tv := *s.ResourceOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"resourceOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.VirtualGatewayName != nil {\n\t\tv := *s.VirtualGatewayName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"virtualGatewayName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateAccessPointInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tvar ClientToken string\n\tif s.ClientToken != nil {\n\t\tClientToken = *s.ClientToken\n\t} else {\n\t\tClientToken = protocol.GetIdempotencyToken()\n\t}\n\t{\n\t\tv := ClientToken\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ClientToken\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.FileSystemId != nil {\n\t\tv := *s.FileSystemId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"FileSystemId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.PosixUser != nil {\n\t\tv := s.PosixUser\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"PosixUser\", v, metadata)\n\t}\n\tif s.RootDirectory != nil {\n\t\tv := s.RootDirectory\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"RootDirectory\", v, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"Tags\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s UpdateRouteInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.AuthorizationScopes) > 0 {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RequestModels) > 0 {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif 
s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateRouteOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.AuthorizationScopes) > 0 {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RequestModels) > 0 {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn 
nil\n}", "func (s CreateModelInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ContentType != nil {\n\t\tv := *s.ContentType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentType\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Schema != nil {\n\t\tv := *s.Schema\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"schema\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateRouteInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.AuthorizationScopes) > 0 {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RequestModels) > 0 {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v 
{\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s Api) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiEndpoint != nil {\n\t\tv := *s.ApiEndpoint\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiEndpoint\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiKeySelectionExpression != nil {\n\t\tv := *s.ApiKeySelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeySelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CorsConfiguration != nil {\n\t\tv := s.CorsConfiguration\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"corsConfiguration\", v, metadata)\n\t}\n\tif s.CreatedDate != nil {\n\t\tv := *s.CreatedDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdDate\",\n\t\t\tprotocol.TimeValue{V: v, Format: \"iso8601\", QuotedFormatTime: true}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DisableSchemaValidation != nil {\n\t\tv := *s.DisableSchemaValidation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"disableSchemaValidation\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.ImportInfo != nil {\n\t\tv := s.ImportInfo\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"importInfo\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ProtocolType) > 0 {\n\t\tv := s.ProtocolType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"protocolType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.RouteSelectionExpression != nil {\n\t\tv := *s.RouteSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeSelectionExpression\", 
protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Warnings != nil {\n\t\tv := s.Warnings\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"warnings\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s OutputService15TestShapeItemDetailShape) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ID != nil {\n\t\tv := *s.ID\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ID\", protocol.StringValue(v), metadata)\n\t}\n\t// Skipping Type XML Attribute.\n\treturn nil\n}", "func (s CreateProxySessionInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.Capabilities != nil {\n\t\tv := s.Capabilities\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"Capabilities\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.ExpiryMinutes != nil {\n\t\tv := *s.ExpiryMinutes\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ExpiryMinutes\", protocol.Int64Value(v), metadata)\n\t}\n\tif len(s.GeoMatchLevel) > 0 {\n\t\tv := s.GeoMatchLevel\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"GeoMatchLevel\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.GeoMatchParams != nil {\n\t\tv := s.GeoMatchParams\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"GeoMatchParams\", v, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.NumberSelectionBehavior) > 0 {\n\t\tv := s.NumberSelectionBehavior\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"NumberSelectionBehavior\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.ParticipantPhoneNumbers != nil {\n\t\tv := s.ParticipantPhoneNumbers\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"ParticipantPhoneNumbers\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.VoiceConnectorId != nil {\n\t\tv := *s.VoiceConnectorId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"voiceConnectorId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateRouteOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.AuthorizationScopes) > 0 {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RequestModels) > 0 {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateStageInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.AccessLogSettings != nil {\n\t\tv := s.AccessLogSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"accessLogSettings\", v, metadata)\n\t}\n\tif s.ClientCertificateId != nil {\n\t\tv := *s.ClientCertificateId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"clientCertificateId\", 
protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DefaultRouteSettings != nil {\n\t\tv := s.DefaultRouteSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"defaultRouteSettings\", v, metadata)\n\t}\n\tif s.DeploymentId != nil {\n\t\tv := *s.DeploymentId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"deploymentId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RouteSettings) > 0 {\n\t\tv := s.RouteSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"routeSettings\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.StageVariables) > 0 {\n\t\tv := s.StageVariables\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"stageVariables\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.StageName != nil {\n\t\tv := *s.StageName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"stageName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s VirtualGatewaySpec) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.BackendDefaults != nil {\n\t\tv := s.BackendDefaults\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"backendDefaults\", v, metadata)\n\t}\n\tif s.Listeners != nil {\n\t\tv := s.Listeners\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"listeners\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Logging != nil {\n\t\tv := s.Logging\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"logging\", v, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateApiInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiKeySelectionExpression != nil {\n\t\tv := *s.ApiKeySelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeySelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DisableSchemaValidation != nil {\n\t\tv := *s.DisableSchemaValidation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"disableSchemaValidation\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: 
protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteSelectionExpression != nil {\n\t\tv := *s.RouteSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateIntegrationOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ConnectionId != nil {\n\t\tv := *s.ConnectionId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ConnectionType) > 0 {\n\t\tv := s.ConnectionType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.ContentHandlingStrategy) > 0 {\n\t\tv := s.ContentHandlingStrategy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentHandlingStrategy\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.CredentialsArn != nil {\n\t\tv := *s.CredentialsArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"credentialsArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationId != nil {\n\t\tv := *s.IntegrationId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationMethod != nil {\n\t\tv := *s.IntegrationMethod\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationMethod\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationResponseSelectionExpression != nil {\n\t\tv := *s.IntegrationResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.IntegrationType) > 0 {\n\t\tv := s.IntegrationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.IntegrationUri != nil {\n\t\tv := *s.IntegrationUri\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationUri\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.PassthroughBehavior) > 0 {\n\t\tv := s.PassthroughBehavior\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"passthroughBehavior\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := 
e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestTemplates) > 0 {\n\t\tv := s.RequestTemplates\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestTemplates\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.TemplateSelectionExpression != nil {\n\t\tv := *s.TemplateSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"templateSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.TimeoutInMillis != nil {\n\t\tv := *s.TimeoutInMillis\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"timeoutInMillis\", protocol.Int64Value(v), metadata)\n\t}\n\treturn nil\n}", "func (s UpdateSignalingChannelInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.ChannelARN != nil {\n\t\tv := *s.ChannelARN\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ChannelARN\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CurrentVersion != nil {\n\t\tv := *s.CurrentVersion\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"CurrentVersion\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.SingleMasterConfiguration != nil {\n\t\tv := s.SingleMasterConfiguration\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"SingleMasterConfiguration\", v, metadata)\n\t}\n\treturn nil\n}", "func (s DescribeDetectorInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.DetectorModelName != nil {\n\t\tv := *s.DetectorModelName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"detectorModelName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.KeyValue != nil {\n\t\tv := *s.KeyValue\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.QueryTarget, \"keyValue\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateApiMappingInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiMappingKey != nil {\n\t\tv := *s.ApiMappingKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiMappingKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Stage != nil {\n\t\tv := *s.Stage\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"stage\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DomainName != nil {\n\t\tv := *s.DomainName\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"domainName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateThingInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.AttributePayload != nil {\n\t\tv := s.AttributePayload\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"attributePayload\", v, metadata)\n\t}\n\tif s.BillingGroupName != nil {\n\t\tv := *s.BillingGroupName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"billingGroupName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ThingTypeName != nil {\n\t\tv := *s.ThingTypeName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"thingTypeName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ThingName != nil {\n\t\tv := *s.ThingName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"thingName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func encodeSchema(w io.Writer, s *schema.Schema) (err error) {\n\tif s == nil {\n\t\treturn\n\t}\n\n\tew := errWriter{w: w}\n\tif s.Description != \"\" {\n\t\tew.writeFormat(`\"description\": %q, `, s.Description)\n\t}\n\tew.writeString(`\"type\": \"object\", `)\n\tew.writeString(`\"additionalProperties\": false, `)\n\tew.writeString(`\"properties\": {`)\n\tvar required []string\n\tvar notFirst bool\n\tfor _, key := range sortedFieldNames(s.Fields) {\n\t\tfield := s.Fields[key]\n\t\tif notFirst {\n\t\t\tew.writeString(\", \")\n\t\t}\n\t\tnotFirst = true\n\t\tif field.Required {\n\t\t\trequired = append(required, fmt.Sprintf(\"%q\", key))\n\t\t}\n\t\tew.err = encodeField(ew, key, field)\n\t\tif ew.err != nil {\n\t\t\treturn ew.err\n\t\t}\n\t}\n\tew.writeString(\"}\")\n\tif s.MinLen > 0 {\n\t\tew.writeFormat(`, \"minProperties\": %s`, strconv.FormatInt(int64(s.MinLen), 10))\n\t}\n\tif s.MaxLen > 0 {\n\t\tew.writeFormat(`, \"maxProperties\": %s`, strconv.FormatInt(int64(s.MaxLen), 10))\n\t}\n\n\tif len(required) > 0 {\n\t\tew.writeFormat(`, \"required\": [%s]`, strings.Join(required, \", \"))\n\t}\n\treturn ew.err\n}", "func (s OutputService15TestShapeItemShape) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ItemDetail != nil {\n\t\tv := s.ItemDetail\n\t\tattrs := make([]protocol.Attribute, 0, 1)\n\n\t\tif len(s.ItemDetail.Type) > 0 {\n\n\t\t\tv := s.ItemDetail.Type\n\t\t\tattrs = append(attrs, protocol.Attribute{Name: \"xsi:type\", Value: v, Meta: protocol.Metadata{}})\n\t\t}\n\t\tmetadata := protocol.Metadata{Attributes: attrs, XMLNamespacePrefix: \"xsi\", XMLNamespaceURI: \"http://www.w3.org/2001/XMLSchema-instance\"}\n\t\te.SetFields(protocol.BodyTarget, \"ItemDetail\", v, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateApiMappingInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiMappingKey != nil {\n\t\tv := *s.ApiMappingKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, 
\"apiMappingKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Stage != nil {\n\t\tv := *s.Stage\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"stage\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiMappingId != nil {\n\t\tv := *s.ApiMappingId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiMappingId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DomainName != nil {\n\t\tv := *s.DomainName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"domainName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s VirtualServiceRef) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedAt != nil {\n\t\tv := *s.CreatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.LastUpdatedAt != nil {\n\t\tv := *s.LastUpdatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastUpdatedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.MeshName != nil {\n\t\tv := *s.MeshName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MeshOwner != nil {\n\t\tv := *s.MeshOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ResourceOwner != nil {\n\t\tv := *s.ResourceOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"resourceOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.VirtualServiceName != nil {\n\t\tv := *s.VirtualServiceName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"virtualServiceName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateIntegrationOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ConnectionId != nil {\n\t\tv := *s.ConnectionId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ConnectionType) > 0 {\n\t\tv := s.ConnectionType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.ContentHandlingStrategy) > 0 {\n\t\tv := s.ContentHandlingStrategy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentHandlingStrategy\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.CredentialsArn != nil {\n\t\tv := *s.CredentialsArn\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"credentialsArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationId != nil {\n\t\tv := *s.IntegrationId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationMethod != nil {\n\t\tv := *s.IntegrationMethod\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationMethod\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationResponseSelectionExpression != nil {\n\t\tv := *s.IntegrationResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.IntegrationType) > 0 {\n\t\tv := s.IntegrationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.IntegrationUri != nil {\n\t\tv := *s.IntegrationUri\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationUri\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.PassthroughBehavior) > 0 {\n\t\tv := s.PassthroughBehavior\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"passthroughBehavior\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestTemplates) > 0 {\n\t\tv := s.RequestTemplates\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestTemplates\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.TemplateSelectionExpression != nil {\n\t\tv := *s.TemplateSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"templateSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.TimeoutInMillis != nil {\n\t\tv := *s.TimeoutInMillis\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"timeoutInMillis\", protocol.Int64Value(v), metadata)\n\t}\n\treturn nil\n}", "func (s UpdateIPSetInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.Activate != nil {\n\t\tv := *s.Activate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"activate\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Location != nil {\n\t\tv := *s.Location\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"location\", 
protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DetectorId != nil {\n\t\tv := *s.DetectorId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"detectorId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IpSetId != nil {\n\t\tv := *s.IpSetId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"ipSetId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateStageInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.AccessLogSettings != nil {\n\t\tv := s.AccessLogSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"accessLogSettings\", v, metadata)\n\t}\n\tif s.ClientCertificateId != nil {\n\t\tv := *s.ClientCertificateId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"clientCertificateId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DefaultRouteSettings != nil {\n\t\tv := s.DefaultRouteSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"defaultRouteSettings\", v, metadata)\n\t}\n\tif s.DeploymentId != nil {\n\t\tv := *s.DeploymentId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"deploymentId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RouteSettings) > 0 {\n\t\tv := s.RouteSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"routeSettings\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.StageName != nil {\n\t\tv := *s.StageName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"stageName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.StageVariables) > 0 {\n\t\tv := s.StageVariables\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"stageVariables\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s GetSigningPlatformInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.PlatformId != nil {\n\t\tv := *s.PlatformId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"platformId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s Robot) MarshalFields(e 
protocol.FieldEncoder) error {\n\tif len(s.Architecture) > 0 {\n\t\tv := s.Architecture\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"architecture\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedAt != nil {\n\t\tv := *s.CreatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.FleetArn != nil {\n\t\tv := *s.FleetArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"fleetArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.GreenGrassGroupId != nil {\n\t\tv := *s.GreenGrassGroupId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"greenGrassGroupId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.LastDeploymentJob != nil {\n\t\tv := *s.LastDeploymentJob\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastDeploymentJob\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.LastDeploymentTime != nil {\n\t\tv := *s.LastDeploymentTime\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastDeploymentTime\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Status) > 0 {\n\t\tv := s.Status\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"status\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateIntegrationInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ConnectionId != nil {\n\t\tv := *s.ConnectionId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ConnectionType) > 0 {\n\t\tv := s.ConnectionType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.ContentHandlingStrategy) > 0 {\n\t\tv := s.ContentHandlingStrategy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentHandlingStrategy\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.CredentialsArn != nil {\n\t\tv := *s.CredentialsArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"credentialsArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationMethod != nil {\n\t\tv := *s.IntegrationMethod\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationMethod\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.IntegrationType) > 0 {\n\t\tv := s.IntegrationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.IntegrationUri != nil {\n\t\tv := *s.IntegrationUri\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationUri\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.PassthroughBehavior) > 0 {\n\t\tv := s.PassthroughBehavior\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"passthroughBehavior\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestTemplates) > 0 {\n\t\tv := s.RequestTemplates\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestTemplates\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.TemplateSelectionExpression != nil {\n\t\tv := *s.TemplateSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"templateSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.TimeoutInMillis != nil {\n\t\tv := *s.TimeoutInMillis\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"timeoutInMillis\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationId != nil {\n\t\tv := *s.IntegrationId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"integrationId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s DescribeInputDeviceOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ConnectionState) > 0 {\n\t\tv := s.ConnectionState\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionState\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.DeviceSettingsSyncState) > 0 {\n\t\tv := s.DeviceSettingsSyncState\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"deviceSettingsSyncState\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.HdDeviceSettings != nil {\n\t\tv := s.HdDeviceSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"hdDeviceSettings\", v, metadata)\n\t}\n\tif s.Id != nil {\n\t\tv := *s.Id\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"id\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, 
metadata)\n\t}\n\tif s.MacAddress != nil {\n\t\tv := *s.MacAddress\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"macAddress\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.NetworkSettings != nil {\n\t\tv := s.NetworkSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"networkSettings\", v, metadata)\n\t}\n\tif s.SerialNumber != nil {\n\t\tv := *s.SerialNumber\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"serialNumber\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Type) > 0 {\n\t\tv := s.Type\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"type\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateAliasInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.FunctionVersion != nil {\n\t\tv := *s.FunctionVersion\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"FunctionVersion\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RoutingConfig != nil {\n\t\tv := s.RoutingConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"RoutingConfig\", v, metadata)\n\t}\n\tif s.FunctionName != nil {\n\t\tv := *s.FunctionName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"FunctionName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateImageInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tvar ClientToken string\n\tif s.ClientToken != nil {\n\t\tClientToken = *s.ClientToken\n\t} else {\n\t\tClientToken = protocol.GetIdempotencyToken()\n\t}\n\t{\n\t\tv := ClientToken\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"clientToken\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DistributionConfigurationArn != nil {\n\t\tv := *s.DistributionConfigurationArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"distributionConfigurationArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.EnhancedImageMetadataEnabled != nil {\n\t\tv := *s.EnhancedImageMetadataEnabled\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enhancedImageMetadataEnabled\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.ImageRecipeArn != nil {\n\t\tv := *s.ImageRecipeArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"imageRecipeArn\", 
protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ImageTestsConfiguration != nil {\n\t\tv := s.ImageTestsConfiguration\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"imageTestsConfiguration\", v, metadata)\n\t}\n\tif s.InfrastructureConfigurationArn != nil {\n\t\tv := *s.InfrastructureConfigurationArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"infrastructureConfigurationArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\treturn nil\n}", "func (s VpcLink) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.CreatedDate != nil {\n\t\tv := *s.CreatedDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdDate\",\n\t\t\tprotocol.TimeValue{V: v, Format: \"iso8601\", QuotedFormatTime: true}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.SecurityGroupIds != nil {\n\t\tv := s.SecurityGroupIds\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"securityGroupIds\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.SubnetIds != nil {\n\t\tv := s.SubnetIds\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"subnetIds\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.VpcLinkId != nil {\n\t\tv := *s.VpcLinkId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"vpcLinkId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.VpcLinkStatus) > 0 {\n\t\tv := s.VpcLinkStatus\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"vpcLinkStatus\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.VpcLinkStatusMessage != nil {\n\t\tv := *s.VpcLinkStatusMessage\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"vpcLinkStatusMessage\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.VpcLinkVersion) > 0 {\n\t\tv := s.VpcLinkVersion\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"vpcLinkVersion\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\treturn nil\n}", "func (s Route) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiGatewayManaged != nil {\n\t\tv := *s.ApiGatewayManaged\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiGatewayManaged\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.ApiKeyRequired != nil 
{\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.AuthorizationScopes != nil {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RequestModels != nil {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RequestParameters != nil {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s Integration) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiGatewayManaged != nil {\n\t\tv := *s.ApiGatewayManaged\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiGatewayManaged\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.ConnectionId != nil {\n\t\tv := *s.ConnectionId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ConnectionType) > 0 {\n\t\tv 
:= s.ConnectionType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.ContentHandlingStrategy) > 0 {\n\t\tv := s.ContentHandlingStrategy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentHandlingStrategy\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.CredentialsArn != nil {\n\t\tv := *s.CredentialsArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"credentialsArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationId != nil {\n\t\tv := *s.IntegrationId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationMethod != nil {\n\t\tv := *s.IntegrationMethod\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationMethod\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationResponseSelectionExpression != nil {\n\t\tv := *s.IntegrationResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.IntegrationType) > 0 {\n\t\tv := s.IntegrationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.IntegrationUri != nil {\n\t\tv := *s.IntegrationUri\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationUri\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.PassthroughBehavior) > 0 {\n\t\tv := s.PassthroughBehavior\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"passthroughBehavior\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.PayloadFormatVersion != nil {\n\t\tv := *s.PayloadFormatVersion\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"payloadFormatVersion\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RequestParameters != nil {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RequestTemplates != nil {\n\t\tv := s.RequestTemplates\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestTemplates\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.TemplateSelectionExpression != nil {\n\t\tv := *s.TemplateSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"templateSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif 
s.TimeoutInMillis != nil {\n\t\tv := *s.TimeoutInMillis\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"timeoutInMillis\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.TlsConfig != nil {\n\t\tv := s.TlsConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"tlsConfig\", v, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateModelInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ContentType != nil {\n\t\tv := *s.ContentType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentType\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Schema != nil {\n\t\tv := *s.Schema\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"schema\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelId != nil {\n\t\tv := *s.ModelId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"modelId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateOTAUpdateInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.AdditionalParameters != nil {\n\t\tv := s.AdditionalParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"additionalParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.AwsJobExecutionsRolloutConfig != nil {\n\t\tv := s.AwsJobExecutionsRolloutConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"awsJobExecutionsRolloutConfig\", v, metadata)\n\t}\n\tif s.AwsJobPresignedUrlConfig != nil {\n\t\tv := s.AwsJobPresignedUrlConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"awsJobPresignedUrlConfig\", v, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Files != nil {\n\t\tv := s.Files\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"files\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Protocols != nil {\n\t\tv := s.Protocols\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"protocols\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v 
{\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.RoleArn != nil {\n\t\tv := *s.RoleArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"roleArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"tags\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.TargetSelection) > 0 {\n\t\tv := s.TargetSelection\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"targetSelection\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Targets != nil {\n\t\tv := s.Targets\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"targets\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.OtaUpdateId != nil {\n\t\tv := *s.OtaUpdateId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"otaUpdateId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateIntegrationInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ConnectionId != nil {\n\t\tv := *s.ConnectionId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ConnectionType) > 0 {\n\t\tv := s.ConnectionType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.ContentHandlingStrategy) > 0 {\n\t\tv := s.ContentHandlingStrategy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentHandlingStrategy\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.CredentialsArn != nil {\n\t\tv := *s.CredentialsArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"credentialsArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationMethod != nil {\n\t\tv := *s.IntegrationMethod\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationMethod\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.IntegrationType) > 0 {\n\t\tv := s.IntegrationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.IntegrationUri != nil {\n\t\tv := *s.IntegrationUri\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationUri\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.PassthroughBehavior) > 0 {\n\t\tv := s.PassthroughBehavior\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, 
\"passthroughBehavior\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestTemplates) > 0 {\n\t\tv := s.RequestTemplates\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestTemplates\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.TemplateSelectionExpression != nil {\n\t\tv := *s.TemplateSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"templateSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.TimeoutInMillis != nil {\n\t\tv := *s.TimeoutInMillis\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"timeoutInMillis\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateApiKeyInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Expires != nil {\n\t\tv := *s.Expires\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"expires\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateBranchInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.BackendEnvironmentArn != nil {\n\t\tv := *s.BackendEnvironmentArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"backendEnvironmentArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BasicAuthCredentials != nil {\n\t\tv := *s.BasicAuthCredentials\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"basicAuthCredentials\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BranchName != nil {\n\t\tv := *s.BranchName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"branchName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BuildSpec != nil {\n\t\tv := *s.BuildSpec\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"buildSpec\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DisplayName != nil {\n\t\tv := *s.DisplayName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"displayName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.EnableAutoBuild != nil {\n\t\tv := *s.EnableAutoBuild\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableAutoBuild\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnableBasicAuth != nil {\n\t\tv := *s.EnableBasicAuth\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableBasicAuth\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnableNotification != nil {\n\t\tv := *s.EnableNotification\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableNotification\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnablePullRequestPreview != nil {\n\t\tv := *s.EnablePullRequestPreview\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enablePullRequestPreview\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnvironmentVariables != nil {\n\t\tv := s.EnvironmentVariables\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"environmentVariables\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.Framework != nil {\n\t\tv := *s.Framework\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"framework\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.PullRequestEnvironmentName != nil {\n\t\tv := *s.PullRequestEnvironmentName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"pullRequestEnvironmentName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Stage) > 0 {\n\t\tv := s.Stage\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"stage\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.Ttl != nil {\n\t\tv := *s.Ttl\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ttl\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.AppId != nil {\n\t\tv := *s.AppId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"appId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateRouteResponseInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ResponseModels) > 0 {\n\t\tv := s.ResponseModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := 
e.Map(protocol.BodyTarget, \"responseModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.ResponseParameters) > 0 {\n\t\tv := s.ResponseParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"responseParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteResponseKey != nil {\n\t\tv := *s.RouteResponseKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseId != nil {\n\t\tv := *s.RouteResponseId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"routeResponseId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreatePackageInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.PackageDescription != nil {\n\t\tv := *s.PackageDescription\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"PackageDescription\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.PackageName != nil {\n\t\tv := *s.PackageName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"PackageName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.PackageSource != nil {\n\t\tv := s.PackageSource\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"PackageSource\", v, metadata)\n\t}\n\tif len(s.PackageType) > 0 {\n\t\tv := s.PackageType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"PackageType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\treturn nil\n}", "func (s UpdatePipelineOutput) MarshalFields(e protocol.FieldEncoder) error {\n\treturn nil\n}", "func (s OutputService11TestShapeOutputService11TestCaseOperation1Output) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Char != nil {\n\t\tv := *s.Char\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-char\", protocol.StringValue(v), metadata)\n\t}\n\tif s.Double != nil {\n\t\tv := *s.Double\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-double\", protocol.Float64Value(v), metadata)\n\t}\n\tif s.FalseBool != nil {\n\t\tv := *s.FalseBool\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-false-bool\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Float != nil {\n\t\tv := *s.Float\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-float\", protocol.Float64Value(v), metadata)\n\t}\n\tif s.Integer != nil {\n\t\tv := *s.Integer\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-int\", 
protocol.Int64Value(v), metadata)\n\t}\n\tif s.Long != nil {\n\t\tv := *s.Long\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-long\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Str != nil {\n\t\tv := *s.Str\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-str\", protocol.StringValue(v), metadata)\n\t}\n\tif s.Timestamp != nil {\n\t\tv := *s.Timestamp\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-timestamp\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.RFC822TimeFormatName, QuotedFormatTime: false}, metadata)\n\t}\n\tif s.TrueBool != nil {\n\t\tv := *s.TrueBool\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-true-bool\", protocol.BoolValue(v), metadata)\n\t}\n\treturn nil\n}", "func (s CreateRouteResponseInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ResponseModels) > 0 {\n\t\tv := s.ResponseModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"responseModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.ResponseParameters) > 0 {\n\t\tv := s.ResponseParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"responseParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteResponseKey != nil {\n\t\tv := *s.RouteResponseKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateAppInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.BasicAuthCredentials != nil {\n\t\tv := *s.BasicAuthCredentials\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"basicAuthCredentials\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BuildSpec != nil {\n\t\tv := *s.BuildSpec\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"buildSpec\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.CustomRules) > 0 {\n\t\tv := s.CustomRules\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"customRules\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v 
{\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.EnableBasicAuth != nil {\n\t\tv := *s.EnableBasicAuth\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableBasicAuth\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnableBranchAutoBuild != nil {\n\t\tv := *s.EnableBranchAutoBuild\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableBranchAutoBuild\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.EnvironmentVariables) > 0 {\n\t\tv := s.EnvironmentVariables\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"environmentVariables\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.IamServiceRoleArn != nil {\n\t\tv := *s.IamServiceRoleArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"iamServiceRoleArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OauthToken != nil {\n\t\tv := *s.OauthToken\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"oauthToken\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Platform) > 0 {\n\t\tv := s.Platform\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"platform\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Repository != nil {\n\t\tv := *s.Repository\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"repository\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Tags) > 0 {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\treturn nil\n}", "func (s GetApiInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateBranchInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.BasicAuthCredentials != nil {\n\t\tv := *s.BasicAuthCredentials\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"basicAuthCredentials\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BranchName != nil {\n\t\tv := *s.BranchName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"branchName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, 
metadata)\n\t}\n\tif s.BuildSpec != nil {\n\t\tv := *s.BuildSpec\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"buildSpec\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.EnableAutoBuild != nil {\n\t\tv := *s.EnableAutoBuild\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableAutoBuild\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnableBasicAuth != nil {\n\t\tv := *s.EnableBasicAuth\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableBasicAuth\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnableNotification != nil {\n\t\tv := *s.EnableNotification\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableNotification\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.EnvironmentVariables) > 0 {\n\t\tv := s.EnvironmentVariables\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"environmentVariables\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.Framework != nil {\n\t\tv := *s.Framework\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"framework\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Stage) > 0 {\n\t\tv := s.Stage\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"stage\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.Tags) > 0 {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.Ttl != nil {\n\t\tv := *s.Ttl\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ttl\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.AppId != nil {\n\t\tv := *s.AppId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"appId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateApiMappingOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiMappingId != nil {\n\t\tv := *s.ApiMappingId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiMappingId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiMappingKey != nil {\n\t\tv := *s.ApiMappingKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiMappingKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Stage != nil {\n\t\tv := *s.Stage\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"stage\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s Integration) 
MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ConnectionId != nil {\n\t\tv := *s.ConnectionId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ConnectionType) > 0 {\n\t\tv := s.ConnectionType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.ContentHandlingStrategy) > 0 {\n\t\tv := s.ContentHandlingStrategy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentHandlingStrategy\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.CredentialsArn != nil {\n\t\tv := *s.CredentialsArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"credentialsArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationId != nil {\n\t\tv := *s.IntegrationId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationMethod != nil {\n\t\tv := *s.IntegrationMethod\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationMethod\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationResponseSelectionExpression != nil {\n\t\tv := *s.IntegrationResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.IntegrationType) > 0 {\n\t\tv := s.IntegrationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.IntegrationUri != nil {\n\t\tv := *s.IntegrationUri\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationUri\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.PassthroughBehavior) > 0 {\n\t\tv := s.PassthroughBehavior\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"passthroughBehavior\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestTemplates) > 0 {\n\t\tv := s.RequestTemplates\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestTemplates\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.TemplateSelectionExpression != nil {\n\t\tv := *s.TemplateSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"templateSelectionExpression\", 
protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.TimeoutInMillis != nil {\n\t\tv := *s.TimeoutInMillis\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"timeoutInMillis\", protocol.Int64Value(v), metadata)\n\t}\n\treturn nil\n}", "func (s VirtualNodeSpec) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.BackendDefaults != nil {\n\t\tv := s.BackendDefaults\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"backendDefaults\", v, metadata)\n\t}\n\tif s.Backends != nil {\n\t\tv := s.Backends\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"backends\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Listeners != nil {\n\t\tv := s.Listeners\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"listeners\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Logging != nil {\n\t\tv := s.Logging\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"logging\", v, metadata)\n\t}\n\tif s.ServiceDiscovery != nil {\n\t\tv := s.ServiceDiscovery\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"serviceDiscovery\", v, metadata)\n\t}\n\treturn nil\n}", "func (s NetworkPathComponent) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ComponentId != nil {\n\t\tv := *s.ComponentId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ComponentId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ComponentType != nil {\n\t\tv := *s.ComponentType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ComponentType\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Egress != nil {\n\t\tv := s.Egress\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"Egress\", v, metadata)\n\t}\n\tif s.Ingress != nil {\n\t\tv := s.Ingress\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"Ingress\", v, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateBranchInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.BasicAuthCredentials != nil {\n\t\tv := *s.BasicAuthCredentials\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"basicAuthCredentials\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BuildSpec != nil {\n\t\tv := *s.BuildSpec\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"buildSpec\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.EnableAutoBuild != nil {\n\t\tv := *s.EnableAutoBuild\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableAutoBuild\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnableBasicAuth != nil {\n\t\tv := *s.EnableBasicAuth\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableBasicAuth\", protocol.BoolValue(v), 
metadata)\n\t}\n\tif s.EnableNotification != nil {\n\t\tv := *s.EnableNotification\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableNotification\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.EnvironmentVariables) > 0 {\n\t\tv := s.EnvironmentVariables\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"environmentVariables\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.Framework != nil {\n\t\tv := *s.Framework\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"framework\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Stage) > 0 {\n\t\tv := s.Stage\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"stage\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Ttl != nil {\n\t\tv := *s.Ttl\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ttl\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.AppId != nil {\n\t\tv := *s.AppId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"appId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BranchName != nil {\n\t\tv := *s.BranchName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"branchName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s OutputService13TestShapeTimeContainer) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Bar != nil {\n\t\tv := *s.Bar\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"bar\",\n\t\t\tprotocol.TimeValue{V: v, Format: \"unixTimestamp\", QuotedFormatTime: false}, metadata)\n\t}\n\tif s.Foo != nil {\n\t\tv := *s.Foo\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"foo\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.ISO8601TimeFormatName, QuotedFormatTime: false}, metadata)\n\t}\n\treturn nil\n}", "func (s ImportComponentInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.ChangeDescription != nil {\n\t\tv := *s.ChangeDescription\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"changeDescription\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tvar ClientToken string\n\tif s.ClientToken != nil {\n\t\tClientToken = *s.ClientToken\n\t} else {\n\t\tClientToken = protocol.GetIdempotencyToken()\n\t}\n\t{\n\t\tv := ClientToken\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"clientToken\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Data != nil {\n\t\tv := *s.Data\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"data\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Format) > 0 {\n\t\tv := s.Format\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, 
\"format\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.KmsKeyId != nil {\n\t\tv := *s.KmsKeyId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"kmsKeyId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Platform) > 0 {\n\t\tv := s.Platform\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"platform\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.SemanticVersion != nil {\n\t\tv := *s.SemanticVersion\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"semanticVersion\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.Type) > 0 {\n\t\tv := s.Type\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"type\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Uri != nil {\n\t\tv := *s.Uri\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"uri\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s Route) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.AuthorizationScopes) > 0 {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RequestModels) > 0 {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := 
s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CodeReview) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.CodeReviewArn != nil {\n\t\tv := *s.CodeReviewArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"CodeReviewArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedTimeStamp != nil {\n\t\tv := *s.CreatedTimeStamp\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"CreatedTimeStamp\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.LastUpdatedTimeStamp != nil {\n\t\tv := *s.LastUpdatedTimeStamp\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"LastUpdatedTimeStamp\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.Metrics != nil {\n\t\tv := s.Metrics\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"Metrics\", v, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Owner != nil {\n\t\tv := *s.Owner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Owner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ProviderType) > 0 {\n\t\tv := s.ProviderType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ProviderType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.PullRequestId != nil {\n\t\tv := *s.PullRequestId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"PullRequestId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RepositoryName != nil {\n\t\tv := *s.RepositoryName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"RepositoryName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.SourceCodeType != nil {\n\t\tv := s.SourceCodeType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"SourceCodeType\", v, metadata)\n\t}\n\tif len(s.State) > 0 {\n\t\tv := s.State\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"State\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.StateReason != nil {\n\t\tv := *s.StateReason\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"StateReason\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Type) > 0 {\n\t\tv := s.Type\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Type\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\treturn nil\n}", "func (s PutObjectOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ContentSHA256 != nil {\n\t\tv := *s.ContentSHA256\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ContentSHA256\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ETag != nil {\n\t\tv := *s.ETag\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ETag\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.StorageClass) > 0 {\n\t\tv := s.StorageClass\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"StorageClass\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\treturn nil\n}", "func (s GetRouteOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.AuthorizationScopes) > 0 {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RequestModels) > 0 {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s OutputService1TestShapeOutputService1TestCaseOperation1Output) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Char != nil {\n\t\tv := *s.Char\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Char\", protocol.StringValue(v), metadata)\n\t}\n\tif s.Double != nil {\n\t\tv := *s.Double\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Double\", protocol.Float64Value(v), metadata)\n\t}\n\tif s.FalseBool != nil {\n\t\tv := *s.FalseBool\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"FalseBool\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Float != nil {\n\t\tv := *s.Float\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Float\", protocol.Float64Value(v), metadata)\n\t}\n\tif s.Float64s != nil {\n\t\tv := s.Float64s\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"Float64s\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.Float64Value(v1))\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Long != nil {\n\t\tv := *s.Long\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Long\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Num != nil {\n\t\tv := *s.Num\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"FooNum\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Str != nil {\n\t\tv := *s.Str\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Str\", protocol.StringValue(v), metadata)\n\t}\n\tif s.Timestamp != nil {\n\t\tv := *s.Timestamp\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Timestamp\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.ISO8601TimeFormatName, QuotedFormatTime: false}, metadata)\n\t}\n\tif s.TrueBool != nil {\n\t\tv := *s.TrueBool\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"TrueBool\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.ImaHeader != nil {\n\t\tv := *s.ImaHeader\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"ImaHeader\", protocol.StringValue(v), metadata)\n\t}\n\tif s.ImaHeaderLocation != nil {\n\t\tv := *s.ImaHeaderLocation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"X-Foo\", protocol.StringValue(v), metadata)\n\t}\n\treturn nil\n}", "func (s GetStageInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.StageName != nil {\n\t\tv := *s.StageName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"stageName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateAppInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.BasicAuthCredentials != nil {\n\t\tv := *s.BasicAuthCredentials\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"basicAuthCredentials\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BuildSpec != nil {\n\t\tv := *s.BuildSpec\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"buildSpec\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.CustomRules) > 0 {\n\t\tv := s.CustomRules\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"customRules\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.EnableBasicAuth != nil {\n\t\tv := *s.EnableBasicAuth\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableBasicAuth\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnableBranchAutoBuild != nil {\n\t\tv := *s.EnableBranchAutoBuild\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableBranchAutoBuild\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.EnvironmentVariables) > 0 {\n\t\tv := s.EnvironmentVariables\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"environmentVariables\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.IamServiceRoleArn != nil {\n\t\tv := *s.IamServiceRoleArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"iamServiceRoleArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Platform) > 0 {\n\t\tv := s.Platform\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"platform\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AppId != nil {\n\t\tv := *s.AppId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"appId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s OutputService1TestShapeOutputService1TestCaseOperation2Output) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Char != nil {\n\t\tv := *s.Char\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Char\", protocol.StringValue(v), metadata)\n\t}\n\tif s.Double != nil {\n\t\tv := *s.Double\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Double\", protocol.Float64Value(v), metadata)\n\t}\n\tif s.FalseBool != nil {\n\t\tv := *s.FalseBool\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"FalseBool\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Float != nil {\n\t\tv := *s.Float\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Float\", protocol.Float64Value(v), metadata)\n\t}\n\tif s.Float64s != nil {\n\t\tv := s.Float64s\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"Float64s\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.Float64Value(v1))\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Long != nil {\n\t\tv := *s.Long\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Long\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Num != nil {\n\t\tv := *s.Num\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"FooNum\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Str != nil {\n\t\tv := *s.Str\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Str\", protocol.StringValue(v), metadata)\n\t}\n\tif s.Timestamp != nil {\n\t\tv := *s.Timestamp\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Timestamp\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.ISO8601TimeFormatName, QuotedFormatTime: false}, metadata)\n\t}\n\tif s.TrueBool != nil {\n\t\tv := *s.TrueBool\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"TrueBool\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.ImaHeader != nil {\n\t\tv := *s.ImaHeader\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"ImaHeader\", protocol.StringValue(v), metadata)\n\t}\n\tif s.ImaHeaderLocation != nil {\n\t\tv := *s.ImaHeaderLocation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"X-Foo\", protocol.StringValue(v), metadata)\n\t}\n\treturn nil\n}", "func (s GetIntegrationOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ConnectionId != nil {\n\t\tv := *s.ConnectionId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ConnectionType) > 0 {\n\t\tv := s.ConnectionType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.ContentHandlingStrategy) > 0 {\n\t\tv := s.ContentHandlingStrategy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentHandlingStrategy\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.CredentialsArn != nil {\n\t\tv := *s.CredentialsArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"credentialsArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationId != nil {\n\t\tv := *s.IntegrationId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationMethod != nil {\n\t\tv := 
*s.IntegrationMethod\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationMethod\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationResponseSelectionExpression != nil {\n\t\tv := *s.IntegrationResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.IntegrationType) > 0 {\n\t\tv := s.IntegrationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.IntegrationUri != nil {\n\t\tv := *s.IntegrationUri\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationUri\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.PassthroughBehavior) > 0 {\n\t\tv := s.PassthroughBehavior\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"passthroughBehavior\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestTemplates) > 0 {\n\t\tv := s.RequestTemplates\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestTemplates\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.TemplateSelectionExpression != nil {\n\t\tv := *s.TemplateSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"templateSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.TimeoutInMillis != nil {\n\t\tv := *s.TimeoutInMillis\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"timeoutInMillis\", protocol.Int64Value(v), metadata)\n\t}\n\treturn nil\n}", "func (s VirtualServiceBackend) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ClientPolicy != nil {\n\t\tv := s.ClientPolicy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"clientPolicy\", v, metadata)\n\t}\n\tif s.VirtualServiceName != nil {\n\t\tv := *s.VirtualServiceName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"virtualServiceName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreatePolicyInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.PolicyDocument != nil {\n\t\tv := *s.PolicyDocument\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"policyDocument\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.PolicyName != nil {\n\t\tv := *s.PolicyName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"policyName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s 
CreateBucketInput) MarshalFields(e protocol.FieldEncoder) error {\n\n\tif len(s.ACL) > 0 {\n\t\tv := s.ACL\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-amz-acl\", v, metadata)\n\t}\n\tif s.GrantFullControl != nil {\n\t\tv := *s.GrantFullControl\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-amz-grant-full-control\", protocol.StringValue(v), metadata)\n\t}\n\tif s.GrantRead != nil {\n\t\tv := *s.GrantRead\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-amz-grant-read\", protocol.StringValue(v), metadata)\n\t}\n\tif s.GrantReadACP != nil {\n\t\tv := *s.GrantReadACP\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-amz-grant-read-acp\", protocol.StringValue(v), metadata)\n\t}\n\tif s.GrantWrite != nil {\n\t\tv := *s.GrantWrite\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-amz-grant-write\", protocol.StringValue(v), metadata)\n\t}\n\tif s.GrantWriteACP != nil {\n\t\tv := *s.GrantWriteACP\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-amz-grant-write-acp\", protocol.StringValue(v), metadata)\n\t}\n\tif s.ObjectLockEnabledForBucket != nil {\n\t\tv := *s.ObjectLockEnabledForBucket\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-amz-bucket-object-lock-enabled\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Bucket != nil {\n\t\tv := *s.Bucket\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"Bucket\", protocol.StringValue(v), metadata)\n\t}\n\tif s.CreateBucketConfiguration != nil {\n\t\tv := s.CreateBucketConfiguration\n\n\t\tmetadata := protocol.Metadata{XMLNamespaceURI: \"http://s3.amazonaws.com/doc/2006-03-01/\"}\n\t\te.SetFields(protocol.PayloadTarget, \"CreateBucketConfiguration\", v, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateApiMappingOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiMappingId != nil {\n\t\tv := *s.ApiMappingId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiMappingId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiMappingKey != nil {\n\t\tv := *s.ApiMappingKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiMappingKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Stage != nil {\n\t\tv := *s.Stage\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"stage\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s Pipeline) MarshalFields(e protocol.FieldEncoder) error {\n\tif len(s.Activities) > 0 {\n\t\tv := s.Activities\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"activities\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreationTime != nil {\n\t\tv := *s.CreationTime\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, 
\"creationTime\", protocol.TimeValue{V: v, Format: protocol.UnixTimeFormat}, metadata)\n\t}\n\tif s.LastUpdateTime != nil {\n\t\tv := *s.LastUpdateTime\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastUpdateTime\", protocol.TimeValue{V: v, Format: protocol.UnixTimeFormat}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ReprocessingSummaries) > 0 {\n\t\tv := s.ReprocessingSummaries\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"reprocessingSummaries\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s VirtualNodeRef) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedAt != nil {\n\t\tv := *s.CreatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.LastUpdatedAt != nil {\n\t\tv := *s.LastUpdatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastUpdatedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.MeshName != nil {\n\t\tv := *s.MeshName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MeshOwner != nil {\n\t\tv := *s.MeshOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ResourceOwner != nil {\n\t\tv := *s.ResourceOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"resourceOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.VirtualNodeName != nil {\n\t\tv := *s.VirtualNodeName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"virtualNodeName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s GetIntrospectionSchemaInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Format) > 0 {\n\t\tv := s.Format\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.QueryTarget, \"format\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.IncludeDirectives != nil {\n\t\tv := *s.IncludeDirectives\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.QueryTarget, \"includeDirectives\", protocol.BoolValue(v), metadata)\n\t}\n\treturn nil\n}", "func 
(s OTAUpdateInfo) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.AdditionalParameters != nil {\n\t\tv := s.AdditionalParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"additionalParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.AwsIotJobArn != nil {\n\t\tv := *s.AwsIotJobArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"awsIotJobArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.AwsIotJobId != nil {\n\t\tv := *s.AwsIotJobId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"awsIotJobId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.AwsJobExecutionsRolloutConfig != nil {\n\t\tv := s.AwsJobExecutionsRolloutConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"awsJobExecutionsRolloutConfig\", v, metadata)\n\t}\n\tif s.AwsJobPresignedUrlConfig != nil {\n\t\tv := s.AwsJobPresignedUrlConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"awsJobPresignedUrlConfig\", v, metadata)\n\t}\n\tif s.CreationDate != nil {\n\t\tv := *s.CreationDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"creationDate\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ErrorInfo != nil {\n\t\tv := s.ErrorInfo\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"errorInfo\", v, metadata)\n\t}\n\tif s.LastModifiedDate != nil {\n\t\tv := *s.LastModifiedDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastModifiedDate\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.OtaUpdateArn != nil {\n\t\tv := *s.OtaUpdateArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"otaUpdateArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OtaUpdateFiles != nil {\n\t\tv := s.OtaUpdateFiles\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"otaUpdateFiles\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.OtaUpdateId != nil {\n\t\tv := *s.OtaUpdateId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"otaUpdateId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.OtaUpdateStatus) > 0 {\n\t\tv := s.OtaUpdateStatus\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"otaUpdateStatus\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Protocols != nil {\n\t\tv := s.Protocols\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"protocols\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.TargetSelection) > 0 {\n\t\tv := s.TargetSelection\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"targetSelection\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Targets != nil {\n\t\tv := s.Targets\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"targets\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s Source) MarshalFields(e protocol.FieldEncoder) error {\n\tif len(s.Architecture) > 0 {\n\t\tv := s.Architecture\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"architecture\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Etag != nil {\n\t\tv := *s.Etag\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"etag\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.S3Bucket != nil {\n\t\tv := *s.S3Bucket\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"s3Bucket\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.S3Key != nil {\n\t\tv := *s.S3Key\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"s3Key\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateModelOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ContentType != nil {\n\t\tv := *s.ContentType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentType\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelId != nil {\n\t\tv := *s.ModelId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Schema != nil {\n\t\tv := *s.Schema\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"schema\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateJobInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.HopDestinations != nil {\n\t\tv := s.HopDestinations\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"hopDestinations\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\t}\n\tif s.AccelerationSettings != nil {\n\t\tv := s.AccelerationSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"accelerationSettings\", v, metadata)\n\t}\n\tif len(s.BillingTagsSource) > 0 {\n\t\tv := s.BillingTagsSource\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"billingTagsSource\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tvar ClientRequestToken string\n\tif s.ClientRequestToken != nil {\n\t\tClientRequestToken = *s.ClientRequestToken\n\t} 
else {\n\t\tClientRequestToken = protocol.GetIdempotencyToken()\n\t}\n\t{\n\t\tv := ClientRequestToken\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"clientRequestToken\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.JobTemplate != nil {\n\t\tv := *s.JobTemplate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"jobTemplate\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Priority != nil {\n\t\tv := *s.Priority\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"priority\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Queue != nil {\n\t\tv := *s.Queue\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"queue\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Role != nil {\n\t\tv := *s.Role\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"role\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Settings != nil {\n\t\tv := s.Settings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"settings\", v, metadata)\n\t}\n\tif len(s.SimulateReservedQueue) > 0 {\n\t\tv := s.SimulateReservedQueue\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"simulateReservedQueue\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.StatusUpdateInterval) > 0 {\n\t\tv := s.StatusUpdateInterval\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"statusUpdateInterval\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.UserMetadata != nil {\n\t\tv := s.UserMetadata\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"userMetadata\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\treturn nil\n}", "func (s UpdateBrokerStorageInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.CurrentVersion != nil {\n\t\tv := *s.CurrentVersion\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"currentVersion\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.TargetBrokerEBSVolumeInfo != nil {\n\t\tv := s.TargetBrokerEBSVolumeInfo\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"targetBrokerEBSVolumeInfo\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.ClusterArn != nil {\n\t\tv := *s.ClusterArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"clusterArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CustomCodeSigning) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.CertificateChain != nil {\n\t\tv := s.CertificateChain\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"certificateChain\", v, metadata)\n\t}\n\tif s.HashAlgorithm != nil {\n\t\tv := *s.HashAlgorithm\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"hashAlgorithm\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Signature != nil {\n\t\tv := s.Signature\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"signature\", v, metadata)\n\t}\n\tif s.SignatureAlgorithm != nil {\n\t\tv := *s.SignatureAlgorithm\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"signatureAlgorithm\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s GetMacieSessionInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\treturn nil\n}", "func (s MeshRef) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedAt != nil {\n\t\tv := *s.CreatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.LastUpdatedAt != nil {\n\t\tv := *s.LastUpdatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastUpdatedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.MeshName != nil {\n\t\tv := *s.MeshName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MeshOwner != nil {\n\t\tv := *s.MeshOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ResourceOwner != nil {\n\t\tv := *s.ResourceOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"resourceOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.Int64Value(v), metadata)\n\t}\n\treturn nil\n}", "func (s Resource) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Attributes) > 0 {\n\t\tv := s.Attributes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"attributes\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.Feature != nil {\n\t\tv := *s.Feature\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"feature\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", 
protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Type != nil {\n\t\tv := *s.Type\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"type\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s GatewayRouteRef) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedAt != nil {\n\t\tv := *s.CreatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.GatewayRouteName != nil {\n\t\tv := *s.GatewayRouteName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"gatewayRouteName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.LastUpdatedAt != nil {\n\t\tv := *s.LastUpdatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastUpdatedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.MeshName != nil {\n\t\tv := *s.MeshName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MeshOwner != nil {\n\t\tv := *s.MeshOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ResourceOwner != nil {\n\t\tv := *s.ResourceOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"resourceOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.VirtualGatewayName != nil {\n\t\tv := *s.VirtualGatewayName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"virtualGatewayName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s RouteRef) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedAt != nil {\n\t\tv := *s.CreatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.LastUpdatedAt != nil {\n\t\tv := *s.LastUpdatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastUpdatedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.MeshName != nil {\n\t\tv := *s.MeshName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MeshOwner != nil {\n\t\tv := *s.MeshOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, 
\"meshOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ResourceOwner != nil {\n\t\tv := *s.ResourceOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"resourceOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteName != nil {\n\t\tv := *s.RouteName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.VirtualRouterName != nil {\n\t\tv := *s.VirtualRouterName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"virtualRouterName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s DescribePipelineOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Pipeline != nil {\n\t\tv := s.Pipeline\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"pipeline\", v, metadata)\n\t}\n\treturn nil\n}", "func (s DescribePipelineOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Pipeline != nil {\n\t\tv := s.Pipeline\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"pipeline\", v, metadata)\n\t}\n\treturn nil\n}", "func (s DescribeDetectorModelInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.DetectorModelName != nil {\n\t\tv := *s.DetectorModelName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"detectorModelName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DetectorModelVersion != nil {\n\t\tv := *s.DetectorModelVersion\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.QueryTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s AttachPolicyInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.PolicyName != nil {\n\t\tv := *s.PolicyName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"policyName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (v *Service) Encode(sw stream.Writer) error {\n\tif err := sw.WriteStructBegin(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 7, Type: wire.TBinary}); err != nil {\n\t\treturn err\n\t}\n\tif err := sw.WriteString(v.Name); err != nil {\n\t\treturn err\n\t}\n\tif err := sw.WriteFieldEnd(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 1, Type: wire.TBinary}); err != nil {\n\t\treturn err\n\t}\n\tif err := sw.WriteString(v.ThriftName); err != nil {\n\t\treturn err\n\t}\n\tif err := sw.WriteFieldEnd(); err != nil {\n\t\treturn err\n\t}\n\n\tif v.ParentID != nil {\n\t\tif err := 
sw.WriteFieldBegin(stream.FieldHeader{ID: 4, Type: wire.TI32}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := v.ParentID.Encode(sw); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := sw.WriteFieldEnd(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 5, Type: wire.TList}); err != nil {\n\t\treturn err\n\t}\n\tif err := _List_Function_Encode(v.Functions, sw); err != nil {\n\t\treturn err\n\t}\n\tif err := sw.WriteFieldEnd(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 6, Type: wire.TI32}); err != nil {\n\t\treturn err\n\t}\n\tif err := v.ModuleID.Encode(sw); err != nil {\n\t\treturn err\n\t}\n\tif err := sw.WriteFieldEnd(); err != nil {\n\t\treturn err\n\t}\n\n\tif v.Annotations != nil {\n\t\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 8, Type: wire.TMap}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := _Map_String_String_Encode(v.Annotations, sw); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := sw.WriteFieldEnd(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn sw.WriteStructEnd()\n}", "func (s GetSigningPlatformOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif len(s.Category) > 0 {\n\t\tv := s.Category\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"category\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.DisplayName != nil {\n\t\tv := *s.DisplayName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"displayName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MaxSizeInMB != nil {\n\t\tv := *s.MaxSizeInMB\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"maxSizeInMB\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Partner != nil {\n\t\tv := *s.Partner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"partner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.PlatformId != nil {\n\t\tv := *s.PlatformId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"platformId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.SigningConfiguration != nil {\n\t\tv := s.SigningConfiguration\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"signingConfiguration\", v, metadata)\n\t}\n\tif s.SigningImageFormat != nil {\n\t\tv := s.SigningImageFormat\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"signingImageFormat\", v, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s Product) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ActivationUrl != nil {\n\t\tv := *s.ActivationUrl\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ActivationUrl\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Categories != nil {\n\t\tv := s.Categories\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"Categories\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.CompanyName != nil {\n\t\tv := *s.CompanyName\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"CompanyName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationTypes != nil {\n\t\tv := s.IntegrationTypes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"IntegrationTypes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.MarketplaceUrl != nil {\n\t\tv := *s.MarketplaceUrl\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"MarketplaceUrl\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ProductArn != nil {\n\t\tv := *s.ProductArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ProductArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ProductName != nil {\n\t\tv := *s.ProductName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ProductName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ProductSubscriptionResourcePolicy != nil {\n\t\tv := *s.ProductSubscriptionResourcePolicy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ProductSubscriptionResourcePolicy\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s AwsLambdaFunctionLayer) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CodeSize != nil {\n\t\tv := *s.CodeSize\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"CodeSize\", protocol.Int64Value(v), metadata)\n\t}\n\treturn nil\n}", "func (s HttpAuthorization) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Sigv4 != nil {\n\t\tv := s.Sigv4\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"sigv4\", v, metadata)\n\t}\n\treturn nil\n}", "func (s DescribePipelineInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.PipelineName != nil {\n\t\tv := *s.PipelineName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"pipelineName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s GetModelInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelId != nil {\n\t\tv := *s.ModelId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"modelId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}" ]
[ "0.6349841", "0.6247435", "0.6194363", "0.61811036", "0.6169666", "0.61222064", "0.6101693", "0.607118", "0.6064306", "0.60118675", "0.60097605", "0.59601545", "0.5952495", "0.5934505", "0.59278136", "0.59241146", "0.5908956", "0.59053487", "0.5899269", "0.5884041", "0.58774006", "0.5876687", "0.58743125", "0.5870761", "0.58704513", "0.5858514", "0.58502346", "0.5848914", "0.5843701", "0.5837742", "0.58337116", "0.5821354", "0.5819604", "0.58140403", "0.5813909", "0.58107203", "0.5804623", "0.5802504", "0.58023864", "0.57862025", "0.57813287", "0.5776258", "0.5774969", "0.5773259", "0.5765068", "0.5761249", "0.57593006", "0.575849", "0.57512987", "0.5749242", "0.5747341", "0.5745933", "0.5742179", "0.5739372", "0.5733904", "0.5732178", "0.5729593", "0.57200307", "0.57197386", "0.5709011", "0.57020074", "0.5701465", "0.57012814", "0.56958497", "0.56948066", "0.5692317", "0.56855047", "0.56823933", "0.567788", "0.5676319", "0.56755185", "0.56655973", "0.566051", "0.56597316", "0.5659018", "0.56589156", "0.56532717", "0.56491584", "0.5645446", "0.5644516", "0.56436676", "0.56388444", "0.5637049", "0.56359494", "0.56343085", "0.5634301", "0.5630996", "0.5625861", "0.56246316", "0.56226236", "0.5621856", "0.5621856", "0.56201226", "0.5615716", "0.56143373", "0.56132203", "0.5611411", "0.5609817", "0.5609027", "0.56045544", "0.55895716" ]
0.0
-1
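The MarshalFields snippets collected above all follow one pattern: each optional member is a pointer (or a map/enum checked for emptiness), and only members that are actually set are written to the encoder, each under its own serialization target and metadata. The sketch below is a minimal, self-contained illustration of that pattern; the fieldEncoder type and the updateIntegrationExample shape are stand-ins invented for this example, not the real aws-sdk-go-v2 protocol.FieldEncoder or any generated SDK type.

package main

import "fmt"

// fieldEncoder is a simplified stand-in for the SDK's protocol.FieldEncoder:
// it records body-target key/value pairs instead of building a real wire payload.
type fieldEncoder struct {
	body map[string]interface{}
}

func (e *fieldEncoder) SetValue(name string, v interface{}) {
	e.body[name] = v
}

// updateIntegrationExample is a hypothetical shape modeled on the snippets above:
// optional members are pointers or maps, so the marshaler can skip unset fields.
type updateIntegrationExample struct {
	IntegrationMethod *string
	TimeoutInMillis   *int64
	RequestParameters map[string]string
}

// MarshalFields emits only the members that are present, mirroring the
// nil and length guards used by the generated code.
func (s updateIntegrationExample) MarshalFields(e *fieldEncoder) error {
	if s.IntegrationMethod != nil {
		e.SetValue("integrationMethod", *s.IntegrationMethod)
	}
	if s.TimeoutInMillis != nil {
		e.SetValue("timeoutInMillis", *s.TimeoutInMillis)
	}
	if len(s.RequestParameters) > 0 {
		e.SetValue("requestParameters", s.RequestParameters)
	}
	return nil
}

func main() {
	method := "POST"
	in := updateIntegrationExample{
		IntegrationMethod: &method, // TimeoutInMillis left nil and therefore omitted
		RequestParameters: map[string]string{"integration.request.header.x-id": "method.request.path.id"},
	}
	enc := &fieldEncoder{body: map[string]interface{}{}}
	if err := in.MarshalFields(enc); err != nil {
		panic(err)
	}
	fmt.Println(enc.body) // prints only the fields that were set
}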
String returns the string representation
func (s DescribeUserHierarchyStructureOutput) String() string { return awsutil.Prettify(s) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s CreateAlgorithmOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateAlgorithmOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s Library) String() string {\n\tres := make([]string, 5)\n\tres[0] = \"ID: \" + reform.Inspect(s.ID, true)\n\tres[1] = \"UserID: \" + reform.Inspect(s.UserID, true)\n\tres[2] = \"VolumeID: \" + reform.Inspect(s.VolumeID, true)\n\tres[3] = \"CreatedAt: \" + reform.Inspect(s.CreatedAt, true)\n\tres[4] = \"UpdatedAt: \" + reform.Inspect(s.UpdatedAt, true)\n\treturn strings.Join(res, \", \")\n}", "func (s CreateCanaryOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r Info) String() string {\n\tJSON, err := json.Marshal(r)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn string(JSON)\n}", "func (s ReEncryptOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateFHIRDatastoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateQuickConnectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func String() string {\n\toutput := output{\n\t\tRerun: Rerun,\n\t\tVariables: Variables,\n\t\tItems: Items,\n\t}\n\tvar err error\n\tvar b []byte\n\tif Indent == \"\" {\n\t\tb, err = json.Marshal(output)\n\t} else {\n\t\tb, err = json.MarshalIndent(output, \"\", Indent)\n\t}\n\tif err != nil {\n\t\tmessageErr := Errorf(\"Error in parser. Please report this output to https://github.com/drgrib/alfred/issues: %v\", err)\n\t\tpanic(messageErr)\n\t}\n\ts := string(b)\n\treturn s\n}", "func (r *Registry) String() string {\n\tout := make([]string, 0, len(r.nameToObject))\n\tfor name, object := range r.nameToObject {\n\t\tout = append(out, fmt.Sprintf(\"* %s:\\n%s\", name, object.serialization))\n\t}\n\treturn strings.Join(out, \"\\n\\n\")\n}", "func (s CreateSceneOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateSafetyRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateLanguageModelOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o QtreeCreateResponse) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func (r SendAll) String() string {\n\tJSON, err := json.Marshal(r)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn string(JSON)\n}", "func (r ReceiveAll) String() string {\n\tJSON, err := json.Marshal(r)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn string(JSON)\n}", "func (enc *simpleEncoding) String() string {\n\treturn \"simpleEncoding(\" + enc.baseName + \")\"\n}", "func (s CreateDatabaseOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (z Zamowienium) String() string {\n\tjz, _ := json.Marshal(z)\n\treturn string(jz)\n}", "func (s CreateHITTypeOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProgramOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateEntityOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProjectVersionOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Addshifttraderequest) String() string {\n \n \n \n \n o.AcceptableIntervals = []string{\"\"} \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (r Rooms) String() string {\n\tjr, _ := json.Marshal(r)\n\treturn string(jr)\n}", "func (s CreateUseCaseOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (i Info) String() string {\n\ts, _ := i.toJSON()\n\treturn s\n}", "func (o 
*Botversionsummary) String() string {\n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (e ExternalCfps) String() string {\n\tje, _ := json.Marshal(e)\n\treturn string(je)\n}", "func (s CreateTrustStoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func String() string {\n\treturn fmt.Sprintf(\n\t\t\"AppVersion = %s\\n\"+\n\t\t\t\"VCSRef = %s\\n\"+\n\t\t\t\"BuildVersion = %s\\n\"+\n\t\t\t\"BuildDate = %s\",\n\t\tAppVersion, VCSRef, BuildVersion, Date,\n\t)\n}", "func (s CreateDataLakeOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateSolutionVersionOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s GetSceneOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (i NotMachine) String() string { return toString(i) }", "func (s CreateRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s StartPipelineReprocessingOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateDatastoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateSequenceStoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Adjustablelivespeakerdetection) String() string {\n \n \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s CreateRateBasedRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r Resiliency) String() string {\n\tb, _ := json.Marshal(r)\n\treturn string(b)\n}", "func (s RestoreFromRecoveryPointOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o QtreeCreateResponseResult) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}", "func (s CreateWaveOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateRoomOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateBotLocaleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s DeleteAlgorithmOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (z Zamowienia) String() string {\n\tjz, _ := json.Marshal(z)\n\treturn string(jz)\n}", "func (i *Info) String() string {\n\tb, _ := json.Marshal(i)\n\treturn string(b)\n}", "func (s ProcessingFeatureStoreOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ExportProjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r RoomOccupancies) String() string {\n\tjr, _ := json.Marshal(r)\n\treturn string(jr)\n}", "func (r *InterRecord) String() string {\n\tbuf := r.Bytes()\n\tdefer ffjson.Pool(buf)\n\n\treturn string(buf)\n}", "func (s CreateResolverRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateResolverRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateResolverRuleOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateLayerOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Coretype) String() string {\n \n \n \n \n \n o.ValidationFields = []string{\"\"} \n \n o.ItemValidationFields = []string{\"\"} \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s CreateModelCardOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o 
*Limitchangerequestdetails) String() string {\n \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s NetworkPathComponentDetails) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (t Terms) String() string {\n\tjt, _ := json.Marshal(t)\n\treturn string(jt)\n}", "func (g GetObjectOutput) String() string {\n\treturn helper.Prettify(g)\n}", "func (s StartContactEvaluationOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateModelOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateModelOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Interactionstatsalert) String() string {\n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (o *Digitalcondition) String() string {\n\tj, _ := json.Marshal(o)\n\tstr, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n\treturn str\n}", "func (r RoomOccupancy) String() string {\n\tjr, _ := json.Marshal(r)\n\treturn string(jr)\n}", "func (d *Diagram) String() string { return toString(d) }", "func (o *Outboundroute) String() string {\n \n \n \n \n o.ClassificationTypes = []string{\"\"} \n \n \n o.ExternalTrunkBases = []Domainentityref{{}} \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s CreateCodeRepositoryOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateActivationOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateProjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateBotOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s ResolutionTechniques) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (c CourseCode) String() string {\n\tjc, _ := json.Marshal(c)\n\treturn string(jc)\n}", "func (s CreateTrialComponentOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (p *Parms) String() string {\n\tout, _ := json.MarshalIndent(p, \"\", \"\\t\")\n\treturn string(out)\n}", "func (p polynomial) String() (str string) {\n\tfor _, m := range p.monomials {\n\t\tstr = str + \" \" + m.String() + \" +\"\n\t}\n\tstr = strings.TrimRight(str, \"+\")\n\treturn \"f(x) = \" + strings.TrimSpace(str)\n}", "func (s CreateThingOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (r *RUT) String() string {\n\treturn r.Format(DefaultFormatter)\n}", "func (s CreatePatchBaselineOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Crossplatformpolicycreate) String() string {\n \n \n \n \n \n \n \n \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (s BotVersionLocaleDetails) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s DeleteMultiplexProgramOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s LifeCycleLastTestInitiated) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s GetObjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s 
LifeCycleLastTestReverted) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateDocumentOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateComponentOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s CreateIntegrationOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Commonruleconditions) String() string {\n o.Clauses = []Commonruleconditions{{}} \n o.Predicates = []Commonrulepredicate{{}} \n \n \n\n j, _ := json.Marshal(o)\n str, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n return str\n}", "func (t Test1s) String() string {\n\tjt, _ := json.Marshal(t)\n\treturn string(jt)\n}", "func (s CreateContactFlowOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (o *Directrouting) String() string {\n\tj, _ := json.Marshal(o)\n\tstr, _ := strconv.Unquote(strings.Replace(strconv.Quote(string(j)), `\\\\u`, `\\u`, -1))\n\n\treturn str\n}" ]
[ "0.721496", "0.721496", "0.72003424", "0.72003067", "0.71778786", "0.71672124", "0.71179444", "0.7087169", "0.708676", "0.70792294", "0.7078306", "0.7067698", "0.7031764", "0.7027706", "0.7026941", "0.70254856", "0.7020726", "0.70168954", "0.7010962", "0.70102316", "0.7001954", "0.6997284", "0.69971234", "0.6991765", "0.69905907", "0.69903153", "0.69867015", "0.69845456", "0.69752043", "0.69743115", "0.69629383", "0.6961912", "0.6961005", "0.69509166", "0.6947246", "0.69455487", "0.69455487", "0.69446045", "0.6940138", "0.6936814", "0.69329786", "0.69286585", "0.69271654", "0.69254273", "0.6922031", "0.69216454", "0.69182205", "0.69178134", "0.6911453", "0.6910748", "0.6909815", "0.6908686", "0.69052964", "0.6899659", "0.6896323", "0.6893855", "0.6893855", "0.6893855", "0.68922645", "0.68918127", "0.6891583", "0.6888694", "0.68884104", "0.6884165", "0.6882656", "0.6880121", "0.68768877", "0.68768877", "0.68755984", "0.68748397", "0.68738985", "0.68732196", "0.68729943", "0.6871865", "0.6869235", "0.68684727", "0.68684727", "0.68684727", "0.68684727", "0.68683946", "0.68661034", "0.6862186", "0.6862099", "0.6858425", "0.6856829", "0.6853848", "0.68523717", "0.685183", "0.68458325", "0.6843906", "0.68433076", "0.68429965", "0.68427455", "0.68420583", "0.6840824", "0.68394357", "0.68362874", "0.68344057", "0.6833331", "0.6832562", "0.6832457" ]
0.0
-1
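The String implementations in this record all delegate to awsutil.Prettify from the aws-sdk-go helper package, which walks the struct with reflection and renders the fields that are set. Below is a small runnable sketch of the same pattern; describeThingOutput is a hypothetical shape invented for the example, and it assumes the github.com/aws/aws-sdk-go module is available.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awsutil"
)

// describeThingOutput is a hypothetical output shape; the generated String
// methods shown above delegate to awsutil.Prettify in exactly this way.
type describeThingOutput struct {
	Name  *string
	Count *int64
}

// String returns the string representation.
func (s describeThingOutput) String() string {
	return awsutil.Prettify(s)
}

func main() {
	name := "example"
	count := int64(3)
	out := describeThingOutput{Name: &name, Count: &count}
	// fmt picks up the Stringer implementation, so pointer fields are
	// dereferenced and printed instead of showing raw addresses.
	fmt.Println(out)
}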
MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s DescribeUserHierarchyStructureOutput) MarshalFields(e protocol.FieldEncoder) error { if s.HierarchyStructure != nil { v := s.HierarchyStructure metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "HierarchyStructure", v, metadata) } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s CreateApiOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiEndpoint != nil {\n\t\tv := *s.ApiEndpoint\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiEndpoint\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiKeySelectionExpression != nil {\n\t\tv := *s.ApiKeySelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeySelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedDate != nil {\n\t\tv := *s.CreatedDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdDate\", protocol.TimeValue{V: v, Format: protocol.UnixTimeFormat}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DisableSchemaValidation != nil {\n\t\tv := *s.DisableSchemaValidation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"disableSchemaValidation\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ProtocolType) > 0 {\n\t\tv := s.ProtocolType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"protocolType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.RouteSelectionExpression != nil {\n\t\tv := *s.RouteSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Warnings) > 0 {\n\t\tv := s.Warnings\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"warnings\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s OutputService9TestShapeSingleStructure) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Foo != nil {\n\t\tv := *s.Foo\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Foo\", protocol.StringValue(v), metadata)\n\t}\n\treturn nil\n}", "func (s OutputService6TestShapeSingleStructure) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Foo != nil {\n\t\tv := *s.Foo\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"foo\", protocol.StringValue(v), metadata)\n\t}\n\treturn nil\n}", "func (s CreateApiInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiKeySelectionExpression != nil {\n\t\tv := *s.ApiKeySelectionExpression\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeySelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DisableSchemaValidation != nil {\n\t\tv := *s.DisableSchemaValidation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"disableSchemaValidation\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ProtocolType) > 0 {\n\t\tv := s.ProtocolType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"protocolType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.RouteSelectionExpression != nil {\n\t\tv := *s.RouteSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s Api) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiEndpoint != nil {\n\t\tv := *s.ApiEndpoint\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiEndpoint\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiKeySelectionExpression != nil {\n\t\tv := *s.ApiKeySelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeySelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedDate != nil {\n\t\tv := *s.CreatedDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdDate\", protocol.TimeValue{V: v, Format: protocol.UnixTimeFormat}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DisableSchemaValidation != nil {\n\t\tv := *s.DisableSchemaValidation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"disableSchemaValidation\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ProtocolType) > 0 {\n\t\tv := s.ProtocolType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"protocolType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.RouteSelectionExpression != nil {\n\t\tv := *s.RouteSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeSelectionExpression\", 
protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Warnings) > 0 {\n\t\tv := s.Warnings\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"warnings\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s GetApiOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiEndpoint != nil {\n\t\tv := *s.ApiEndpoint\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiEndpoint\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiKeySelectionExpression != nil {\n\t\tv := *s.ApiKeySelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeySelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedDate != nil {\n\t\tv := *s.CreatedDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdDate\", protocol.TimeValue{V: v, Format: protocol.UnixTimeFormat}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DisableSchemaValidation != nil {\n\t\tv := *s.DisableSchemaValidation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"disableSchemaValidation\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ProtocolType) > 0 {\n\t\tv := s.ProtocolType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"protocolType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.RouteSelectionExpression != nil {\n\t\tv := *s.RouteSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Warnings) > 0 {\n\t\tv := s.Warnings\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"warnings\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s UpdateApiOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiEndpoint != nil {\n\t\tv := *s.ApiEndpoint\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiEndpoint\", protocol.QuotedValue{ValueMarshaler: 
protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiKeySelectionExpression != nil {\n\t\tv := *s.ApiKeySelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeySelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedDate != nil {\n\t\tv := *s.CreatedDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdDate\", protocol.TimeValue{V: v, Format: protocol.UnixTimeFormat}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DisableSchemaValidation != nil {\n\t\tv := *s.DisableSchemaValidation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"disableSchemaValidation\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ProtocolType) > 0 {\n\t\tv := s.ProtocolType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"protocolType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.RouteSelectionExpression != nil {\n\t\tv := *s.RouteSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Warnings) > 0 {\n\t\tv := s.Warnings\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"warnings\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s UpdateRestApiOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif len(s.ApiKeySource) > 0 {\n\t\tv := s.ApiKeySource\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeySource\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.BinaryMediaTypes != nil {\n\t\tv := s.BinaryMediaTypes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"binaryMediaTypes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.CreatedDate != nil {\n\t\tv := *s.CreatedDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdDate\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.EndpointConfiguration != nil {\n\t\tv := 
s.EndpointConfiguration\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"endpointConfiguration\", v, metadata)\n\t}\n\tif s.Id != nil {\n\t\tv := *s.Id\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"id\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MinimumCompressionSize != nil {\n\t\tv := *s.MinimumCompressionSize\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"minimumCompressionSize\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Policy != nil {\n\t\tv := *s.Policy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"policy\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Warnings != nil {\n\t\tv := s.Warnings\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"warnings\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s CreateCanaryInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.ArtifactS3Location != nil {\n\t\tv := *s.ArtifactS3Location\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ArtifactS3Location\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Code != nil {\n\t\tv := s.Code\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"Code\", v, metadata)\n\t}\n\tif s.ExecutionRoleArn != nil {\n\t\tv := *s.ExecutionRoleArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ExecutionRoleArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.FailureRetentionPeriodInDays != nil {\n\t\tv := *s.FailureRetentionPeriodInDays\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"FailureRetentionPeriodInDays\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RunConfig != nil {\n\t\tv := s.RunConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"RunConfig\", v, metadata)\n\t}\n\tif s.RuntimeVersion != nil {\n\t\tv := *s.RuntimeVersion\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"RuntimeVersion\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Schedule != nil {\n\t\tv := s.Schedule\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"Schedule\", v, metadata)\n\t}\n\tif s.SuccessRetentionPeriodInDays != nil {\n\t\tv := *s.SuccessRetentionPeriodInDays\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"SuccessRetentionPeriodInDays\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"Tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.VpcConfig != nil {\n\t\tv := s.VpcConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"VpcConfig\", v, metadata)\n\t}\n\treturn nil\n}", "func (s VirtualGatewayRef) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedAt != nil {\n\t\tv := *s.CreatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.LastUpdatedAt != nil {\n\t\tv := *s.LastUpdatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastUpdatedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.MeshName != nil {\n\t\tv := *s.MeshName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MeshOwner != nil {\n\t\tv := *s.MeshOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ResourceOwner != nil {\n\t\tv := *s.ResourceOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"resourceOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.VirtualGatewayName != nil {\n\t\tv := *s.VirtualGatewayName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"virtualGatewayName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateAccessPointInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tvar ClientToken string\n\tif s.ClientToken != nil {\n\t\tClientToken = *s.ClientToken\n\t} else {\n\t\tClientToken = protocol.GetIdempotencyToken()\n\t}\n\t{\n\t\tv := ClientToken\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ClientToken\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.FileSystemId != nil {\n\t\tv := *s.FileSystemId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"FileSystemId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.PosixUser != nil {\n\t\tv := s.PosixUser\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"PosixUser\", v, metadata)\n\t}\n\tif s.RootDirectory != nil {\n\t\tv := s.RootDirectory\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"RootDirectory\", v, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"Tags\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s UpdateRouteInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.AuthorizationScopes) > 0 {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RequestModels) > 0 {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif 
s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateRouteOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.AuthorizationScopes) > 0 {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RequestModels) > 0 {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn 
nil\n}", "func (s CreateModelInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ContentType != nil {\n\t\tv := *s.ContentType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentType\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Schema != nil {\n\t\tv := *s.Schema\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"schema\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateRouteInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.AuthorizationScopes) > 0 {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RequestModels) > 0 {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v 
{\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s Api) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiEndpoint != nil {\n\t\tv := *s.ApiEndpoint\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiEndpoint\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiKeySelectionExpression != nil {\n\t\tv := *s.ApiKeySelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeySelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CorsConfiguration != nil {\n\t\tv := s.CorsConfiguration\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"corsConfiguration\", v, metadata)\n\t}\n\tif s.CreatedDate != nil {\n\t\tv := *s.CreatedDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdDate\",\n\t\t\tprotocol.TimeValue{V: v, Format: \"iso8601\", QuotedFormatTime: true}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DisableSchemaValidation != nil {\n\t\tv := *s.DisableSchemaValidation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"disableSchemaValidation\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.ImportInfo != nil {\n\t\tv := s.ImportInfo\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"importInfo\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ProtocolType) > 0 {\n\t\tv := s.ProtocolType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"protocolType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.RouteSelectionExpression != nil {\n\t\tv := *s.RouteSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeSelectionExpression\", 
protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Warnings != nil {\n\t\tv := s.Warnings\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"warnings\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s OutputService15TestShapeItemDetailShape) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ID != nil {\n\t\tv := *s.ID\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ID\", protocol.StringValue(v), metadata)\n\t}\n\t// Skipping Type XML Attribute.\n\treturn nil\n}", "func (s CreateProxySessionInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.Capabilities != nil {\n\t\tv := s.Capabilities\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"Capabilities\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.ExpiryMinutes != nil {\n\t\tv := *s.ExpiryMinutes\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ExpiryMinutes\", protocol.Int64Value(v), metadata)\n\t}\n\tif len(s.GeoMatchLevel) > 0 {\n\t\tv := s.GeoMatchLevel\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"GeoMatchLevel\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.GeoMatchParams != nil {\n\t\tv := s.GeoMatchParams\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"GeoMatchParams\", v, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.NumberSelectionBehavior) > 0 {\n\t\tv := s.NumberSelectionBehavior\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"NumberSelectionBehavior\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.ParticipantPhoneNumbers != nil {\n\t\tv := s.ParticipantPhoneNumbers\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"ParticipantPhoneNumbers\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.VoiceConnectorId != nil {\n\t\tv := *s.VoiceConnectorId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"voiceConnectorId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateRouteOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.AuthorizationScopes) > 0 {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RequestModels) > 0 {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateStageInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.AccessLogSettings != nil {\n\t\tv := s.AccessLogSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"accessLogSettings\", v, metadata)\n\t}\n\tif s.ClientCertificateId != nil {\n\t\tv := *s.ClientCertificateId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"clientCertificateId\", 
protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DefaultRouteSettings != nil {\n\t\tv := s.DefaultRouteSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"defaultRouteSettings\", v, metadata)\n\t}\n\tif s.DeploymentId != nil {\n\t\tv := *s.DeploymentId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"deploymentId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RouteSettings) > 0 {\n\t\tv := s.RouteSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"routeSettings\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.StageVariables) > 0 {\n\t\tv := s.StageVariables\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"stageVariables\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.StageName != nil {\n\t\tv := *s.StageName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"stageName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s VirtualGatewaySpec) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.BackendDefaults != nil {\n\t\tv := s.BackendDefaults\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"backendDefaults\", v, metadata)\n\t}\n\tif s.Listeners != nil {\n\t\tv := s.Listeners\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"listeners\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Logging != nil {\n\t\tv := s.Logging\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"logging\", v, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateApiInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiKeySelectionExpression != nil {\n\t\tv := *s.ApiKeySelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeySelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DisableSchemaValidation != nil {\n\t\tv := *s.DisableSchemaValidation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"disableSchemaValidation\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: 
protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteSelectionExpression != nil {\n\t\tv := *s.RouteSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateIntegrationOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ConnectionId != nil {\n\t\tv := *s.ConnectionId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ConnectionType) > 0 {\n\t\tv := s.ConnectionType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.ContentHandlingStrategy) > 0 {\n\t\tv := s.ContentHandlingStrategy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentHandlingStrategy\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.CredentialsArn != nil {\n\t\tv := *s.CredentialsArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"credentialsArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationId != nil {\n\t\tv := *s.IntegrationId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationMethod != nil {\n\t\tv := *s.IntegrationMethod\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationMethod\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationResponseSelectionExpression != nil {\n\t\tv := *s.IntegrationResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.IntegrationType) > 0 {\n\t\tv := s.IntegrationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.IntegrationUri != nil {\n\t\tv := *s.IntegrationUri\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationUri\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.PassthroughBehavior) > 0 {\n\t\tv := s.PassthroughBehavior\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"passthroughBehavior\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := 
e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestTemplates) > 0 {\n\t\tv := s.RequestTemplates\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestTemplates\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.TemplateSelectionExpression != nil {\n\t\tv := *s.TemplateSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"templateSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.TimeoutInMillis != nil {\n\t\tv := *s.TimeoutInMillis\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"timeoutInMillis\", protocol.Int64Value(v), metadata)\n\t}\n\treturn nil\n}", "func (s UpdateSignalingChannelInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.ChannelARN != nil {\n\t\tv := *s.ChannelARN\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ChannelARN\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CurrentVersion != nil {\n\t\tv := *s.CurrentVersion\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"CurrentVersion\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.SingleMasterConfiguration != nil {\n\t\tv := s.SingleMasterConfiguration\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"SingleMasterConfiguration\", v, metadata)\n\t}\n\treturn nil\n}", "func (s DescribeDetectorInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.DetectorModelName != nil {\n\t\tv := *s.DetectorModelName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"detectorModelName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.KeyValue != nil {\n\t\tv := *s.KeyValue\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.QueryTarget, \"keyValue\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateApiMappingInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiMappingKey != nil {\n\t\tv := *s.ApiMappingKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiMappingKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Stage != nil {\n\t\tv := *s.Stage\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"stage\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DomainName != nil {\n\t\tv := *s.DomainName\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"domainName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateThingInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.AttributePayload != nil {\n\t\tv := s.AttributePayload\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"attributePayload\", v, metadata)\n\t}\n\tif s.BillingGroupName != nil {\n\t\tv := *s.BillingGroupName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"billingGroupName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ThingTypeName != nil {\n\t\tv := *s.ThingTypeName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"thingTypeName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ThingName != nil {\n\t\tv := *s.ThingName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"thingName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func encodeSchema(w io.Writer, s *schema.Schema) (err error) {\n\tif s == nil {\n\t\treturn\n\t}\n\n\tew := errWriter{w: w}\n\tif s.Description != \"\" {\n\t\tew.writeFormat(`\"description\": %q, `, s.Description)\n\t}\n\tew.writeString(`\"type\": \"object\", `)\n\tew.writeString(`\"additionalProperties\": false, `)\n\tew.writeString(`\"properties\": {`)\n\tvar required []string\n\tvar notFirst bool\n\tfor _, key := range sortedFieldNames(s.Fields) {\n\t\tfield := s.Fields[key]\n\t\tif notFirst {\n\t\t\tew.writeString(\", \")\n\t\t}\n\t\tnotFirst = true\n\t\tif field.Required {\n\t\t\trequired = append(required, fmt.Sprintf(\"%q\", key))\n\t\t}\n\t\tew.err = encodeField(ew, key, field)\n\t\tif ew.err != nil {\n\t\t\treturn ew.err\n\t\t}\n\t}\n\tew.writeString(\"}\")\n\tif s.MinLen > 0 {\n\t\tew.writeFormat(`, \"minProperties\": %s`, strconv.FormatInt(int64(s.MinLen), 10))\n\t}\n\tif s.MaxLen > 0 {\n\t\tew.writeFormat(`, \"maxProperties\": %s`, strconv.FormatInt(int64(s.MaxLen), 10))\n\t}\n\n\tif len(required) > 0 {\n\t\tew.writeFormat(`, \"required\": [%s]`, strings.Join(required, \", \"))\n\t}\n\treturn ew.err\n}", "func (s OutputService15TestShapeItemShape) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ItemDetail != nil {\n\t\tv := s.ItemDetail\n\t\tattrs := make([]protocol.Attribute, 0, 1)\n\n\t\tif len(s.ItemDetail.Type) > 0 {\n\n\t\t\tv := s.ItemDetail.Type\n\t\t\tattrs = append(attrs, protocol.Attribute{Name: \"xsi:type\", Value: v, Meta: protocol.Metadata{}})\n\t\t}\n\t\tmetadata := protocol.Metadata{Attributes: attrs, XMLNamespacePrefix: \"xsi\", XMLNamespaceURI: \"http://www.w3.org/2001/XMLSchema-instance\"}\n\t\te.SetFields(protocol.BodyTarget, \"ItemDetail\", v, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateApiMappingInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiMappingKey != nil {\n\t\tv := *s.ApiMappingKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, 
\"apiMappingKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Stage != nil {\n\t\tv := *s.Stage\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"stage\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiMappingId != nil {\n\t\tv := *s.ApiMappingId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiMappingId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DomainName != nil {\n\t\tv := *s.DomainName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"domainName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s VirtualServiceRef) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedAt != nil {\n\t\tv := *s.CreatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.LastUpdatedAt != nil {\n\t\tv := *s.LastUpdatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastUpdatedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.MeshName != nil {\n\t\tv := *s.MeshName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MeshOwner != nil {\n\t\tv := *s.MeshOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ResourceOwner != nil {\n\t\tv := *s.ResourceOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"resourceOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.VirtualServiceName != nil {\n\t\tv := *s.VirtualServiceName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"virtualServiceName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateIntegrationOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ConnectionId != nil {\n\t\tv := *s.ConnectionId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ConnectionType) > 0 {\n\t\tv := s.ConnectionType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.ContentHandlingStrategy) > 0 {\n\t\tv := s.ContentHandlingStrategy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentHandlingStrategy\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.CredentialsArn != nil {\n\t\tv := *s.CredentialsArn\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"credentialsArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationId != nil {\n\t\tv := *s.IntegrationId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationMethod != nil {\n\t\tv := *s.IntegrationMethod\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationMethod\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationResponseSelectionExpression != nil {\n\t\tv := *s.IntegrationResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.IntegrationType) > 0 {\n\t\tv := s.IntegrationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.IntegrationUri != nil {\n\t\tv := *s.IntegrationUri\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationUri\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.PassthroughBehavior) > 0 {\n\t\tv := s.PassthroughBehavior\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"passthroughBehavior\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestTemplates) > 0 {\n\t\tv := s.RequestTemplates\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestTemplates\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.TemplateSelectionExpression != nil {\n\t\tv := *s.TemplateSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"templateSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.TimeoutInMillis != nil {\n\t\tv := *s.TimeoutInMillis\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"timeoutInMillis\", protocol.Int64Value(v), metadata)\n\t}\n\treturn nil\n}", "func (s UpdateIPSetInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.Activate != nil {\n\t\tv := *s.Activate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"activate\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Location != nil {\n\t\tv := *s.Location\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"location\", 
protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DetectorId != nil {\n\t\tv := *s.DetectorId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"detectorId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IpSetId != nil {\n\t\tv := *s.IpSetId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"ipSetId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s GetSigningPlatformInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.PlatformId != nil {\n\t\tv := *s.PlatformId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"platformId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateStageInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.AccessLogSettings != nil {\n\t\tv := s.AccessLogSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"accessLogSettings\", v, metadata)\n\t}\n\tif s.ClientCertificateId != nil {\n\t\tv := *s.ClientCertificateId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"clientCertificateId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DefaultRouteSettings != nil {\n\t\tv := s.DefaultRouteSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"defaultRouteSettings\", v, metadata)\n\t}\n\tif s.DeploymentId != nil {\n\t\tv := *s.DeploymentId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"deploymentId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RouteSettings) > 0 {\n\t\tv := s.RouteSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"routeSettings\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.StageName != nil {\n\t\tv := *s.StageName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"stageName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.StageVariables) > 0 {\n\t\tv := s.StageVariables\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"stageVariables\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s Robot) MarshalFields(e 
protocol.FieldEncoder) error {\n\tif len(s.Architecture) > 0 {\n\t\tv := s.Architecture\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"architecture\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedAt != nil {\n\t\tv := *s.CreatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.FleetArn != nil {\n\t\tv := *s.FleetArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"fleetArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.GreenGrassGroupId != nil {\n\t\tv := *s.GreenGrassGroupId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"greenGrassGroupId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.LastDeploymentJob != nil {\n\t\tv := *s.LastDeploymentJob\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastDeploymentJob\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.LastDeploymentTime != nil {\n\t\tv := *s.LastDeploymentTime\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastDeploymentTime\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Status) > 0 {\n\t\tv := s.Status\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"status\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateIntegrationInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ConnectionId != nil {\n\t\tv := *s.ConnectionId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ConnectionType) > 0 {\n\t\tv := s.ConnectionType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.ContentHandlingStrategy) > 0 {\n\t\tv := s.ContentHandlingStrategy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentHandlingStrategy\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.CredentialsArn != nil {\n\t\tv := *s.CredentialsArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"credentialsArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationMethod != nil {\n\t\tv := *s.IntegrationMethod\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationMethod\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.IntegrationType) > 0 {\n\t\tv := s.IntegrationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.IntegrationUri != nil {\n\t\tv := *s.IntegrationUri\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationUri\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.PassthroughBehavior) > 0 {\n\t\tv := s.PassthroughBehavior\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"passthroughBehavior\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestTemplates) > 0 {\n\t\tv := s.RequestTemplates\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestTemplates\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.TemplateSelectionExpression != nil {\n\t\tv := *s.TemplateSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"templateSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.TimeoutInMillis != nil {\n\t\tv := *s.TimeoutInMillis\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"timeoutInMillis\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationId != nil {\n\t\tv := *s.IntegrationId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"integrationId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s DescribeInputDeviceOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ConnectionState) > 0 {\n\t\tv := s.ConnectionState\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionState\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.DeviceSettingsSyncState) > 0 {\n\t\tv := s.DeviceSettingsSyncState\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"deviceSettingsSyncState\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.HdDeviceSettings != nil {\n\t\tv := s.HdDeviceSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"hdDeviceSettings\", v, metadata)\n\t}\n\tif s.Id != nil {\n\t\tv := *s.Id\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"id\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, 
metadata)\n\t}\n\tif s.MacAddress != nil {\n\t\tv := *s.MacAddress\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"macAddress\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.NetworkSettings != nil {\n\t\tv := s.NetworkSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"networkSettings\", v, metadata)\n\t}\n\tif s.SerialNumber != nil {\n\t\tv := *s.SerialNumber\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"serialNumber\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Type) > 0 {\n\t\tv := s.Type\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"type\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateAliasInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.FunctionVersion != nil {\n\t\tv := *s.FunctionVersion\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"FunctionVersion\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RoutingConfig != nil {\n\t\tv := s.RoutingConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"RoutingConfig\", v, metadata)\n\t}\n\tif s.FunctionName != nil {\n\t\tv := *s.FunctionName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"FunctionName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateImageInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tvar ClientToken string\n\tif s.ClientToken != nil {\n\t\tClientToken = *s.ClientToken\n\t} else {\n\t\tClientToken = protocol.GetIdempotencyToken()\n\t}\n\t{\n\t\tv := ClientToken\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"clientToken\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DistributionConfigurationArn != nil {\n\t\tv := *s.DistributionConfigurationArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"distributionConfigurationArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.EnhancedImageMetadataEnabled != nil {\n\t\tv := *s.EnhancedImageMetadataEnabled\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enhancedImageMetadataEnabled\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.ImageRecipeArn != nil {\n\t\tv := *s.ImageRecipeArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"imageRecipeArn\", 
protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ImageTestsConfiguration != nil {\n\t\tv := s.ImageTestsConfiguration\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"imageTestsConfiguration\", v, metadata)\n\t}\n\tif s.InfrastructureConfigurationArn != nil {\n\t\tv := *s.InfrastructureConfigurationArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"infrastructureConfigurationArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\treturn nil\n}", "func (s VpcLink) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.CreatedDate != nil {\n\t\tv := *s.CreatedDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdDate\",\n\t\t\tprotocol.TimeValue{V: v, Format: \"iso8601\", QuotedFormatTime: true}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.SecurityGroupIds != nil {\n\t\tv := s.SecurityGroupIds\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"securityGroupIds\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.SubnetIds != nil {\n\t\tv := s.SubnetIds\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"subnetIds\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.VpcLinkId != nil {\n\t\tv := *s.VpcLinkId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"vpcLinkId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.VpcLinkStatus) > 0 {\n\t\tv := s.VpcLinkStatus\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"vpcLinkStatus\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.VpcLinkStatusMessage != nil {\n\t\tv := *s.VpcLinkStatusMessage\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"vpcLinkStatusMessage\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.VpcLinkVersion) > 0 {\n\t\tv := s.VpcLinkVersion\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"vpcLinkVersion\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\treturn nil\n}", "func (s Route) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiGatewayManaged != nil {\n\t\tv := *s.ApiGatewayManaged\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiGatewayManaged\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.ApiKeyRequired != nil 
{\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.AuthorizationScopes != nil {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RequestModels != nil {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RequestParameters != nil {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s Integration) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiGatewayManaged != nil {\n\t\tv := *s.ApiGatewayManaged\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiGatewayManaged\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.ConnectionId != nil {\n\t\tv := *s.ConnectionId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ConnectionType) > 0 {\n\t\tv 
:= s.ConnectionType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.ContentHandlingStrategy) > 0 {\n\t\tv := s.ContentHandlingStrategy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentHandlingStrategy\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.CredentialsArn != nil {\n\t\tv := *s.CredentialsArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"credentialsArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationId != nil {\n\t\tv := *s.IntegrationId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationMethod != nil {\n\t\tv := *s.IntegrationMethod\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationMethod\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationResponseSelectionExpression != nil {\n\t\tv := *s.IntegrationResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.IntegrationType) > 0 {\n\t\tv := s.IntegrationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.IntegrationUri != nil {\n\t\tv := *s.IntegrationUri\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationUri\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.PassthroughBehavior) > 0 {\n\t\tv := s.PassthroughBehavior\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"passthroughBehavior\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.PayloadFormatVersion != nil {\n\t\tv := *s.PayloadFormatVersion\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"payloadFormatVersion\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RequestParameters != nil {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RequestTemplates != nil {\n\t\tv := s.RequestTemplates\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestTemplates\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.TemplateSelectionExpression != nil {\n\t\tv := *s.TemplateSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"templateSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif 
s.TimeoutInMillis != nil {\n\t\tv := *s.TimeoutInMillis\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"timeoutInMillis\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.TlsConfig != nil {\n\t\tv := s.TlsConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"tlsConfig\", v, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateModelInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ContentType != nil {\n\t\tv := *s.ContentType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentType\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Schema != nil {\n\t\tv := *s.Schema\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"schema\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelId != nil {\n\t\tv := *s.ModelId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"modelId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateOTAUpdateInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.AdditionalParameters != nil {\n\t\tv := s.AdditionalParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"additionalParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.AwsJobExecutionsRolloutConfig != nil {\n\t\tv := s.AwsJobExecutionsRolloutConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"awsJobExecutionsRolloutConfig\", v, metadata)\n\t}\n\tif s.AwsJobPresignedUrlConfig != nil {\n\t\tv := s.AwsJobPresignedUrlConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"awsJobPresignedUrlConfig\", v, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Files != nil {\n\t\tv := s.Files\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"files\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Protocols != nil {\n\t\tv := s.Protocols\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"protocols\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v 
{\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.RoleArn != nil {\n\t\tv := *s.RoleArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"roleArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"tags\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.TargetSelection) > 0 {\n\t\tv := s.TargetSelection\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"targetSelection\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Targets != nil {\n\t\tv := s.Targets\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"targets\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.OtaUpdateId != nil {\n\t\tv := *s.OtaUpdateId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"otaUpdateId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateIntegrationInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ConnectionId != nil {\n\t\tv := *s.ConnectionId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ConnectionType) > 0 {\n\t\tv := s.ConnectionType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.ContentHandlingStrategy) > 0 {\n\t\tv := s.ContentHandlingStrategy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentHandlingStrategy\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.CredentialsArn != nil {\n\t\tv := *s.CredentialsArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"credentialsArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationMethod != nil {\n\t\tv := *s.IntegrationMethod\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationMethod\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.IntegrationType) > 0 {\n\t\tv := s.IntegrationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.IntegrationUri != nil {\n\t\tv := *s.IntegrationUri\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationUri\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.PassthroughBehavior) > 0 {\n\t\tv := s.PassthroughBehavior\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, 
\"passthroughBehavior\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestTemplates) > 0 {\n\t\tv := s.RequestTemplates\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestTemplates\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.TemplateSelectionExpression != nil {\n\t\tv := *s.TemplateSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"templateSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.TimeoutInMillis != nil {\n\t\tv := *s.TimeoutInMillis\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"timeoutInMillis\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateApiKeyInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Expires != nil {\n\t\tv := *s.Expires\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"expires\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateBranchInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.BackendEnvironmentArn != nil {\n\t\tv := *s.BackendEnvironmentArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"backendEnvironmentArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BasicAuthCredentials != nil {\n\t\tv := *s.BasicAuthCredentials\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"basicAuthCredentials\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BranchName != nil {\n\t\tv := *s.BranchName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"branchName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BuildSpec != nil {\n\t\tv := *s.BuildSpec\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"buildSpec\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DisplayName != nil {\n\t\tv := *s.DisplayName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"displayName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.EnableAutoBuild != nil {\n\t\tv := *s.EnableAutoBuild\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableAutoBuild\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnableBasicAuth != nil {\n\t\tv := *s.EnableBasicAuth\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableBasicAuth\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnableNotification != nil {\n\t\tv := *s.EnableNotification\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableNotification\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnablePullRequestPreview != nil {\n\t\tv := *s.EnablePullRequestPreview\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enablePullRequestPreview\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnvironmentVariables != nil {\n\t\tv := s.EnvironmentVariables\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"environmentVariables\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.Framework != nil {\n\t\tv := *s.Framework\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"framework\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.PullRequestEnvironmentName != nil {\n\t\tv := *s.PullRequestEnvironmentName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"pullRequestEnvironmentName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Stage) > 0 {\n\t\tv := s.Stage\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"stage\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.Ttl != nil {\n\t\tv := *s.Ttl\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ttl\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.AppId != nil {\n\t\tv := *s.AppId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"appId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateRouteResponseInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ResponseModels) > 0 {\n\t\tv := s.ResponseModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := 
e.Map(protocol.BodyTarget, \"responseModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.ResponseParameters) > 0 {\n\t\tv := s.ResponseParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"responseParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteResponseKey != nil {\n\t\tv := *s.RouteResponseKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseId != nil {\n\t\tv := *s.RouteResponseId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"routeResponseId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreatePackageInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.PackageDescription != nil {\n\t\tv := *s.PackageDescription\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"PackageDescription\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.PackageName != nil {\n\t\tv := *s.PackageName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"PackageName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.PackageSource != nil {\n\t\tv := s.PackageSource\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"PackageSource\", v, metadata)\n\t}\n\tif len(s.PackageType) > 0 {\n\t\tv := s.PackageType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"PackageType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\treturn nil\n}", "func (s UpdatePipelineOutput) MarshalFields(e protocol.FieldEncoder) error {\n\treturn nil\n}", "func (s OutputService11TestShapeOutputService11TestCaseOperation1Output) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Char != nil {\n\t\tv := *s.Char\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-char\", protocol.StringValue(v), metadata)\n\t}\n\tif s.Double != nil {\n\t\tv := *s.Double\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-double\", protocol.Float64Value(v), metadata)\n\t}\n\tif s.FalseBool != nil {\n\t\tv := *s.FalseBool\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-false-bool\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Float != nil {\n\t\tv := *s.Float\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-float\", protocol.Float64Value(v), metadata)\n\t}\n\tif s.Integer != nil {\n\t\tv := *s.Integer\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-int\", 
protocol.Int64Value(v), metadata)\n\t}\n\tif s.Long != nil {\n\t\tv := *s.Long\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-long\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Str != nil {\n\t\tv := *s.Str\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-str\", protocol.StringValue(v), metadata)\n\t}\n\tif s.Timestamp != nil {\n\t\tv := *s.Timestamp\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-timestamp\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.RFC822TimeFormatName, QuotedFormatTime: false}, metadata)\n\t}\n\tif s.TrueBool != nil {\n\t\tv := *s.TrueBool\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-true-bool\", protocol.BoolValue(v), metadata)\n\t}\n\treturn nil\n}", "func (s CreateRouteResponseInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ResponseModels) > 0 {\n\t\tv := s.ResponseModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"responseModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.ResponseParameters) > 0 {\n\t\tv := s.ResponseParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"responseParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteResponseKey != nil {\n\t\tv := *s.RouteResponseKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateAppInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.BasicAuthCredentials != nil {\n\t\tv := *s.BasicAuthCredentials\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"basicAuthCredentials\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BuildSpec != nil {\n\t\tv := *s.BuildSpec\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"buildSpec\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.CustomRules) > 0 {\n\t\tv := s.CustomRules\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"customRules\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v 
{\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.EnableBasicAuth != nil {\n\t\tv := *s.EnableBasicAuth\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableBasicAuth\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnableBranchAutoBuild != nil {\n\t\tv := *s.EnableBranchAutoBuild\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableBranchAutoBuild\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.EnvironmentVariables) > 0 {\n\t\tv := s.EnvironmentVariables\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"environmentVariables\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.IamServiceRoleArn != nil {\n\t\tv := *s.IamServiceRoleArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"iamServiceRoleArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OauthToken != nil {\n\t\tv := *s.OauthToken\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"oauthToken\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Platform) > 0 {\n\t\tv := s.Platform\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"platform\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Repository != nil {\n\t\tv := *s.Repository\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"repository\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Tags) > 0 {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\treturn nil\n}", "func (s GetApiInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateBranchInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.BasicAuthCredentials != nil {\n\t\tv := *s.BasicAuthCredentials\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"basicAuthCredentials\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BranchName != nil {\n\t\tv := *s.BranchName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"branchName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, 
metadata)\n\t}\n\tif s.BuildSpec != nil {\n\t\tv := *s.BuildSpec\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"buildSpec\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.EnableAutoBuild != nil {\n\t\tv := *s.EnableAutoBuild\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableAutoBuild\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnableBasicAuth != nil {\n\t\tv := *s.EnableBasicAuth\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableBasicAuth\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnableNotification != nil {\n\t\tv := *s.EnableNotification\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableNotification\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.EnvironmentVariables) > 0 {\n\t\tv := s.EnvironmentVariables\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"environmentVariables\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.Framework != nil {\n\t\tv := *s.Framework\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"framework\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Stage) > 0 {\n\t\tv := s.Stage\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"stage\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.Tags) > 0 {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.Ttl != nil {\n\t\tv := *s.Ttl\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ttl\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.AppId != nil {\n\t\tv := *s.AppId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"appId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateApiMappingOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiMappingId != nil {\n\t\tv := *s.ApiMappingId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiMappingId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiMappingKey != nil {\n\t\tv := *s.ApiMappingKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiMappingKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Stage != nil {\n\t\tv := *s.Stage\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"stage\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s VirtualNodeSpec) 
MarshalFields(e protocol.FieldEncoder) error {\n\tif s.BackendDefaults != nil {\n\t\tv := s.BackendDefaults\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"backendDefaults\", v, metadata)\n\t}\n\tif s.Backends != nil {\n\t\tv := s.Backends\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"backends\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Listeners != nil {\n\t\tv := s.Listeners\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"listeners\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Logging != nil {\n\t\tv := s.Logging\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"logging\", v, metadata)\n\t}\n\tif s.ServiceDiscovery != nil {\n\t\tv := s.ServiceDiscovery\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"serviceDiscovery\", v, metadata)\n\t}\n\treturn nil\n}", "func (s Integration) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ConnectionId != nil {\n\t\tv := *s.ConnectionId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ConnectionType) > 0 {\n\t\tv := s.ConnectionType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.ContentHandlingStrategy) > 0 {\n\t\tv := s.ContentHandlingStrategy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentHandlingStrategy\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.CredentialsArn != nil {\n\t\tv := *s.CredentialsArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"credentialsArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationId != nil {\n\t\tv := *s.IntegrationId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationMethod != nil {\n\t\tv := *s.IntegrationMethod\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationMethod\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationResponseSelectionExpression != nil {\n\t\tv := *s.IntegrationResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.IntegrationType) > 0 {\n\t\tv := s.IntegrationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.IntegrationUri != nil {\n\t\tv := *s.IntegrationUri\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationUri\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif 
len(s.PassthroughBehavior) > 0 {\n\t\tv := s.PassthroughBehavior\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"passthroughBehavior\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestTemplates) > 0 {\n\t\tv := s.RequestTemplates\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestTemplates\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.TemplateSelectionExpression != nil {\n\t\tv := *s.TemplateSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"templateSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.TimeoutInMillis != nil {\n\t\tv := *s.TimeoutInMillis\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"timeoutInMillis\", protocol.Int64Value(v), metadata)\n\t}\n\treturn nil\n}", "func (s NetworkPathComponent) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ComponentId != nil {\n\t\tv := *s.ComponentId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ComponentId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ComponentType != nil {\n\t\tv := *s.ComponentType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ComponentType\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Egress != nil {\n\t\tv := s.Egress\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"Egress\", v, metadata)\n\t}\n\tif s.Ingress != nil {\n\t\tv := s.Ingress\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"Ingress\", v, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateBranchInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.BasicAuthCredentials != nil {\n\t\tv := *s.BasicAuthCredentials\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"basicAuthCredentials\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BuildSpec != nil {\n\t\tv := *s.BuildSpec\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"buildSpec\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.EnableAutoBuild != nil {\n\t\tv := *s.EnableAutoBuild\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableAutoBuild\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnableBasicAuth != nil {\n\t\tv := *s.EnableBasicAuth\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableBasicAuth\", protocol.BoolValue(v), 
metadata)\n\t}\n\tif s.EnableNotification != nil {\n\t\tv := *s.EnableNotification\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableNotification\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.EnvironmentVariables) > 0 {\n\t\tv := s.EnvironmentVariables\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"environmentVariables\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.Framework != nil {\n\t\tv := *s.Framework\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"framework\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Stage) > 0 {\n\t\tv := s.Stage\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"stage\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Ttl != nil {\n\t\tv := *s.Ttl\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ttl\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.AppId != nil {\n\t\tv := *s.AppId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"appId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BranchName != nil {\n\t\tv := *s.BranchName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"branchName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s OutputService13TestShapeTimeContainer) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Bar != nil {\n\t\tv := *s.Bar\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"bar\",\n\t\t\tprotocol.TimeValue{V: v, Format: \"unixTimestamp\", QuotedFormatTime: false}, metadata)\n\t}\n\tif s.Foo != nil {\n\t\tv := *s.Foo\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"foo\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.ISO8601TimeFormatName, QuotedFormatTime: false}, metadata)\n\t}\n\treturn nil\n}", "func (s ImportComponentInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.ChangeDescription != nil {\n\t\tv := *s.ChangeDescription\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"changeDescription\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tvar ClientToken string\n\tif s.ClientToken != nil {\n\t\tClientToken = *s.ClientToken\n\t} else {\n\t\tClientToken = protocol.GetIdempotencyToken()\n\t}\n\t{\n\t\tv := ClientToken\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"clientToken\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Data != nil {\n\t\tv := *s.Data\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"data\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Format) > 0 {\n\t\tv := s.Format\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, 
\"format\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.KmsKeyId != nil {\n\t\tv := *s.KmsKeyId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"kmsKeyId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Platform) > 0 {\n\t\tv := s.Platform\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"platform\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.SemanticVersion != nil {\n\t\tv := *s.SemanticVersion\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"semanticVersion\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.Type) > 0 {\n\t\tv := s.Type\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"type\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Uri != nil {\n\t\tv := *s.Uri\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"uri\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s Route) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.AuthorizationScopes) > 0 {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RequestModels) > 0 {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := 
s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CodeReview) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.CodeReviewArn != nil {\n\t\tv := *s.CodeReviewArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"CodeReviewArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedTimeStamp != nil {\n\t\tv := *s.CreatedTimeStamp\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"CreatedTimeStamp\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.LastUpdatedTimeStamp != nil {\n\t\tv := *s.LastUpdatedTimeStamp\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"LastUpdatedTimeStamp\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.Metrics != nil {\n\t\tv := s.Metrics\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"Metrics\", v, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Owner != nil {\n\t\tv := *s.Owner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Owner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ProviderType) > 0 {\n\t\tv := s.ProviderType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ProviderType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.PullRequestId != nil {\n\t\tv := *s.PullRequestId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"PullRequestId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RepositoryName != nil {\n\t\tv := *s.RepositoryName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"RepositoryName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.SourceCodeType != nil {\n\t\tv := s.SourceCodeType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"SourceCodeType\", v, metadata)\n\t}\n\tif len(s.State) > 0 {\n\t\tv := s.State\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"State\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.StateReason != nil {\n\t\tv := *s.StateReason\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"StateReason\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Type) > 0 {\n\t\tv := s.Type\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Type\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\treturn nil\n}", "func (s PutObjectOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ContentSHA256 != nil {\n\t\tv := *s.ContentSHA256\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ContentSHA256\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ETag != nil {\n\t\tv := *s.ETag\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ETag\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.StorageClass) > 0 {\n\t\tv := s.StorageClass\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"StorageClass\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\treturn nil\n}", "func (s GetRouteOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiKeyRequired != nil {\n\t\tv := *s.ApiKeyRequired\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiKeyRequired\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.AuthorizationScopes) > 0 {\n\t\tv := s.AuthorizationScopes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"authorizationScopes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.AuthorizationType) > 0 {\n\t\tv := s.AuthorizationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AuthorizerId != nil {\n\t\tv := *s.AuthorizerId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"authorizerId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelSelectionExpression != nil {\n\t\tv := *s.ModelSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OperationName != nil {\n\t\tv := *s.OperationName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"operationName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.RequestModels) > 0 {\n\t\tv := s.RequestModels\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestModels\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetFields(k1, v1)\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.RouteId != nil {\n\t\tv := *s.RouteId\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteKey != nil {\n\t\tv := *s.RouteKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteResponseSelectionExpression != nil {\n\t\tv := *s.RouteResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s OutputService1TestShapeOutputService1TestCaseOperation1Output) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Char != nil {\n\t\tv := *s.Char\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Char\", protocol.StringValue(v), metadata)\n\t}\n\tif s.Double != nil {\n\t\tv := *s.Double\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Double\", protocol.Float64Value(v), metadata)\n\t}\n\tif s.FalseBool != nil {\n\t\tv := *s.FalseBool\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"FalseBool\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Float != nil {\n\t\tv := *s.Float\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Float\", protocol.Float64Value(v), metadata)\n\t}\n\tif s.Float64s != nil {\n\t\tv := s.Float64s\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"Float64s\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.Float64Value(v1))\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Long != nil {\n\t\tv := *s.Long\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Long\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Num != nil {\n\t\tv := *s.Num\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"FooNum\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Str != nil {\n\t\tv := *s.Str\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Str\", protocol.StringValue(v), metadata)\n\t}\n\tif s.Timestamp != nil {\n\t\tv := *s.Timestamp\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Timestamp\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.ISO8601TimeFormatName, QuotedFormatTime: false}, metadata)\n\t}\n\tif s.TrueBool != nil {\n\t\tv := *s.TrueBool\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"TrueBool\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.ImaHeader != nil {\n\t\tv := *s.ImaHeader\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"ImaHeader\", protocol.StringValue(v), metadata)\n\t}\n\tif s.ImaHeaderLocation != nil {\n\t\tv := *s.ImaHeaderLocation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"X-Foo\", protocol.StringValue(v), metadata)\n\t}\n\treturn nil\n}", "func (s UpdateAppInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.BasicAuthCredentials != nil {\n\t\tv := 
*s.BasicAuthCredentials\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"basicAuthCredentials\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BuildSpec != nil {\n\t\tv := *s.BuildSpec\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"buildSpec\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.CustomRules) > 0 {\n\t\tv := s.CustomRules\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"customRules\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.EnableBasicAuth != nil {\n\t\tv := *s.EnableBasicAuth\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableBasicAuth\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.EnableBranchAutoBuild != nil {\n\t\tv := *s.EnableBranchAutoBuild\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"enableBranchAutoBuild\", protocol.BoolValue(v), metadata)\n\t}\n\tif len(s.EnvironmentVariables) > 0 {\n\t\tv := s.EnvironmentVariables\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"environmentVariables\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.IamServiceRoleArn != nil {\n\t\tv := *s.IamServiceRoleArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"iamServiceRoleArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Platform) > 0 {\n\t\tv := s.Platform\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"platform\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AppId != nil {\n\t\tv := *s.AppId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"appId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s GetStageInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.StageName != nil {\n\t\tv := *s.StageName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"stageName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s OutputService1TestShapeOutputService1TestCaseOperation2Output) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Char != nil {\n\t\tv := *s.Char\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Char\", protocol.StringValue(v), metadata)\n\t}\n\tif s.Double != nil {\n\t\tv := *s.Double\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Double\", protocol.Float64Value(v), metadata)\n\t}\n\tif s.FalseBool != nil {\n\t\tv := *s.FalseBool\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"FalseBool\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Float != nil {\n\t\tv := *s.Float\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Float\", protocol.Float64Value(v), metadata)\n\t}\n\tif s.Float64s != nil {\n\t\tv := s.Float64s\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"Float64s\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.Float64Value(v1))\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Long != nil {\n\t\tv := *s.Long\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Long\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Num != nil {\n\t\tv := *s.Num\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"FooNum\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Str != nil {\n\t\tv := *s.Str\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Str\", protocol.StringValue(v), metadata)\n\t}\n\tif s.Timestamp != nil {\n\t\tv := *s.Timestamp\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Timestamp\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.ISO8601TimeFormatName, QuotedFormatTime: false}, metadata)\n\t}\n\tif s.TrueBool != nil {\n\t\tv := *s.TrueBool\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"TrueBool\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.ImaHeader != nil {\n\t\tv := *s.ImaHeader\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"ImaHeader\", protocol.StringValue(v), metadata)\n\t}\n\tif s.ImaHeaderLocation != nil {\n\t\tv := *s.ImaHeaderLocation\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"X-Foo\", protocol.StringValue(v), metadata)\n\t}\n\treturn nil\n}", "func (s GetIntegrationOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ConnectionId != nil {\n\t\tv := *s.ConnectionId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ConnectionType) > 0 {\n\t\tv := s.ConnectionType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"connectionType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.ContentHandlingStrategy) > 0 {\n\t\tv := s.ContentHandlingStrategy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentHandlingStrategy\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.CredentialsArn != nil {\n\t\tv := *s.CredentialsArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"credentialsArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationId != nil {\n\t\tv := *s.IntegrationId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationMethod != nil {\n\t\tv := 
*s.IntegrationMethod\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationMethod\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationResponseSelectionExpression != nil {\n\t\tv := *s.IntegrationResponseSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationResponseSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.IntegrationType) > 0 {\n\t\tv := s.IntegrationType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.IntegrationUri != nil {\n\t\tv := *s.IntegrationUri\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"integrationUri\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.PassthroughBehavior) > 0 {\n\t\tv := s.PassthroughBehavior\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"passthroughBehavior\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.RequestParameters) > 0 {\n\t\tv := s.RequestParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif len(s.RequestTemplates) > 0 {\n\t\tv := s.RequestTemplates\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"requestTemplates\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.TemplateSelectionExpression != nil {\n\t\tv := *s.TemplateSelectionExpression\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"templateSelectionExpression\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.TimeoutInMillis != nil {\n\t\tv := *s.TimeoutInMillis\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"timeoutInMillis\", protocol.Int64Value(v), metadata)\n\t}\n\treturn nil\n}", "func (s CreatePolicyInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.PolicyDocument != nil {\n\t\tv := *s.PolicyDocument\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"policyDocument\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.PolicyName != nil {\n\t\tv := *s.PolicyName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"policyName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s VirtualServiceBackend) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ClientPolicy != nil {\n\t\tv := s.ClientPolicy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"clientPolicy\", v, metadata)\n\t}\n\tif s.VirtualServiceName != nil {\n\t\tv := *s.VirtualServiceName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"virtualServiceName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s 
CreateBucketInput) MarshalFields(e protocol.FieldEncoder) error {\n\n\tif len(s.ACL) > 0 {\n\t\tv := s.ACL\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-amz-acl\", v, metadata)\n\t}\n\tif s.GrantFullControl != nil {\n\t\tv := *s.GrantFullControl\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-amz-grant-full-control\", protocol.StringValue(v), metadata)\n\t}\n\tif s.GrantRead != nil {\n\t\tv := *s.GrantRead\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-amz-grant-read\", protocol.StringValue(v), metadata)\n\t}\n\tif s.GrantReadACP != nil {\n\t\tv := *s.GrantReadACP\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-amz-grant-read-acp\", protocol.StringValue(v), metadata)\n\t}\n\tif s.GrantWrite != nil {\n\t\tv := *s.GrantWrite\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-amz-grant-write\", protocol.StringValue(v), metadata)\n\t}\n\tif s.GrantWriteACP != nil {\n\t\tv := *s.GrantWriteACP\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-amz-grant-write-acp\", protocol.StringValue(v), metadata)\n\t}\n\tif s.ObjectLockEnabledForBucket != nil {\n\t\tv := *s.ObjectLockEnabledForBucket\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.HeaderTarget, \"x-amz-bucket-object-lock-enabled\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.Bucket != nil {\n\t\tv := *s.Bucket\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"Bucket\", protocol.StringValue(v), metadata)\n\t}\n\tif s.CreateBucketConfiguration != nil {\n\t\tv := s.CreateBucketConfiguration\n\n\t\tmetadata := protocol.Metadata{XMLNamespaceURI: \"http://s3.amazonaws.com/doc/2006-03-01/\"}\n\t\te.SetFields(protocol.PayloadTarget, \"CreateBucketConfiguration\", v, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateApiMappingOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiMappingId != nil {\n\t\tv := *s.ApiMappingId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiMappingId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ApiMappingKey != nil {\n\t\tv := *s.ApiMappingKey\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"apiMappingKey\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Stage != nil {\n\t\tv := *s.Stage\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"stage\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s Pipeline) MarshalFields(e protocol.FieldEncoder) error {\n\tif len(s.Activities) > 0 {\n\t\tv := s.Activities\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"activities\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreationTime != nil {\n\t\tv := *s.CreationTime\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, 
\"creationTime\", protocol.TimeValue{V: v, Format: protocol.UnixTimeFormat}, metadata)\n\t}\n\tif s.LastUpdateTime != nil {\n\t\tv := *s.LastUpdateTime\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastUpdateTime\", protocol.TimeValue{V: v, Format: protocol.UnixTimeFormat}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.ReprocessingSummaries) > 0 {\n\t\tv := s.ReprocessingSummaries\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"reprocessingSummaries\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s VirtualNodeRef) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedAt != nil {\n\t\tv := *s.CreatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.LastUpdatedAt != nil {\n\t\tv := *s.LastUpdatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastUpdatedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.MeshName != nil {\n\t\tv := *s.MeshName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MeshOwner != nil {\n\t\tv := *s.MeshOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ResourceOwner != nil {\n\t\tv := *s.ResourceOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"resourceOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.VirtualNodeName != nil {\n\t\tv := *s.VirtualNodeName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"virtualNodeName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s OTAUpdateInfo) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.AdditionalParameters != nil {\n\t\tv := s.AdditionalParameters\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"additionalParameters\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.AwsIotJobArn != nil {\n\t\tv := *s.AwsIotJobArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"awsIotJobArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.AwsIotJobId != nil {\n\t\tv := *s.AwsIotJobId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"awsIotJobId\", protocol.QuotedValue{ValueMarshaler: 
protocol.StringValue(v)}, metadata)\n\t}\n\tif s.AwsJobExecutionsRolloutConfig != nil {\n\t\tv := s.AwsJobExecutionsRolloutConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"awsJobExecutionsRolloutConfig\", v, metadata)\n\t}\n\tif s.AwsJobPresignedUrlConfig != nil {\n\t\tv := s.AwsJobPresignedUrlConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"awsJobPresignedUrlConfig\", v, metadata)\n\t}\n\tif s.CreationDate != nil {\n\t\tv := *s.CreationDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"creationDate\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ErrorInfo != nil {\n\t\tv := s.ErrorInfo\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"errorInfo\", v, metadata)\n\t}\n\tif s.LastModifiedDate != nil {\n\t\tv := *s.LastModifiedDate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastModifiedDate\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.OtaUpdateArn != nil {\n\t\tv := *s.OtaUpdateArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"otaUpdateArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.OtaUpdateFiles != nil {\n\t\tv := s.OtaUpdateFiles\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"otaUpdateFiles\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.OtaUpdateId != nil {\n\t\tv := *s.OtaUpdateId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"otaUpdateId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.OtaUpdateStatus) > 0 {\n\t\tv := s.OtaUpdateStatus\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"otaUpdateStatus\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Protocols != nil {\n\t\tv := s.Protocols\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"protocols\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif len(s.TargetSelection) > 0 {\n\t\tv := s.TargetSelection\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"targetSelection\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Targets != nil {\n\t\tv := s.Targets\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"targets\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\treturn nil\n}", "func (s GetIntrospectionSchemaInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: 
protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Format) > 0 {\n\t\tv := s.Format\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.QueryTarget, \"format\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.IncludeDirectives != nil {\n\t\tv := *s.IncludeDirectives\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.QueryTarget, \"includeDirectives\", protocol.BoolValue(v), metadata)\n\t}\n\treturn nil\n}", "func (s Source) MarshalFields(e protocol.FieldEncoder) error {\n\tif len(s.Architecture) > 0 {\n\t\tv := s.Architecture\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"architecture\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Etag != nil {\n\t\tv := *s.Etag\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"etag\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.S3Bucket != nil {\n\t\tv := *s.S3Bucket\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"s3Bucket\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.S3Key != nil {\n\t\tv := *s.S3Key\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"s3Key\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CreateJobInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.HopDestinations != nil {\n\t\tv := s.HopDestinations\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"hopDestinations\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\t}\n\tif s.AccelerationSettings != nil {\n\t\tv := s.AccelerationSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"accelerationSettings\", v, metadata)\n\t}\n\tif len(s.BillingTagsSource) > 0 {\n\t\tv := s.BillingTagsSource\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"billingTagsSource\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tvar ClientRequestToken string\n\tif s.ClientRequestToken != nil {\n\t\tClientRequestToken = *s.ClientRequestToken\n\t} else {\n\t\tClientRequestToken = protocol.GetIdempotencyToken()\n\t}\n\t{\n\t\tv := ClientRequestToken\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"clientRequestToken\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.JobTemplate != nil {\n\t\tv := *s.JobTemplate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"jobTemplate\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Priority != nil {\n\t\tv := *s.Priority\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"priority\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Queue != nil {\n\t\tv := *s.Queue\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"queue\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Role != nil {\n\t\tv := *s.Role\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"role\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Settings != nil {\n\t\tv := s.Settings\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"settings\", v, metadata)\n\t}\n\tif len(s.SimulateReservedQueue) > 0 {\n\t\tv := s.SimulateReservedQueue\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"simulateReservedQueue\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.StatusUpdateInterval) > 0 {\n\t\tv := s.StatusUpdateInterval\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"statusUpdateInterval\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.UserMetadata != nil {\n\t\tv := s.UserMetadata\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"userMetadata\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\treturn nil\n}", "func (s CreateModelOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ContentType != nil {\n\t\tv := *s.ContentType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"contentType\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelId != nil {\n\t\tv := *s.ModelId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"modelId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Schema != nil {\n\t\tv := *s.Schema\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"schema\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s UpdateBrokerStorageInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.CurrentVersion != nil {\n\t\tv := *s.CurrentVersion\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"currentVersion\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.TargetBrokerEBSVolumeInfo != nil {\n\t\tv := s.TargetBrokerEBSVolumeInfo\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"targetBrokerEBSVolumeInfo\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.ClusterArn != nil {\n\t\tv := *s.ClusterArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"clusterArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s CustomCodeSigning) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.CertificateChain != nil {\n\t\tv := s.CertificateChain\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"certificateChain\", v, metadata)\n\t}\n\tif s.HashAlgorithm != nil {\n\t\tv := *s.HashAlgorithm\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"hashAlgorithm\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Signature != nil {\n\t\tv := s.Signature\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"signature\", v, metadata)\n\t}\n\tif s.SignatureAlgorithm != nil {\n\t\tv := *s.SignatureAlgorithm\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"signatureAlgorithm\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s GetMacieSessionInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\treturn nil\n}", "func (s MeshRef) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedAt != nil {\n\t\tv := *s.CreatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.LastUpdatedAt != nil {\n\t\tv := *s.LastUpdatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastUpdatedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.MeshName != nil {\n\t\tv := *s.MeshName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MeshOwner != nil {\n\t\tv := *s.MeshOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ResourceOwner != nil {\n\t\tv := *s.ResourceOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"resourceOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.Int64Value(v), metadata)\n\t}\n\treturn nil\n}", "func (s GatewayRouteRef) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedAt != nil {\n\t\tv := *s.CreatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.GatewayRouteName != nil {\n\t\tv := *s.GatewayRouteName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"gatewayRouteName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.LastUpdatedAt != nil {\n\t\tv := *s.LastUpdatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastUpdatedAt\",\n\t\t\tprotocol.TimeValue{V: 
v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.MeshName != nil {\n\t\tv := *s.MeshName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MeshOwner != nil {\n\t\tv := *s.MeshOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ResourceOwner != nil {\n\t\tv := *s.ResourceOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"resourceOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.VirtualGatewayName != nil {\n\t\tv := *s.VirtualGatewayName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"virtualGatewayName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s Resource) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Attributes) > 0 {\n\t\tv := s.Attributes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"attributes\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.Feature != nil {\n\t\tv := *s.Feature\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"feature\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Name != nil {\n\t\tv := *s.Name\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"name\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Type != nil {\n\t\tv := *s.Type\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"type\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s RouteRef) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CreatedAt != nil {\n\t\tv := *s.CreatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.LastUpdatedAt != nil {\n\t\tv := *s.LastUpdatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastUpdatedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.MeshName != nil {\n\t\tv := *s.MeshName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MeshOwner != nil {\n\t\tv := *s.MeshOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"meshOwner\", 
protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ResourceOwner != nil {\n\t\tv := *s.ResourceOwner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"resourceOwner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.RouteName != nil {\n\t\tv := *s.RouteName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"routeName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Version != nil {\n\t\tv := *s.Version\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"version\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.VirtualRouterName != nil {\n\t\tv := *s.VirtualRouterName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"virtualRouterName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s DescribePipelineOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Pipeline != nil {\n\t\tv := s.Pipeline\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"pipeline\", v, metadata)\n\t}\n\treturn nil\n}", "func (s DescribePipelineOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Pipeline != nil {\n\t\tv := s.Pipeline\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"pipeline\", v, metadata)\n\t}\n\treturn nil\n}", "func (s DescribeDetectorModelInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.DetectorModelName != nil {\n\t\tv := *s.DetectorModelName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"detectorModelName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.DetectorModelVersion != nil {\n\t\tv := *s.DetectorModelVersion\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.QueryTarget, \"version\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s AttachPolicyInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.PolicyName != nil {\n\t\tv := *s.PolicyName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"policyName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s GetSigningPlatformOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif len(s.Category) > 0 {\n\t\tv := s.Category\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"category\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.DisplayName != nil {\n\t\tv := *s.DisplayName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"displayName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.MaxSizeInMB != nil {\n\t\tv := *s.MaxSizeInMB\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"maxSizeInMB\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Partner != nil {\n\t\tv := 
*s.Partner\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"partner\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.PlatformId != nil {\n\t\tv := *s.PlatformId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"platformId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.SigningConfiguration != nil {\n\t\tv := s.SigningConfiguration\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"signingConfiguration\", v, metadata)\n\t}\n\tif s.SigningImageFormat != nil {\n\t\tv := s.SigningImageFormat\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"signingImageFormat\", v, metadata)\n\t}\n\tif s.Target != nil {\n\t\tv := *s.Target\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"target\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (v *Service) Encode(sw stream.Writer) error {\n\tif err := sw.WriteStructBegin(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 7, Type: wire.TBinary}); err != nil {\n\t\treturn err\n\t}\n\tif err := sw.WriteString(v.Name); err != nil {\n\t\treturn err\n\t}\n\tif err := sw.WriteFieldEnd(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 1, Type: wire.TBinary}); err != nil {\n\t\treturn err\n\t}\n\tif err := sw.WriteString(v.ThriftName); err != nil {\n\t\treturn err\n\t}\n\tif err := sw.WriteFieldEnd(); err != nil {\n\t\treturn err\n\t}\n\n\tif v.ParentID != nil {\n\t\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 4, Type: wire.TI32}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := v.ParentID.Encode(sw); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := sw.WriteFieldEnd(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 5, Type: wire.TList}); err != nil {\n\t\treturn err\n\t}\n\tif err := _List_Function_Encode(v.Functions, sw); err != nil {\n\t\treturn err\n\t}\n\tif err := sw.WriteFieldEnd(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 6, Type: wire.TI32}); err != nil {\n\t\treturn err\n\t}\n\tif err := v.ModuleID.Encode(sw); err != nil {\n\t\treturn err\n\t}\n\tif err := sw.WriteFieldEnd(); err != nil {\n\t\treturn err\n\t}\n\n\tif v.Annotations != nil {\n\t\tif err := sw.WriteFieldBegin(stream.FieldHeader{ID: 8, Type: wire.TMap}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := _Map_String_String_Encode(v.Annotations, sw); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := sw.WriteFieldEnd(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn sw.WriteStructEnd()\n}", "func (s Product) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ActivationUrl != nil {\n\t\tv := *s.ActivationUrl\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ActivationUrl\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Categories != nil {\n\t\tv := s.Categories\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"Categories\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.CompanyName != nil {\n\t\tv := *s.CompanyName\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"CompanyName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.IntegrationTypes != nil {\n\t\tv := s.IntegrationTypes\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"IntegrationTypes\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.MarketplaceUrl != nil {\n\t\tv := *s.MarketplaceUrl\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"MarketplaceUrl\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ProductArn != nil {\n\t\tv := *s.ProductArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ProductArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ProductName != nil {\n\t\tv := *s.ProductName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ProductName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ProductSubscriptionResourcePolicy != nil {\n\t\tv := *s.ProductSubscriptionResourcePolicy\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"ProductSubscriptionResourcePolicy\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s AwsLambdaFunctionLayer) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Arn != nil {\n\t\tv := *s.Arn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"Arn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CodeSize != nil {\n\t\tv := *s.CodeSize\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"CodeSize\", protocol.Int64Value(v), metadata)\n\t}\n\treturn nil\n}", "func (s HttpAuthorization) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.Sigv4 != nil {\n\t\tv := s.Sigv4\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"sigv4\", v, metadata)\n\t}\n\treturn nil\n}", "func (s DescribePipelineInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.PipelineName != nil {\n\t\tv := *s.PipelineName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"pipelineName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s GetModelInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.ApiId != nil {\n\t\tv := *s.ApiId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"apiId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ModelId != nil {\n\t\tv := *s.ModelId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"modelId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}" ]
[ "0.63462156", "0.6244603", "0.6192145", "0.6177832", "0.6165666", "0.61186266", "0.60982037", "0.60677594", "0.6061786", "0.60107094", "0.6006523", "0.5958391", "0.5950599", "0.59313834", "0.59261024", "0.5919634", "0.5908056", "0.5903742", "0.5897319", "0.5880696", "0.5874844", "0.5873327", "0.58714265", "0.5867448", "0.5867433", "0.5854598", "0.58473366", "0.58472943", "0.5843353", "0.58340627", "0.5831743", "0.5818388", "0.58168674", "0.58119106", "0.5810605", "0.5808661", "0.5801962", "0.58001596", "0.5799135", "0.5784449", "0.57781595", "0.57743233", "0.57717377", "0.57703775", "0.57629114", "0.5758634", "0.5756448", "0.57562757", "0.5749415", "0.5746013", "0.5744098", "0.5743717", "0.5740336", "0.57374614", "0.5730066", "0.5729947", "0.57255423", "0.5717375", "0.5716901", "0.5706139", "0.5699765", "0.56990606", "0.56980914", "0.5694049", "0.5692233", "0.5688885", "0.5683727", "0.5680244", "0.5674424", "0.56738365", "0.5673697", "0.5662647", "0.56578994", "0.56576484", "0.5656073", "0.5655038", "0.5650553", "0.5647669", "0.5641901", "0.5641728", "0.5639997", "0.5636208", "0.56356674", "0.5633682", "0.5632424", "0.563218", "0.562974", "0.5624169", "0.5623405", "0.5621904", "0.56198096", "0.56198096", "0.5617169", "0.5613568", "0.56114084", "0.5610602", "0.5609313", "0.5607009", "0.5606507", "0.560251", "0.5586581" ]
0.0
-1
Send marshals and sends the DescribeUserHierarchyStructure API request.
func (r DescribeUserHierarchyStructureRequest) Send(ctx context.Context) (*DescribeUserHierarchyStructureResponse, error) { r.Request.SetContext(ctx) err := r.Request.Send() if err != nil { return nil, err } resp := &DescribeUserHierarchyStructureResponse{ DescribeUserHierarchyStructureOutput: r.Request.Data.(*DescribeUserHierarchyStructureOutput), response: &aws.Response{Request: r.Request}, } return resp, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *DescribeUserHierarchyStructureOutput) SetHierarchyStructure(v *HierarchyStructure) *DescribeUserHierarchyStructureOutput {\n\ts.HierarchyStructure = v\n\treturn s\n}", "func (s *UpdateUserHierarchyStructureInput) SetHierarchyStructure(v *HierarchyStructureUpdate) *UpdateUserHierarchyStructureInput {\n\ts.HierarchyStructure = v\n\treturn s\n}", "func (s DescribeUserHierarchyStructureOutput) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.HierarchyStructure != nil {\n\t\tv := s.HierarchyStructure\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"HierarchyStructure\", v, metadata)\n\t}\n\treturn nil\n}", "func (s DescribeUserHierarchyStructureInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.InstanceId != nil {\n\t\tv := *s.InstanceId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"InstanceId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (s DescribeUserHierarchyStructureInput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s DescribeUserHierarchyStructureInput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s DescribeUserHierarchyStructureOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s DescribeUserHierarchyStructureOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (status UpstreamStatus) MarshalHierarchical() string {\n\thierarchy := hierr.Push(\n\t\t\"upstream\",\n\t\t\"total: \"+strconv.Itoa(status.Total),\n\t\tfmt.Sprintf(\n\t\t\t\"success: %d (%.2f%%)\",\n\t\t\tstatus.Success, status.SuccessPercent,\n\t\t),\n\t\tfmt.Sprintf(\n\t\t\t\"error: %d (%.2f%%)\",\n\t\t\tstatus.Error, status.ErrorPercent,\n\t\t),\n\t)\n\n\tif len(status.Slaves) > 0 {\n\t\tslaves := errors.New(\"slaves\")\n\t\tfor _, slave := range status.Slaves {\n\t\t\tslaves = hierr.Push(slaves, slave.MarshalHierarchical())\n\t\t}\n\n\t\thierarchy = hierr.Push(hierarchy, slaves)\n\t}\n\n\treturn hierr.String(hierarchy)\n}", "func (c *Connect) DescribeUserHierarchyStructureWithContext(ctx aws.Context, input *DescribeUserHierarchyStructureInput, opts ...request.Option) (*DescribeUserHierarchyStructureOutput, error) {\n\treq, out := c.DescribeUserHierarchyStructureRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}", "func (status ServerStatus) MarshalHierarchical() []byte {\n\tvar hierarchy error\n\tif status.Address != \"\" {\n\t\thierarchy = hierr.Push(status.Address)\n\t} else {\n\t\thierarchy = hierr.Push(\"status\")\n\t}\n\n\tif status.Role != \"master\" {\n\t\tstatus.Role = \"slave\"\n\t}\n\n\thierarchy = hierr.Push(\n\t\thierarchy,\n\t\tfmt.Sprintf(\"role: %s\", status.Role),\n\t)\n\n\thierarchy = hierr.Push(\n\t\thierarchy,\n\t\tfmt.Sprintf(\"total: %d\", len(status.Mirrors)),\n\t)\n\n\tif status.HierarchicalError != \"\" {\n\t\thierarchy = hierr.Push(\n\t\t\thierarchy,\n\t\t\thierr.Push(\"error\", status.HierarchicalError),\n\t\t)\n\t}\n\n\tif len(status.Mirrors) > 0 {\n\t\tmirrors := errors.New(\"mirrors\")\n\t\tfor _, mirror := range status.Mirrors {\n\t\t\tmirrors = hierr.Push(mirrors, mirror.MarshalHierarchical())\n\t\t}\n\t\thierarchy = hierr.Push(hierarchy, mirrors)\n\t}\n\n\tif status.Role == \"master\" {\n\t\thierarchy = hierr.Push(hierarchy, status.Upstream.MarshalHierarchical())\n\t}\n\n\treturn []byte(hierr.String(hierarchy))\n}", "func 
(*CreateUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_toit_api_organization_proto_rawDescGZIP(), []int{3}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_toit_api_organization_proto_rawDescGZIP(), []int{2}\n}", "func (r DescribeUserRequest) Send(ctx context.Context) (*DescribeUserResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &DescribeUserResponse{\n\t\tDescribeUserOutput: r.Request.Data.(*DescribeUserOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}", "func (*MessageHubBlockUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_messagehub_proto_rawDescGZIP(), []int{10}\n}", "func (u *DbUser) UserStructureLevel(meetingID int) string {\n\tif u.Level == \"\" {\n\t\treturn u.DefaultLevel\n\t}\n\treturn u.Level\n}", "func (*CreateOrganizationRequest_User) Descriptor() ([]byte, []int) {\n\treturn file_toit_api_auth_proto_rawDescGZIP(), []int{6, 1}\n}", "func (a *Client) GetUniverseStructures(params *GetUniverseStructuresParams) (*GetUniverseStructuresOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetUniverseStructuresParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"get_universe_structures\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/universe/structures/\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetUniverseStructuresReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetUniverseStructuresOK), nil\n\n}", "func CreateHierarchyPage(req *http.Request, bp core.Page, h hierarchyClient.Model, dst dataset.DatasetDetails, f filter.Model, selectedValueLabels map[string]string, dims dataset.VersionDimensions, name, curPath, datasetID, apiRouterVersion, lang, serviceMessage string, emergencyBannerContent zebedee.EmergencyBanner) model.Hierarchy {\n\tp := model.Hierarchy{\n\t\tPage: bp,\n\t}\n\tp.BetaBannerEnabled = true\n\tp.FeatureFlags.SixteensVersion = sixteensVersion\n\tp.Language = lang\n\tp.RemoveGalleryBackground = true\n\n\tmapCookiePreferences(req, &p.CookiesPreferencesSet, &p.CookiesPolicy)\n\n\tctx := req.Context()\n\tlog.Info(ctx, \"mapping api response models to hierarchy page\", log.Data{\"filterID\": f.FilterID, \"datasetID\": datasetID, \"label\": h.Label})\n\n\tpageTitle := helpers.TitleCaseStr(name)\n\tfor i := range dims.Items {\n\t\tif dims.Items[i].Name == name {\n\t\t\tp.Metadata.Description = dims.Items[i].Description\n\t\t\tif len(dims.Items[i].Label) > 0 {\n\t\t\t\tpageTitle = dims.Items[i].Label\n\t\t\t}\n\t\t}\n\t}\n\n\tp.DatasetTitle = dst.Title\n\tp.Data.DimensionName = pageTitle\n\tp.DatasetId = datasetID\n\tp.URI = req.URL.Path\n\tp.ServiceMessage = serviceMessage\n\tp.EmergencyBanner = mapEmergencyBanner(emergencyBannerContent)\n\n\tvar title string\n\tif len(h.Breadcrumbs) == 0 {\n\t\ttitle = pageTitle\n\t} else {\n\t\ttitle = h.Label\n\t}\n\n\tvar ok bool\n\tif p.Type, ok = hierarchyBrowseLookup[name]; !ok {\n\t\tp.Type = \"type\"\n\t}\n\n\tp.SearchDisabled = true\n\n\tp.Data.SearchURL = fmt.Sprintf(\"/filters/%s/dimensions/%s/search\", f.FilterID, name)\n\n\tversionURL, err := url.Parse(f.Links.Version.HRef)\n\tif err != nil {\n\t\tlog.Warn(ctx, \"unable to parse 
version url\", log.FormatErrors([]error{err}))\n\t}\n\tversionPath := strings.TrimPrefix(versionURL.Path, apiRouterVersion)\n\n\tp.IsInFilterBreadcrumb = true\n\n\t_, edition, _, err := helpers.ExtractDatasetInfoFromPath(ctx, versionPath)\n\tif err != nil {\n\t\tlog.Warn(ctx, \"unable to extract edition from url\", log.FormatErrors([]error{err}))\n\t}\n\n\tp.Breadcrumb = append(\n\t\tp.Breadcrumb,\n\t\tcore.TaxonomyNode{\n\t\t\tTitle: dst.Title,\n\t\t\tURI: fmt.Sprintf(\"/datasets/%s/editions\", dst.ID),\n\t\t}, core.TaxonomyNode{\n\t\t\tTitle: edition,\n\t\t\tURI: versionPath,\n\t\t}, core.TaxonomyNode{\n\t\t\tTitle: \"Filter options\",\n\t\t\tURI: fmt.Sprintf(\"/filters/%s/dimensions\", f.FilterID),\n\t\t})\n\n\tif len(h.Breadcrumbs) > 0 {\n\t\tif name == \"geography\" {\n\t\t\tp.Breadcrumb = append(p.Breadcrumb, core.TaxonomyNode{\n\t\t\t\tTitle: \"Geographic Areas\",\n\t\t\t\tURI: fmt.Sprintf(\"/filters/%s/dimensions/%s\", f.FilterID, \"geography\"),\n\t\t\t})\n\n\t\t\tif !topLevelGeographies[h.Links.Code.ID] {\n\t\t\t\tfor i := len(h.Breadcrumbs) - 1; i >= 0; i-- {\n\t\t\t\t\tbreadcrumb := h.Breadcrumbs[i]\n\n\t\t\t\t\tif !topLevelGeographies[breadcrumb.Links.Code.ID] {\n\t\t\t\t\t\tvar uri string\n\t\t\t\t\t\tif breadcrumb.Links.Code.ID != \"\" {\n\t\t\t\t\t\t\turi = fmt.Sprintf(\"/filters/%s/dimensions/%s/%s\", f.FilterID, name, breadcrumb.Links.Code.ID)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\turi = fmt.Sprintf(\"/filters/%s/dimensions/%s\", f.FilterID, name)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tp.Breadcrumb = append(p.Breadcrumb, core.TaxonomyNode{\n\t\t\t\t\t\t\tTitle: breadcrumb.Label,\n\t\t\t\t\t\t\tURI: uri,\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tfor i := len(h.Breadcrumbs) - 1; i >= 0; i-- {\n\t\t\t\tbreadcrumb := h.Breadcrumbs[i]\n\n\t\t\t\tvar uri string\n\t\t\t\tif breadcrumb.Links.Code.ID != \"\" {\n\t\t\t\t\turi = fmt.Sprintf(\"/filters/%s/dimensions/%s/%s\", f.FilterID, name, breadcrumb.Links.Code.ID)\n\t\t\t\t} else {\n\t\t\t\t\turi = fmt.Sprintf(\"/filters/%s/dimensions/%s\", f.FilterID, name)\n\t\t\t\t}\n\n\t\t\t\tp.Breadcrumb = append(p.Breadcrumb, core.TaxonomyNode{\n\t\t\t\t\tTitle: breadcrumb.Label,\n\t\t\t\t\tURI: uri,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\tp.Breadcrumb = append(p.Breadcrumb, core.TaxonomyNode{\n\t\tTitle: title,\n\t})\n\n\tp.FilterID = f.FilterID\n\tp.Data.Title = title\n\tp.Metadata.Title = fmt.Sprintf(\"Filter Options - %s\", title)\n\n\tif len(h.Breadcrumbs) > 0 {\n\t\tif len(h.Breadcrumbs) == 1 || topLevelGeographies[h.Breadcrumbs[0].Links.Code.ID] && name == \"geography\" {\n\t\t\tp.Data.Parent = pageTitle\n\t\t\tp.Data.GoBack = model.Link{\n\t\t\t\tURL: fmt.Sprintf(\"/filters/%s/dimensions/%s\", f.FilterID, name),\n\t\t\t}\n\t\t} else {\n\t\t\tp.Data.Parent = h.Breadcrumbs[0].Label\n\t\t\tp.Data.GoBack = model.Link{\n\t\t\t\tURL: fmt.Sprintf(\"/filters/%s/dimensions/%s/%s\", f.FilterID, name, h.Breadcrumbs[0].Links.Code.ID),\n\t\t\t}\n\t\t}\n\t}\n\n\tp.Data.AddAllFilters.Amount = strconv.Itoa(len(h.Children))\n\tp.Data.AddAllFilters.URL = curPath + \"/add-all\"\n\tfor _, child := range h.Children {\n\t\tif child.HasData {\n\t\t\tp.Data.HasData = true\n\t\t\tbreak\n\t\t}\n\t}\n\tp.Data.RemoveAll.URL = curPath + \"/remove-all\"\n\n\tfor option, label := range selectedValueLabels {\n\t\tp.Data.FiltersAdded = append(p.Data.FiltersAdded, model.Filter{\n\t\t\tLabel: label,\n\t\t\tRemoveURL: fmt.Sprintf(\"%s/remove/%s\", curPath, option),\n\t\t\tID: option,\n\t\t})\n\t}\n\n\tif h.HasData && len(h.Breadcrumbs) == 0 {\n\t\t_, 
selected := selectedValueLabels[h.Links.Code.ID]\n\t\tp.Data.FilterList = append(p.Data.FilterList, model.List{\n\t\t\tLabel: h.Label,\n\t\t\tID: h.Links.Code.ID,\n\t\t\tSubNum: \"0\",\n\t\t\tSubURL: \"\",\n\t\t\tSelected: selected,\n\t\t\tHasData: true,\n\t\t})\n\t}\n\n\tfor _, child := range h.Children {\n\t\t_, selected := selectedValueLabels[child.Links.Code.ID]\n\t\tp.Data.FilterList = append(p.Data.FilterList, model.List{\n\t\t\tLabel: child.Label,\n\t\t\tID: child.Links.Code.ID,\n\t\t\tSubNum: strconv.Itoa(child.NumberofChildren),\n\t\t\tSubURL: fmt.Sprintf(\"redirect:/filters/%s/dimensions/%s/%s\", f.FilterID, name, child.Links.Code.ID),\n\t\t\tSelected: selected,\n\t\t\tHasData: child.HasData,\n\t\t})\n\t}\n\n\tp.Data.SaveAndReturn.URL = curPath + \"/update\"\n\tp.Data.Cancel.URL = fmt.Sprintf(\"/filters/%s/dimensions\", f.FilterID)\n\n\treturn p\n}", "func (r *DeviceConfigurationUserOverviewRequest) Get(ctx context.Context) (resObj *DeviceConfigurationUserOverview, err error) {\n\tvar query string\n\tif r.query != nil {\n\t\tquery = \"?\" + r.query.Encode()\n\t}\n\terr = r.JSONRequest(ctx, \"GET\", query, nil, &resObj)\n\treturn\n}", "func (status MirrorStatus) MarshalHierarchical() string {\n\thierarchy := hierr.Push(\n\t\tstatus.Name,\n\t\tfmt.Sprintf(\"state: %s\", status.State),\n\t)\n\n\tif status.ModifyDate > 0 {\n\t\thierarchy = hierr.Push(\n\t\t\thierarchy,\n\t\t\tfmt.Sprintf(\"modify date: %v\", status.ModifyDate),\n\t\t)\n\t}\n\n\treturn hierr.String(hierarchy)\n}", "func (*UserMembershipSearchResponse) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{135}\n}", "func (c *PathXClient) NewDescribeUPathRequest() *DescribeUPathRequest {\n\treq := &DescribeUPathRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (*UserMembershipSearchRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{136}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{11}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{11}\n}", "func (*DescribeUsersRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_account_proto_rawDescGZIP(), []int{2}\n}", "func (*GetUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_toit_api_organization_proto_rawDescGZIP(), []int{6}\n}", "func (*CreateUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_buf_alpha_registry_v1alpha1_user_proto_rawDescGZIP(), []int{2}\n}", "func (*BlockUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_protobuf_spec_connection_user_v1_proto_rawDescGZIP(), []int{10}\n}", "func (*CreateUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_users_proto_rawDescGZIP(), []int{5}\n}", "func (*CreateUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{10}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_user_v1_user_proto_rawDescGZIP(), []int{0}\n}", "func (*CreateUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_proto_rawDescGZIP(), []int{3}\n}", "func (s UpdateUserHierarchyStructureInput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_service_proto_rawDescGZIP(), 
[]int{0}\n}", "func (s HierarchyStructure) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (*DeleteUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_toit_api_organization_proto_rawDescGZIP(), []int{4}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_buf_alpha_registry_v1alpha1_user_proto_rawDescGZIP(), []int{1}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{1}\n}", "func (*ListUsersRequest) Descriptor() ([]byte, []int) {\n\treturn file_toit_api_organization_proto_rawDescGZIP(), []int{0}\n}", "func (*CreateUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_protob_user_service_proto_rawDescGZIP(), []int{6}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_thoughts_proto_rawDescGZIP(), []int{0}\n}", "func Structure(node string) ([]RPCRoutes, error) {\n\ttype r struct {\n\t\tResult []RPCRoutes `json:\"result\"`\n\t}\n\tp, e := rpc.RawRequest(rpc.Method.GetShardingStructure, node, []interface{}{})\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\tresult := r{}\n\tjson.Unmarshal(p, &result)\n\treturn result.Result, nil\n}", "func (*DeleteUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_toit_api_organization_proto_rawDescGZIP(), []int{5}\n}", "func CreateHierarchySearchPage(req *http.Request, bp core.Page, items []search.Item, dst dataset.DatasetDetails, f filter.Model, selectedValueLabels map[string]string, dims []dataset.VersionDimension, name, curPath, datasetID, referrer, query, apiRouterVersion, lang, serviceMessage string, emergencyBannerContent zebedee.EmergencyBanner) model.Hierarchy {\n\tp := model.Hierarchy{\n\t\tPage: bp,\n\t}\n\tp.BetaBannerEnabled = true\n\tp.FeatureFlags.SixteensVersion = sixteensVersion\n\tp.RemoveGalleryBackground = true\n\n\tmapCookiePreferences(req, &p.CookiesPreferencesSet, &p.CookiesPolicy)\n\n\tctx := req.Context()\n\tlog.Info(ctx, \"mapping api response models to hierarchy search page\", log.Data{\"filterID\": f.FilterID, \"datasetID\": datasetID, \"name\": name})\n\n\tpageTitle := helpers.TitleCaseStr(name)\n\tfor i := range dims {\n\t\tif dims[i].Name == name && len(dims[i].Label) > 0 {\n\t\t\tpageTitle = dims[i].Label\n\t\t}\n\t}\n\tp.DatasetTitle = dst.Title\n\tp.Data.DimensionName = pageTitle\n\tp.DatasetId = datasetID\n\tp.Data.IsSearchResults = true\n\tp.Data.Query = query\n\tp.Language = lang\n\tp.URI = fmt.Sprintf(\"%s?q=%s\", req.URL.Path, url.QueryEscape(req.URL.Query().Get(\"q\")))\n\tp.ServiceMessage = serviceMessage\n\tp.EmergencyBanner = mapEmergencyBanner(emergencyBannerContent)\n\n\ttitle := pageTitle\n\n\tp.IsInFilterBreadcrumb = true\n\tvar ok bool\n\tif p.Type, ok = hierarchyBrowseLookup[name]; !ok {\n\t\tp.Type = \"type\"\n\t}\n\n\tp.SearchDisabled = true\n\n\tp.Data.SearchURL = fmt.Sprintf(\"/filters/%s/dimensions/%s/search\", f.FilterID, name)\n\n\tversionURL, err := url.Parse(f.Links.Version.HRef)\n\tif err != nil {\n\t\tlog.Warn(ctx, \"unable to parse version url\", log.FormatErrors([]error{err}))\n\t}\n\tversionPath := strings.TrimPrefix(versionURL.Path, apiRouterVersion)\n\n\tp.Data.LandingPageURL = versionPath + \"#id-dimensions\"\n\tp.Breadcrumb = append(p.Breadcrumb, core.TaxonomyNode{\n\t\tTitle: dst.Title,\n\t\tURI: versionPath,\n\t}, core.TaxonomyNode{\n\t\tTitle: \"Filter options\",\n\t\tURI: fmt.Sprintf(\"/filters/%s/dimensions\", f.FilterID),\n\t}, core.TaxonomyNode{\n\t\tTitle: title,\n\t\tURI: fmt.Sprintf(\"/filters/%s/dimensions/%s\", f.FilterID, 
name),\n\t}, core.TaxonomyNode{\n\t\tTitle: \"Search results\",\n\t})\n\n\tp.FilterID = f.FilterID\n\tp.Data.Title = title\n\tp.Metadata.Title = title\n\n\tp.Data.GoBack.URL = referrer\n\n\tp.Data.AddAllFilters.URL = curPath + \"/add-all\"\n\tp.Data.RemoveAll.URL = curPath + \"/remove-all\"\n\n\tfor option, label := range selectedValueLabels {\n\t\tp.Data.FiltersAdded = append(p.Data.FiltersAdded, model.Filter{\n\t\t\tLabel: label,\n\t\t\tRemoveURL: fmt.Sprintf(\"%s/remove/%s\", curPath, option),\n\t\t\tID: option,\n\t\t})\n\t}\n\n\tif len(items) == 0 {\n\t\tp.Data.IsSearchError = true\n\t} else {\n\t\tfor _, item := range items {\n\t\t\t_, selected := selectedValueLabels[item.Code]\n\t\t\tp.Data.FilterList = append(p.Data.FilterList, model.List{\n\t\t\t\tLabel: item.Label,\n\t\t\t\tID: item.Code,\n\t\t\t\tSubNum: strconv.Itoa(item.NumberOfChildren),\n\t\t\t\tSubURL: fmt.Sprintf(\"redirect:/filters/%s/dimensions/%s/%s\", f.FilterID, name, item.Code),\n\t\t\t\tSelected: selected,\n\t\t\t\tHasData: item.HasData,\n\t\t\t})\n\t\t}\n\t}\n\n\tp.Data.SaveAndReturn.URL = fmt.Sprintf(\"/filters/%s/dimensions/%s/search/update\", f.FilterID, name)\n\tp.Data.Cancel.URL = fmt.Sprintf(\"/filters/%s/dimensions\", f.FilterID)\n\n\treturn p\n}", "func (*CreateUserRequest_Data) Descriptor() ([]byte, []int) {\n\treturn file_protodef_user_user_proto_rawDescGZIP(), []int{1, 0}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_proto_rawDescGZIP(), []int{2}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_users_proto_rawDescGZIP(), []int{4}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_examplepb_example_proto_rawDescGZIP(), []int{3}\n}", "func (*ListOrganizationUsersResponse) Descriptor() ([]byte, []int) {\n\treturn file_buf_alpha_registry_v1alpha1_user_proto_rawDescGZIP(), []int{10}\n}", "func (*ListOrganizationUsersRequest) Descriptor() ([]byte, []int) {\n\treturn file_buf_alpha_registry_v1alpha1_user_proto_rawDescGZIP(), []int{9}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_protob_user_service_proto_rawDescGZIP(), []int{5}\n}", "func (*CreateUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_pb_auth_proto_rawDescGZIP(), []int{1}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{9}\n}", "func (*GetUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_wechat_proto_rawDescGZIP(), []int{9}\n}", "func (m QueryHierarchyItem) MarshalJSON() ([]byte, error) {\n\t_parts := make([][]byte, 0, 1)\n\n\taO0, err := swag.WriteJSON(m.WorkItemTrackingResource)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_parts = append(_parts, aO0)\n\n\t// now for regular properties\n\tvar propsQueryHierarchyItem struct {\n\t\tChildren []*QueryHierarchyItem `json:\"children\"`\n\n\t\tClauses *WorkItemQueryClause `json:\"clauses,omitempty\"`\n\n\t\tColumns []*WorkItemFieldReference `json:\"columns\"`\n\n\t\tCreatedBy *IdentityReference `json:\"createdBy,omitempty\"`\n\n\t\tCreatedDate strfmt.DateTime `json:\"createdDate,omitempty\"`\n\n\t\tFilterOptions interface{} `json:\"filterOptions,omitempty\"`\n\n\t\tHasChildren bool `json:\"hasChildren,omitempty\"`\n\n\t\tID strfmt.UUID `json:\"id,omitempty\"`\n\n\t\tIsDeleted bool `json:\"isDeleted,omitempty\"`\n\n\t\tIsFolder bool `json:\"isFolder,omitempty\"`\n\n\t\tIsInvalidSyntax bool `json:\"isInvalidSyntax,omitempty\"`\n\n\t\tIsPublic bool `json:\"isPublic,omitempty\"`\n\n\t\tLastExecutedBy 
*IdentityReference `json:\"lastExecutedBy,omitempty\"`\n\n\t\tLastExecutedDate strfmt.DateTime `json:\"lastExecutedDate,omitempty\"`\n\n\t\tLastModifiedBy *IdentityReference `json:\"lastModifiedBy,omitempty\"`\n\n\t\tLastModifiedDate strfmt.DateTime `json:\"lastModifiedDate,omitempty\"`\n\n\t\tLinkClauses *WorkItemQueryClause `json:\"linkClauses,omitempty\"`\n\n\t\tName string `json:\"name,omitempty\"`\n\n\t\tPath string `json:\"path,omitempty\"`\n\n\t\tQueryRecursionOption interface{} `json:\"queryRecursionOption,omitempty\"`\n\n\t\tQueryType interface{} `json:\"queryType,omitempty\"`\n\n\t\tSortColumns []*WorkItemQuerySortColumn `json:\"sortColumns\"`\n\n\t\tSourceClauses *WorkItemQueryClause `json:\"sourceClauses,omitempty\"`\n\n\t\tTargetClauses *WorkItemQueryClause `json:\"targetClauses,omitempty\"`\n\n\t\tWiql string `json:\"wiql,omitempty\"`\n\t}\n\tpropsQueryHierarchyItem.Children = m.Children\n\n\tpropsQueryHierarchyItem.Clauses = m.Clauses\n\n\tpropsQueryHierarchyItem.Columns = m.Columns\n\n\tpropsQueryHierarchyItem.CreatedBy = m.CreatedBy\n\n\tpropsQueryHierarchyItem.CreatedDate = m.CreatedDate\n\n\tpropsQueryHierarchyItem.FilterOptions = m.FilterOptions\n\n\tpropsQueryHierarchyItem.HasChildren = m.HasChildren\n\n\tpropsQueryHierarchyItem.ID = m.ID\n\n\tpropsQueryHierarchyItem.IsDeleted = m.IsDeleted\n\n\tpropsQueryHierarchyItem.IsFolder = m.IsFolder\n\n\tpropsQueryHierarchyItem.IsInvalidSyntax = m.IsInvalidSyntax\n\n\tpropsQueryHierarchyItem.IsPublic = m.IsPublic\n\n\tpropsQueryHierarchyItem.LastExecutedBy = m.LastExecutedBy\n\n\tpropsQueryHierarchyItem.LastExecutedDate = m.LastExecutedDate\n\n\tpropsQueryHierarchyItem.LastModifiedBy = m.LastModifiedBy\n\n\tpropsQueryHierarchyItem.LastModifiedDate = m.LastModifiedDate\n\n\tpropsQueryHierarchyItem.LinkClauses = m.LinkClauses\n\n\tpropsQueryHierarchyItem.Name = m.Name\n\n\tpropsQueryHierarchyItem.Path = m.Path\n\n\tpropsQueryHierarchyItem.QueryRecursionOption = m.QueryRecursionOption\n\n\tpropsQueryHierarchyItem.QueryType = m.QueryType\n\n\tpropsQueryHierarchyItem.SortColumns = m.SortColumns\n\n\tpropsQueryHierarchyItem.SourceClauses = m.SourceClauses\n\n\tpropsQueryHierarchyItem.TargetClauses = m.TargetClauses\n\n\tpropsQueryHierarchyItem.Wiql = m.Wiql\n\n\tjsonDataPropsQueryHierarchyItem, errQueryHierarchyItem := swag.WriteJSON(propsQueryHierarchyItem)\n\tif errQueryHierarchyItem != nil {\n\t\treturn nil, errQueryHierarchyItem\n\t}\n\t_parts = append(_parts, jsonDataPropsQueryHierarchyItem)\n\treturn swag.ConcatJSON(_parts...), nil\n}", "func (*GetUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_toit_api_organization_proto_rawDescGZIP(), []int{7}\n}", "func (*ListUsersResponse) Descriptor() ([]byte, []int) {\n\treturn file_toit_api_organization_proto_rawDescGZIP(), []int{1}\n}", "func (s UpdateUserHierarchyStructureOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (*CreateUserResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_sample_proto_rawDescGZIP(), []int{3}\n}", "func (o *CompanyCompanyHierarchyV1GetCompanyHierarchyGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param id\n\tif err := r.SetPathParam(\"id\", swag.FormatInt64(o.ID)); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func GetAllUser(w http.ResponseWriter, r *http.Request) {\n\tusermanagement, err := 
getAllusermanagement()\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to get all user. %v\", err)\n\t}\n\n\tjson.NewEncoder(w).Encode(usermanagement)\n}", "func (r *DeviceComplianceUserOverviewRequest) Get(ctx context.Context) (resObj *DeviceComplianceUserOverview, err error) {\n\tvar query string\n\tif r.query != nil {\n\t\tquery = \"?\" + r.query.Encode()\n\t}\n\terr = r.JSONRequest(ctx, \"GET\", query, nil, &resObj)\n\treturn\n}", "func (*GetUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_server_pb_UserService_proto_rawDescGZIP(), []int{5}\n}", "func (*GetUsersParams) Descriptor() ([]byte, []int) {\n\treturn file_usermanage_usermanage_proto_rawDescGZIP(), []int{2}\n}", "func (*CreateUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_sample_proto_rawDescGZIP(), []int{2}\n}", "func (*GetUsersRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{7}\n}", "func FlattenExpand(resp *v0.ExpandResponse) ([]*v0.User, error) { return flatten(resp.TreeNode), nil }", "func (o SchemaConfigResponseOutput) RecursiveStructureDepth() pulumi.StringOutput {\n\treturn o.ApplyT(func(v SchemaConfigResponse) string { return v.RecursiveStructureDepth }).(pulumi.StringOutput)\n}", "func (*CMsgPerfectWorldUserLookupRequest) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{203}\n}", "func (*RefreshUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_directory_proto_rawDescGZIP(), []int{2}\n}", "func (*GetUsersResponse) Descriptor() ([]byte, []int) {\n\treturn file_user_proto_rawDescGZIP(), []int{8}\n}", "func (*UserRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_user_proto_rawDescGZIP(), []int{0}\n}", "func (*GetUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_user_proto_rawDescGZIP(), []int{3}\n}", "func (*GetUsersRequest) Descriptor() ([]byte, []int) {\n\treturn file_protob_user_service_proto_rawDescGZIP(), []int{3}\n}", "func (s DescribeUserHierarchyGroupInput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (*GetUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_users_proto_rawDescGZIP(), []int{0}\n}", "func (*CNETMsg_SplitScreenUser) Descriptor() ([]byte, []int) {\n\treturn file_artifact_networkbasetypes_proto_rawDescGZIP(), []int{6}\n}", "func (*GetUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_buf_alpha_registry_v1alpha1_user_proto_rawDescGZIP(), []int{3}\n}", "func (*UserSearchRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{14}\n}", "func (*CNETMsg_SplitScreenUser) Descriptor() ([]byte, []int) {\n\treturn file_netmessages_proto_rawDescGZIP(), []int{12}\n}", "func (*GetUsersRequest) Descriptor() ([]byte, []int) {\n\treturn file_thoughts_proto_rawDescGZIP(), []int{5}\n}", "func (*GetUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_service_proto_rawDescGZIP(), []int{2}\n}", "func (*GetUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_user_v1_user_proto_rawDescGZIP(), []int{6}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_directory_proto_rawDescGZIP(), []int{0}\n}", "func (*SearchUserResp) Descriptor() ([]byte, []int) {\n\treturn file_business_ext_proto_rawDescGZIP(), []int{7}\n}", "func (*UserSearchParams) Descriptor() ([]byte, []int) {\n\treturn file_protos_users_proto_rawDescGZIP(), []int{2}\n}", "func (*ReportUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_protobuf_spec_connection_user_v1_proto_rawDescGZIP(), []int{26}\n}", "func 
(*GetUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_user_service_proto_rawDescGZIP(), []int{0}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_wechat_proto_rawDescGZIP(), []int{4}\n}", "func (*CNETMsg_SplitScreenUser) Descriptor() ([]byte, []int) {\n\treturn file_csgo_netmessages_proto_rawDescGZIP(), []int{12}\n}", "func (*User) Descriptor() ([]byte, []int) {\n\treturn file_usermanage_usermanage_proto_rawDescGZIP(), []int{1}\n}", "func (fn GetUniverseStructuresStructureIDHandlerFunc) Handle(params GetUniverseStructuresStructureIDParams, principal interface{}) middleware.Responder {\n\treturn fn(params, principal)\n}", "func (*ListUserRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_user_v1_user_proto_rawDescGZIP(), []int{8}\n}", "func (*UserRequest) Descriptor() ([]byte, []int) {\n\treturn file_userapi_proto_rawDescGZIP(), []int{0}\n}", "func (*GetUsersRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{39}\n}", "func (*UserHelpRequest) Descriptor() ([]byte, []int) {\n\treturn file_users_proto_rawDescGZIP(), []int{1}\n}", "func (r *DeviceConfigurationUserStateSummaryRequest) Get(ctx context.Context) (resObj *DeviceConfigurationUserStateSummary, err error) {\n\tvar query string\n\tif r.query != nil {\n\t\tquery = \"?\" + r.query.Encode()\n\t}\n\terr = r.JSONRequest(ctx, \"GET\", query, nil, &resObj)\n\treturn\n}", "func (r *DeviceConfigurationUserStateSummaryRequest) Get(ctx context.Context) (resObj *DeviceConfigurationUserStateSummary, err error) {\n\tvar query string\n\tif r.query != nil {\n\t\tquery = \"?\" + r.query.Encode()\n\t}\n\terr = r.JSONRequest(ctx, \"GET\", query, nil, &resObj)\n\treturn\n}" ]
[ "0.57184553", "0.5469647", "0.5156814", "0.49289703", "0.48873046", "0.48873046", "0.4772959", "0.4772959", "0.44967076", "0.4447158", "0.4413114", "0.44060668", "0.44043696", "0.44002792", "0.43401435", "0.4336481", "0.42501748", "0.42497203", "0.42474967", "0.42249528", "0.41445872", "0.41301143", "0.4120663", "0.41180152", "0.41109067", "0.41109067", "0.41098452", "0.41063344", "0.4104838", "0.40870106", "0.40823928", "0.40687624", "0.40589362", "0.40501684", "0.4048952", "0.4045806", "0.40355325", "0.4025855", "0.4019911", "0.4015214", "0.40150046", "0.40025383", "0.39970982", "0.39965767", "0.3994921", "0.39938354", "0.39889225", "0.3981713", "0.39710167", "0.39703915", "0.39696676", "0.39670855", "0.39655203", "0.39635277", "0.3961763", "0.39614832", "0.39584553", "0.39575896", "0.39518723", "0.3950363", "0.39390078", "0.39290783", "0.39270493", "0.392251", "0.39194977", "0.39184576", "0.39182848", "0.39140126", "0.39121258", "0.39098105", "0.390773", "0.39036703", "0.3894757", "0.38904458", "0.3888241", "0.38854432", "0.3885438", "0.38826165", "0.38823965", "0.3876233", "0.38715655", "0.38711467", "0.38683715", "0.38676113", "0.38673323", "0.3865625", "0.38606372", "0.3857999", "0.38572583", "0.38559988", "0.38549864", "0.38543618", "0.385122", "0.38466957", "0.3846648", "0.38445118", "0.38392037", "0.38342234", "0.38338065", "0.38338065" ]
0.7537883
0
SDKResponseMetdata returns the response metadata for the DescribeUserHierarchyStructure request.
func (r *DescribeUserHierarchyStructureResponse) SDKResponseMetdata() *aws.Response { return r.response }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *DescribeUserResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DescribeOrganizationConfigurationResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DescribeAccountAuditConfigurationResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateUserResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DescribeDBClusterParameterGroupsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ModifySelfservicePermissionsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DescribeFileSystemsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *GetRelationalDatabaseMasterUserPasswordResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ListSecurityProfilesResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *RegisterUserResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateDBSecurityGroupResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ModifyClusterSubnetGroupResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *StartMonitoringMembersResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DescribeNotebookInstanceLifecycleConfigResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *GetDirectoryResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateReplicationGroupResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateMembersResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateMembersResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *GetUsageStatisticsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ListNotebookInstanceLifecycleConfigsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *GetDiscoverySummaryResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ListWebACLsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ListEndpointGroupsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *GetUserResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *NiftyCreatePrivateLanResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ListFargateProfilesResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *OutputService6TestCaseOperation1Response) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *OutputService6TestCaseOperation1Response) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *SetV2LoggingLevelResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *UpdateAccountAuditConfigurationResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DescribeTargetGroupsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateDBParameterGroupResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ListLoggingConfigurationsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *AddResourcePermissionsResponse) SDKResponseMetdata() *aws.Response 
{\n\treturn r.response\n}", "func (r *DescribeDimensionKeysResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *OutputService7TestCaseOperation1Response) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *OutputService7TestCaseOperation1Response) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ModifyMountTargetSecurityGroupsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ListGlobalTablesResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *OutputService14TestCaseOperation2Response) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *MergeBranchesByFastForwardResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DescribeRouteTablesResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DescribeDRTAccessResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *UpdateUserDefinedFunctionResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreatePackagingConfigurationResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateClusterResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateRelationalDatabaseFromSnapshotResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *UpdateVTLDeviceTypeResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateSystemInstanceResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateNetworkInterfaceResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *OutputService4TestCaseOperation2Response) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *GetInstanceAccessDetailsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *SetUICustomizationResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ApplySecurityGroupsToClientVpnTargetNetworkResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *GetDevicesResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *GetIntrospectionSchemaResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DescribeParametersResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ListTerminologiesResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *OutputService14TestCaseOperation1Response) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *GetResourceConfigHistoryResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *OutputService9TestCaseOperation1Response) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DescribeAggregationAuthorizationsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *AddPermissionResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *GetAccountSettingsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *GetAdminAccountResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DescribePipelineResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *OutputService5TestCaseOperation1Response) SDKResponseMetdata() *aws.Response {\n\treturn 
r.response\n}", "func (r *OutputService5TestCaseOperation1Response) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *EnableAWSOrganizationsAccessResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ListRepositoriesResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DescribeDetectorModelResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *CreateFileSystemFromBackupResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DeleteHumanTaskUiResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *InputService6TestCaseOperation1Response) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *AuthorizeDBSecurityGroupIngressResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ResizeClusterResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *OutputService11TestCaseOperation1Response) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *OutputService8TestCaseOperation1Response) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ListJournalKinesisStreamsForLedgerResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *OutputService1TestCaseOperation2Response) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *RemoveLayerVersionPermissionResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ModifyTrafficMirrorFilterNetworkServicesResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DeleteUserDefinedFunctionResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *GetDeploymentStrategyResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DescribeCreateAccountStatusResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *GetHomeRegionResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *InputService11TestCaseOperation2Response) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *InputService9TestCaseOperation2Response) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ApplySecurityGroupsToLoadBalancerResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *ListResourcesForWebACLResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *OutputService4TestCaseOperation1Response) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *OutputService4TestCaseOperation1Response) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *OutputService1TestCaseOperation1Response) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *OutputService1TestCaseOperation1Response) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *UpdateGatewayGroupResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DescribeDetectorResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *InputService7TestCaseOperation1Response) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DescribeTaskDefinitionResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r *DeleteMailboxPermissionsResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func (r 
*OutputService2TestCaseOperation1Response) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}" ]
[ "0.62536407", "0.5904975", "0.5872406", "0.5871081", "0.5858043", "0.5845549", "0.5798019", "0.57850534", "0.574214", "0.57293725", "0.5725915", "0.57254905", "0.57240504", "0.5721584", "0.5710341", "0.570328", "0.5691056", "0.5691056", "0.5689465", "0.5678145", "0.56704104", "0.5666377", "0.5656853", "0.56480616", "0.5646867", "0.5641043", "0.56324583", "0.56324583", "0.56320846", "0.56293905", "0.56230694", "0.56204146", "0.56201595", "0.5616141", "0.5613482", "0.56077975", "0.56077975", "0.5606318", "0.5605706", "0.5603771", "0.56034005", "0.56023145", "0.5585347", "0.5584433", "0.5577043", "0.5576986", "0.5575764", "0.5572443", "0.5570391", "0.55685717", "0.5568368", "0.5568295", "0.55657876", "0.55648565", "0.55637574", "0.5562656", "0.55600566", "0.5555697", "0.55519015", "0.5551657", "0.55508316", "0.55505985", "0.55498314", "0.55487007", "0.5544205", "0.5541619", "0.5541426", "0.5541426", "0.55408335", "0.5537967", "0.5537913", "0.5536992", "0.5534965", "0.5534545", "0.5533365", "0.5532357", "0.55312604", "0.5525957", "0.55252963", "0.55248463", "0.55231684", "0.5522509", "0.5518788", "0.55178505", "0.5516947", "0.5514817", "0.5512443", "0.5512344", "0.5512288", "0.5512179", "0.55115557", "0.55115557", "0.5511271", "0.5511271", "0.5510647", "0.5508648", "0.55068696", "0.55046976", "0.550333", "0.550329" ]
0.7379813
0
Func to remove lines if they contain the keyword 'TODO'
func removeText(fileName string) { //Skip main.go file if fileName != "main.go" { //Read file bytes from filename param, a success call return err==null, not err==EOF input, err := ioutil.ReadFile(fileName) if err != nil { log.Fatalln(err) } //Convert content to string text := string(input) //Replace keyword 'TODO' by regex re := regexp.MustCompile(".*TODO.*\r?\n") lines := re.ReplaceAllString(text, "") //Write string into a file err = WriteToFile(fileName, lines) if err != nil { log.Fatal(err) } } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (rign *CFGoReadIgnore) Clean() {\n}", "func (todo Todo) Remove() error {\n\treturn todo.updateInPlace(func(lineNumber int, line string) (string, bool) {\n\t\tif lineNumber == todo.Line {\n\t\t\treturn \"\", true\n\t\t}\n\n\t\treturn line, false\n\t})\n}", "func TestGoDocSkipLinesPass(t *testing.T) {\n\taccept(t, \"godoc_test.go\", \"TestGoDocSkipLinesPass\")\n}", "func shouldKeep(e bf.Expr) bool {\n\tc := e.Comment()\n\treturn len(c.Suffix) > 0 && strings.HasPrefix(c.Suffix[0].Token, keep)\n}", "func fileRkfilter(line []string) []string {\n\tvar cleanfile []string\n\tfor _, l := range line {\n\t\tif len(l) > 0 && l[0] != '#' {\n\t\t\tcleanfile = append(cleanfile, l)\n\t\t}\n\t}\n\treturn cleanfile\n}", "func (a *Annotation) remove(c context.Context, r io.Reader) (bool, error) {\n\tchange := &annotationRemove{}\n\n\terr := json.NewDecoder(r).Decode(change)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tmodified := false\n\n\tif change.Time {\n\t\ta.SnoozeTime = 0\n\t\tmodified = true\n\t}\n\n\tif change.Bugs != nil {\n\t\tset := stringset.NewFromSlice(a.Bugs...)\n\t\tfor _, bug := range change.Bugs {\n\t\t\tset.Del(bug)\n\t\t}\n\t\ta.Bugs = set.ToSlice()\n\t\tmodified = true\n\t}\n\n\t// Client passes in a list of comment indices to delete.\n\tfor _, i := range change.Comments {\n\t\tif i < 0 || i >= len(a.Comments) {\n\t\t\treturn false, errors.New(\"Invalid comment index\")\n\t\t}\n\t\ta.Comments = append(a.Comments[:i], a.Comments[i+1:]...)\n\t\tmodified = true\n\t}\n\n\tif change.GroupID {\n\t\ta.GroupID = \"\"\n\t\tmodified = true\n\t}\n\n\tif modified {\n\t\ta.ModificationTime = clock.Now(c)\n\t}\n\n\treturn false, nil\n}", "func functionWithWrappedLineComment() {\n\treturn nil\n}", "func TODO(project, label string, fullTime bool) bool {\n\treturn lists[\"todo\"].Print(project, label, fullTime)\n}", "func TODO(message string) error {\n\tpanic(Wrap(ErrTODO, message))\n}", "func removeLines(fn string, start, n int) (err error) {\n\tlogs.INFO.Println(\"Clear file -> \", fn)\n\tif n < 0 {\n\t\tn = store.getLines()\n\t}\n\tif n == 0 {\n\t\tlogs.INFO.Println(\"Nothing to clear\")\n\t\tseek = 0\n\t\treturn nil\n\t}\n\tlogs.INFO.Println(\"Total lines -> \", n)\n\tif start < 1 {\n\t\tlogs.WARNING.Println(\"Invalid request. 
line numbers start at 1.\")\n\t}\n\tvar f *os.File\n\tif f, err = os.OpenFile(fn, os.O_RDWR, 0); err != nil {\n\t\tlogs.CRITICAL.Println(\"Failed to open the file -> \", err)\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif cErr := f.Close(); err == nil {\n\t\t\terr = cErr\n\t\t}\n\t}()\n\tvar b []byte\n\tif b, err = ioutil.ReadAll(f); err != nil {\n\t\tlogs.CRITICAL.Println(\"Failed to reading the file -> \", err)\n\t\treturn\n\t}\n\tcut, ok := skip(b, start-1)\n\tif !ok {\n\t\tlogs.CRITICAL.Printf(\"less than %d lines -> \", start)\n\t\treturn\n\t}\n\tif n == 0 {\n\t\treturn nil\n\t}\n\ttail, ok := skip(cut, n)\n\tif !ok {\n\t\tlogs.CRITICAL.Printf(\"less than %d lines after line %d \", n, start)\n\t\treturn\n\t}\n\tt := int64(len(b) - len(cut))\n\tif err = f.Truncate(t); err != nil {\n\t\treturn\n\t}\n\t// Writing in the archive the bytes already with cut removed\n\tif len(tail) > 0 {\n\t\t_, err = f.WriteAt(tail, t)\n\t}\n\treturn\n}", "func stripCommentedOutLines(lines []string) []string {\n\tvar outputLines []string\n\tfor _, line := range lines {\n\t\tif commentedOutLineRegexp.MatchString(line) {\n\t\t\toutputLines = append(outputLines, \"\")\n\t\t} else {\n\t\t\toutputLines = append(outputLines, line)\n\t\t}\n\t}\n\treturn outputLines\n}", "func shouldIgnore(oldFile *bf.File) bool {\n\tfor _, s := range oldFile.Stmt {\n\t\tfor _, c := range s.Comment().After {\n\t\t\tif strings.HasPrefix(c.Token, gazelleIgnore) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\tfor _, c := range s.Comment().Before {\n\t\t\tif strings.HasPrefix(c.Token, gazelleIgnore) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func TODO(...interface{}) {\n\t// Indirection to prevent the compiler from ignoring unreachable code\n\tpanic(\"TODO\")\n}", "func RemoveLinesFromFile(filePath, text string) {\n\tinput, err := ioutil.ReadFile(filePath)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to read file %s: %v\\n\", filePath, err)\n\t\tos.Exit(1)\n\t}\n\n\tlines := strings.Split(string(input), \"\\n\")\n\n\ttmp := lines[:0]\n\tfor _, line := range lines {\n\t\tif !strings.Contains(line, text) {\n\t\t\ttmp = append(tmp, line)\n\t\t}\n\t}\n\toutput := strings.Join(tmp, \"\\n\")\n\terr = ioutil.WriteFile(filePath, []byte(output), 0644)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to write file %s: %v\\n\", filePath, err)\n\t\tos.Exit(1)\n\t}\n}", "func (t T) IgnoreLines(lines []string) T {\n\tfiltered := make([]string, 0, len(t))\n\tfor _, l := range t {\n\t\tif !containsString(lines, l) {\n\t\t\tfiltered = append(filtered, l)\n\t\t}\n\t}\n\treturn T(filtered)\n}", "func remove(ymlfile string, packageName string) error {\n\tappFS := afero.NewOsFs()\n\tyf, _ := afero.ReadFile(appFS, ymlfile)\n\tfi, err := os.Stat(ymlfile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar out []byte\n\ti := 0\n\tlines := bytes.Split(yf, []byte(\"\\n\"))\n\tfor _, line := range lines {\n\t\ti++\n\t\t// trim the line to detect the start of the list of packages\n\t\t// but do not write the trimmed string as it may cause an\n\t\t// unneeded file diff to the yml file\n\t\tsline := bytes.TrimLeft(line, \" \")\n\t\tif bytes.HasPrefix(sline, []byte(\"- \"+packageName)) {\n\t\t\tcontinue\n\t\t}\n\t\tout = append(out, line...)\n\t\tif i < len(lines) {\n\t\t\tout = append(out, []byte(\"\\n\")...)\n\t\t}\n\t}\n\terr = afero.WriteFile(appFS, ymlfile, out, fi.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func isIgnoredLine(s string) bool {\n\tif (len(s) == 0) || (s[0] == '#') {\n\t\treturn true\n\t}\n\treturn false\n}", "func 
stripCommentLines(input string) string {\n\tlines := strings.Split(input, \"\\n\")\n\tout := make([]string, 0, len(lines))\n\tfor _, line := range lines {\n\t\tif !strings.HasPrefix(line, \"#\") {\n\t\t\tout = append(out, line)\n\t\t}\n\t}\n\treturn strings.Join(out, \"\\n\")\n}", "func f() {\n\t_ = 12 // nolint // want `\\Qremove a space between // and \"nolint\" directive`\n\n\t_ = 30 // nolint2 foo bar // want `\\Qsuggestion: //nolint2 foo bar`\n\n\t/*\n\t\tnolint // want `\\Qdon't put \"nolint\" inside a multi-line comment`\n\t*/\n\n\t//go:baddirective // want `\\Qdon't use baddirective go directive`\n\t//go:noinline\n\t//go:generate foo bar\n\n\t//nolint:gocritic // want `\\Qhey, this is kinda upsetting`\n\n\t// This is a begining // want `\\Q\"begining\" may contain a typo`\n\t// Of a bizzare text with typos. // want `\\Q\"bizzare\" may contain a typo`\n\n\t// I can't give you a buisness advice. // want `\\Q\"buisness advice\" may contain a typo`\n\n\t// calender // want `\\Qfirst=calender`\n\t// cemetary // want `\\Qsecond=cemetary`\n\n\t// collegue // want `\\Qx=\"collegue\"`\n\t// commitee // want `\\Qx=\"\"`\n}", "func refmtdefineline(line string) string {\n\tstr := \"\"\n\tlastsp := false\n\tlastsharp := false\n\tvar lastch rune\n\tfor _, ch := range line {\n\t\tif ch == ' ' {\n\t\t\tif lastsp {\n\t\t\t} else if lastsharp {\n\t\t\t} else {\n\t\t\t\tstr += string(ch)\n\t\t\t}\n\t\t} else if ch == '\\t' {\n\t\t\tif !lastsp {\n\t\t\t\tstr += \" \"\n\t\t\t}\n\t\t\tlastsp = true\n\t\t} else {\n\t\t\tstr += string(ch)\n\t\t\tlastsp = false\n\t\t}\n\t\tif ch == '#' {\n\t\t\tlastsharp = true\n\t\t} else {\n\t\t\tlastsharp = false\n\t\t}\n\t\tlastch = ch\n\t}\n\tif lastch == ' ' {\n\t\tstr = strings.TrimSpace(str)\n\t}\n\treturn str\n}", "func removeMetadata(kustomized string) string {\n\treturn strings.Replace(\n\t\tkustomized,\n\t\t`metadata:\n name: config\n`,\n\t\t\"\",\n\t\t-1,\n\t)\n}", "func filter(fi os.FileInfo) bool {\n\treturn !strings.Contains(fi.Name(), \"test\")\n}", "func Todo(todo ...interface{}) {\n\tIssue(append([]interface{}{\"TODO: \"}, todo...))\n}", "func FilterEpdLines(lines []EpdLine, regex string) []EpdLine {\n\tmatchingLines := make([]EpdLine, 0)\n\tfor _, line := range lines {\n\t\tres, err := regexp.MatchString(regex, line.name)\n\t\tif err != nil {\n\t\t\t// shouldn't be possible (?)\n\t\t\tpanic(err)\n\t\t}\n\n\t\tif res {\n\t\t\tmatchingLines = append(matchingLines, line)\n\t\t}\n\t}\n\n\treturn matchingLines\n}", "func unused() {\n\t_ = strings.Contains(\"\", \"\")\n}", "func CheckForRemoval[T any](l, r *yaml.Node, label string, changes *[]*Change, breaking bool, orig, new T) {\n\tif l != nil && l.Value != \"\" && (r == nil || r.Value == \"\" && !utils.IsNodeArray(r) && !utils.IsNodeMap(r)) {\n\t\tCreateChange(changes, PropertyRemoved, label, l, r, breaking, orig, new)\n\t\treturn\n\t}\n\tif l != nil && r == nil {\n\t\tCreateChange(changes, PropertyRemoved, label, l, nil, breaking, orig, nil)\n\t}\n}", "func skip_limbo(){\nfor true{\nif loc>=len(buffer)&&!get_line(){\nreturn\n}\nfor loc<len(buffer)&&buffer[loc]!='@'{\nloc++\n}\nif loc++;loc<len(buffer){\nc:=buffer[loc]\nloc++\ncc:=ignore\nif c<int32(len(ccode)){\ncc= ccode[c]\n}\nif cc==new_section{\nbreak\n}\nswitch cc{\ncase format_code,'@':\ncase control_text:\nif c=='q'||c=='Q'{\nfor c= skip_ahead();c=='@';c= skip_ahead(){}\nif buffer[loc-1]!='>'{\nerr_print(\"! Double @ should be used in control text\")\n\n}\nbreak\n}\nfallthrough\ndefault:\nerr_print(\"! 
Double @ should be used in limbo\")\n\n}\n}\n}\n}", "func DropIgnoredIssues(issues []*Issue, fs *token.FileSet, f *ast.File) []*Issue {\n\tnolintLineNums := make(map[int]struct{})\n\tfor _, cg := range f.Comments {\n\t\tif strings.Contains(cg.Text(), \"NOLINT\") {\n\t\t\tlineNum := fs.File(cg.Pos()).Line(cg.Pos())\n\t\t\tnolintLineNums[lineNum] = struct{}{}\n\t\t}\n\t}\n\n\tvar kept []*Issue\n\tfor _, issue := range issues {\n\t\tif _, ok := nolintLineNums[issue.Pos.Line]; !ok {\n\t\t\tkept = append(kept, issue)\n\t\t}\n\t}\n\treturn kept\n}", "func stripComments(in []byte) []byte {\n\t// Issue 16376\n\tif i := bytes.Index(in, ignoreBelow); i >= 0 {\n\t\tin = in[:i+1]\n\t}\n\treturn regexp.MustCompile(`(?m)^#.*\\n`).ReplaceAll(in, nil)\n}", "func (wl *dummyLogger) Clean() {\n}", "func PoEMarkupLinesOnly(lines []string, small bool) template.HTML {\n\tres := ReplacePoEMarkup(strings.Join(lines, \"\\n\"), small)\n\tres = strings.Replace(res, \"\\n\", \"<br />\", -1)\n\treturn template.HTML(res)\n}", "func (tb *TextBuf) LinesDeleted(tbe *TextBufEdit) {\n\ttb.LinesMu.Lock()\n\ttb.MarkupMu.Lock()\n\n\tstln := tbe.Reg.Start.Ln\n\tedln := tbe.Reg.End.Ln\n\n\ttb.LineBytes = append(tb.LineBytes[:stln], tb.LineBytes[edln:]...)\n\ttb.Markup = append(tb.Markup[:stln], tb.Markup[edln:]...)\n\ttb.Tags = append(tb.Tags[:stln], tb.Tags[edln:]...)\n\ttb.HiTags = append(tb.HiTags[:stln], tb.HiTags[edln:]...)\n\ttb.ByteOffs = append(tb.ByteOffs[:stln], tb.ByteOffs[edln:]...)\n\n\ttb.PiState.Src.LinesDeleted(stln, edln)\n\n\tst := tbe.Reg.Start.Ln\n\ttb.LineBytes[st] = []byte(string(tb.Lines[st]))\n\ttb.Markup[st] = HTMLEscapeBytes(tb.LineBytes[st])\n\ttb.MarkupLines(st, st)\n\ttb.MarkupMu.Unlock()\n\ttb.LinesMu.Unlock()\n\t// probably don't need to do global markup here..\n}", "func filterOutput(output []byte) []byte {\n\tvar result [][]byte\n\tvar ignore bool\n\tdeprecation := []byte(\"DeprecationWarning\")\n\tregexLog := regexp.MustCompile(`^\\d{4}-\\d{2}-\\d{2}\\s\\d{2}:\\d{2}:\\d{2}`)\n\tregexSshWarning := regexp.MustCompile(`^Warning: Permanently added`)\n\tregexPythonWarning := regexp.MustCompile(`^.*warnings.warn`)\n\tregexUserWarning := regexp.MustCompile(`^.*UserWarning`)\n\tlines := bytes.Split(output, []byte{'\\n'})\n\tfor _, line := range lines {\n\t\tif ignore {\n\t\t\tignore = false\n\t\t\tcontinue\n\t\t}\n\t\tif bytes.Contains(line, deprecation) {\n\t\t\tignore = true\n\t\t\tcontinue\n\t\t}\n\t\tif !regexSshWarning.Match(line) &&\n\t\t\t!regexLog.Match(line) &&\n\t\t\t!regexPythonWarning.Match(line) &&\n\t\t\t!regexUserWarning.Match(line) {\n\t\t\tresult = append(result, line)\n\t\t}\n\t}\n\treturn bytes.Join(result, []byte{'\\n'})\n}", "func RemoveComment(source []byte) (result []byte) {\n\tvar (\n\t\tstateBlok, stateComment1, stateComment2 bool\n\t)\n\n\tfor i := 0; i <= len(source)-1; i++ {\n\n\t\tif stateBlok {\n\t\t\tresult = append(result, source[i])\n\t\t\tif beginOrEndBlok(source, i) {\n\t\t\t\tstateBlok = false\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tif stateComment1 {\n\t\t\tif endComment1(source, i) {\n\t\t\t\tstateComment1 = false\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tif stateComment2 {\n\t\t\tif endComment2(source, i) {\n\t\t\t\tstateComment2 = false\n\t\t\t\ti++\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tif beginOrEndBlok(source, i) {\n\t\t\tresult = append(result, source[i])\n\t\t\tstateBlok = true\n\t\t\tcontinue\n\t\t}\n\n\t\tif beginComment1(source, i) {\n\t\t\tstateComment1 = true\n\t\t\ti++\n\t\t\tcontinue\n\t\t}\n\n\t\tif beginComment2(source, i) {\n\t\t\tstateComment2 = 
true\n\t\t\ti++\n\t\t\tcontinue\n\t\t}\n\n\t\tresult = append(result, source[i])\n\n\t}\n\treturn\n}", "func (tdl *toDoList) markUnDone(fileName string, taskId int) {\n\tif taskId > len(*tdl)-1 || taskId < 0 {\n\t\tfmt.Println(\"Error: Invalid task id. Try again\")\n\t\tos.Exit(1)\n\t}\n\tif len(*tdl) == 0 {\n\t\tfmt.Println(\"To Do List empty\\nAdd new task\")\n\t\treturn\n\t}\n\tfor i, task := range *tdl {\n\t\tif i == taskId {\n\t\t\tif task.done == \"done\" {\n\t\t\t\ttask.done = \"undone\"\n\t\t\t\t(*tdl)[i] = task\n\t\t\t\terr := toDoListToFile(fileName, *tdl)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"Error:\", err)\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Println(\"'\" + task.taskName + \"' marked as incomplete\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"'\" + task.taskName + \"' has not been done yet. Nothing to do\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func (r errorReporter) todo(n ast.Node, msg string, args ...interface{}) {\n\tr.prefixed(\"todo\", n, msg, args...)\n}", "func (t hiddenApiAnnotationsDependencyTag) ExcludeFromApexContents() {}", "func stripPendingDeletes(pm *kafkazk.PartitionMap, zk kafkazk.Handler) []string {\n\t// Get pending deletions.\n\tpd, err := zk.GetPendingDeletion()\n\tif err != nil {\n\t\tfmt.Println(\"Error fetching topics pending deletion\")\n\t}\n\n\tif len(pd) == 0 {\n\t\treturn []string{}\n\t}\n\n\t// This is used as a set of topic names\n\t// pending deleting.\n\tpending := map[string]struct{}{}\n\n\tfor _, t := range pd {\n\t\tpending[t] = struct{}{}\n\t}\n\n\t// Traverse the partition map and drop\n\t// any pending topics.\n\n\tnewPL := kafkazk.PartitionList{}\n\tpendingExcluded := map[string]struct{}{}\n\tfor _, p := range pm.Partitions {\n\t\tif _, exists := pending[p.Topic]; !exists {\n\t\t\tnewPL = append(newPL, p)\n\t\t} else {\n\t\t\tpendingExcluded[p.Topic] = struct{}{}\n\t\t}\n\t}\n\n\tpm.Partitions = newPL\n\n\tpendingExcludedNames := []string{}\n\tfor t := range pendingExcluded {\n\t\tpendingExcludedNames = append(pendingExcludedNames, t)\n\t}\n\n\treturn pendingExcludedNames\n}", "func removeAll(source []byte, remove []byte) []byte {\n for bytes.Index(source, remove) > -1 {\n pnt := bytes.Index(source, remove)\n source = append(source[:pnt], source[pnt+12:]...)\n }\n return source\n}", "func removeOperationsForSet(lines []string, hashedSetName, operation string) []string {\n\toperationRegex := regexp.MustCompile(fmt.Sprintf(`\\%s %s`, operation, hashedSetName))\n\tgoodLines := []string{}\n\tfor _, line := range lines {\n\t\tif !operationRegex.MatchString(line) {\n\t\t\tgoodLines = append(goodLines, line)\n\t\t}\n\t}\n\treturn goodLines\n}", "func TODO() Context { return nullContext(0) }", "func modify(fid string) (error) {\n var (\n justCopy bool = false\n looking4SOBInsertionPoint bool = false\n looking4SubjectLine bool = true\n looking4BuglinkInsertionPoint bool = false\n subjectLine bool = false\n cveInsertionPoint bool = false\n buglinkInsertionPoint bool = false\n sobInsertionPoint bool = false\n existingCVEs []string\n existingBugIds []string\n existingAcks []string\n existingSobs []string\n existingCps []string\n existingBps []string\n buglinkBaseUrl string = \"http://bugs.launchpad.net/bugs/\"\n cpRC = regexp.MustCompile(\"cherry picked from commit ([0-9a-zA-Z]+)\")\n bpRC = regexp.MustCompile(\"backported from commit ([0-9a-zA-Z]+) upstream\")\n )\n\n // Open the input file, return the error if there is one.\n //\n inputFile, err := os.Open(fid)\n if err != nil {\n return 
err\n }\n defer inputFile.Close()\n\n // Open the temp. file, return the error if there is one.\n //\n dst, err := ioutil.TempFile(\"./\", \"mp__\")\n if err != nil {\n return err\n }\n defer dst.Close()\n\n scanner := bufio.NewScanner(inputFile)\n for scanner.Scan() {\n line := scanner.Text()\n\n if justCopy {\n dst.WriteString(line)\n dst.WriteString(\"\\n\")\n continue\n }\n\n // If we are looking for the Sob insertion point then we've handled\n // all the other cases and we just need to find the sob section.\n //\n if looking4SOBInsertionPoint {\n looking4SOBInsertionPoint = true\n if line == \"---\" {\n sobInsertionPoint = true\n looking4SOBInsertionPoint = false\n } else {\n if strings.Contains(line, \"Acked-by:\") {\n id := strings.Replace(line, \"Acked-by:\", \"\", -1)\n if !hasString(existingAcks, id) {\n existingAcks = append(existingAcks, id)\n }\n }\n\n if strings.Contains(line, \"Signed-off-by:\") {\n id := strings.Replace(line, \"Signed-off-by:\", \"\", -1)\n if !hasString(existingSobs, id) {\n existingSobs = append(existingSobs, id)\n }\n }\n\n if strings.Contains(line, \"cherry picked\") {\n result := cpRC.FindStringSubmatch(line)\n existingCps = append(existingCps, result[1])\n }\n\n if strings.Contains(line, \"backported from\") {\n result := bpRC.FindStringSubmatch(line)\n existingBps = append(existingBps, result[1])\n }\n }\n }\n\n if sobInsertionPoint {\n dst.WriteString(sobBlock(existingAcks, existingSobs, existingCps, existingBps))\n sobInsertionPoint = false\n justCopy = true\n }\n\n // After the first blank line after the subject line is where we\n // want to insert our CVE lines if we need to insert any.\n //\n if cveInsertionPoint {\n cveInsertionPoint = true\n if strings.Contains(line, \"CVE-\") {\n cve := strings.Replace(line, \"CVE-\", \"\", -1)\n existingCVEs = append(existingCVEs, cve)\n } else {\n // Add the CVE id here.\n //\n if args.CVE != \"\" {\n if !hasString(existingCVEs, args.CVE) {\n dst.WriteString(\"CVE-\")\n dst.WriteString(args.CVE)\n dst.WriteString(\"\\n\")\n dst.WriteString(\"\\n\") // One blank line after the CVE line (this assumes there is only one CVE)\n }\n }\n cveInsertionPoint = false\n looking4BuglinkInsertionPoint = true\n\n // We don't know at this point if we are going to insert a Buglink\n // so we can't write out the current line of text.\n }\n }\n\n // After the first blank line after the CVE lines is where the Buglinks are to be\n // inserted.\n //\n if looking4BuglinkInsertionPoint {\n if line != \"\" {\n looking4BuglinkInsertionPoint = false\n buglinkInsertionPoint = true\n }\n }\n\n if buglinkInsertionPoint {\n buglinkInsertionPoint = true\n // Just like the CVEs we skip past any existing BugLink lines and build a list of existing\n // buglinks so we don't duplicate any.\n //\n if strings.Contains(line, \"BugLink:\") {\n s := strings.Split(line, \"/\")\n id := s[len(s)-1]\n existingBugIds = append(existingBugIds, id)\n } else {\n if len(args.Bugs) > 0 {\n for _, id := range args.Bugs {\n if !hasString(existingBugIds, id) {\n dst.WriteString(fmt.Sprintf(\"BugLink: %s%s\\n\", buglinkBaseUrl, id))\n }\n }\n dst.WriteString(\"\\n\") // One blank line after the BugLink line\n }\n buglinkInsertionPoint = false\n looking4SOBInsertionPoint = true\n }\n }\n\n // Once we've found the subject line, we look for the first blank line after it.\n //\n if subjectLine {\n if line == \"\" {\n cveInsertionPoint = true\n subjectLine = false\n }\n }\n\n // All modificatins that we make are made after the subject line, therefore that's\n // the 
first thing we look for.\n //\n if looking4SubjectLine {\n if strings.Contains(line, \"Subject:\") {\n subjectLine = true\n looking4SubjectLine = false\n }\n }\n\n dst.WriteString(line)\n dst.WriteString(\"\\n\")\n }\n\n // If the scanner encountered an error, return it.\n //\n if err := scanner.Err(); err != nil {\n return err\n }\n\n os.Rename(dst.Name(), inputFile.Name())\n return nil\n}", "func (ridt *CFGoReadLexUnit) Clean() {\n}", "func LineAsTodo(line string) *Todo {\n\tif todo := lineAsUnreportedTodo(line); todo != nil {\n\t\treturn todo\n\t}\n\n\tif todo := lineAsReportedTodo(line); todo != nil {\n\t\treturn todo\n\t}\n\n\treturn nil\n}", "func Hey(remark string) string {\n\t// Write some code here to pass the test suite.\n\t// Then remove all the stock comments.\n\t// They're here to help you get started but they only clutter a finished solution.\n\t// If you leave them in, reviewers may protest!\n\treturn \"\"\n}", "func del(a []string, f string) []string {\n\tb := []string{} // init resulting array\n\tfor i := 0; i < len(a); i++ {\n\t\tif a[i] != f { // if it isn't the f string\n\t\t\tb = append(b, a[i]) // add to resulting array\n\t\t}\n\t}\n\treturn b\n}", "func keepLines(s string, n int) string {\n result := strings.Join(strings.Split(s, \"\\n\")[:n], \"\\n\")\n return strings.Replace(result, \"\\r\", \"\", -1)\n}", "func BeginLineNone() func(first bool) string {\n\treturn func(bool) string { return \"\" }\n}", "func filterViolationMsg(msg string) string {\n\ts := []rune(msg)\n\tfor i := 0; i < len(s); i++ {\n\t\tif i%3 != 0 {\n\t\t\ts[i] = '*'\n\t\t}\n\t}\n\treturn string(s)\n}", "func remove_meta_fields (body string) (string){ \n regex, _ := regexp.Compile(\"<meta .*?>\")\n result := regex.ReplaceAllString(body, \"\")\n return result\n}", "func keepSorted(x Expr) bool {\n\treturn hasComment(x, \"keep sorted\")\n}", "func (pl *ProgramLine) RemoveAllFollowingLines() {\n\tpl.parentBlock.lastLine = pl\n\n\tfor line := pl.next; line != nil; line = line.next {\n\t\tline.forgetCallsOut()\n\t}\n\n\tpl.next = nil\n}", "func (e *ObservableEditableBuffer) Clean() {\n\tbefore := e.getTagStatus()\n\n\te.treatasclean = false\n\top := e.putseq\n\te.putseq = e.seq\n\n\te.notifyTagObservers(before)\n\n\tif op != e.seq {\n\t\te.filtertagobservers = false\n\t}\n}", "func (p *Doc) Uncomment(key string) bool {\n\te, ok := p.props[key]\n\tif !ok {\n\t\treturn false\n\t}\n\n\tfor i := e.Prev(); nil != i; {\n\t\tdel := i\n\t\ti = i.Prev()\n\n\t\ttypo := del.Value.(*line).typo\n\t\tif isProperty(typo) || typo == ' ' {\n\t\t\tbreak\n\t\t}\n\n\t\tp.lines.Remove(del)\n\t}\n\n\treturn true\n}", "func filterOk(fs []string) bool {\n\tfor _, f := range fs {\n\t\tif f[0] != todotxt.ProjectTag && f[0] != todotxt.ContextTag {\n\t\t\treturn false\n\t\t}\n\t\tl, _ := utf8.DecodeLastRuneInString(f)\n\t\tif !unicode.IsLetter(l) && !unicode.IsDigit(l) && l != '_' {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func cleanLine(i int, line string) (string, string) {\n\ttrimmed := strings.TrimSpace(line)\n\t// strip comments\n\tstrippedComponents := strings.SplitN(trimmed, CommentCharacter, 2)\n\n\tswitch len(strippedComponents) {\n\tcase 0:\n\t\treturn \"\", \"\"\n\tcase 1:\n\t\treturn strippedComponents[0], \"\"\n\tdefault:\n\t\treturn strippedComponents[0], strippedComponents[1]\n\t}\n}", "func findAndDelete(data string, regex string) string {\n\tvar re = regexp.MustCompile(regex)\n\treturn re.ReplaceAllString(data, \"\")\n}", "func FilterRequestChangedLines(request *track.AnalyzeRequest, changedLines 
*ChangedLinesInfo) {\n\tfor _, file := range request.Files {\n\t\tif file.Status == tricium.Data_RENAMED || file.Status == tricium.Data_COPIED {\n\t\t\tdelete(*changedLines, file.Path)\n\t\t}\n\t}\n}", "func (r *readability) clean(s *goquery.Selection, tag string) {\n\tif s == nil {\n\t\treturn\n\t}\n\n\tisEmbed := false\n\tif tag == \"object\" || tag == \"embed\" || tag == \"iframe\" {\n\t\tisEmbed = true\n\t}\n\n\ts.Find(tag).Each(func(i int, target *goquery.Selection) {\n\t\tattributeValues := \"\"\n\t\tfor _, attribute := range target.Nodes[0].Attr {\n\t\t\tattributeValues += \" \" + attribute.Val\n\t\t}\n\n\t\tif isEmbed && videos.MatchString(attributeValues) {\n\t\t\treturn\n\t\t}\n\n\t\tif isEmbed && videos.MatchString(target.Text()) {\n\t\t\treturn\n\t\t}\n\n\t\ttarget.Remove()\n\t})\n}", "func fixLine(line string) string {\n\t// Removing comments\n\ti := strings.Index(line, \"#\")\n\tif i != -1 {\n\t\tline = line[:i]\n\t}\n\tif len(line) == 0 || line == \"\\n\" {\n\t\treturn \"\"\n\t}\n\t// Removing white spaces\n\tif strings.HasPrefix(line, \" \") {\n\t\tline = strings.TrimLeft(line, \" \")\n\t}\n\tline = strings.TrimRight(line, \" \")\n\treturn line\n}", "func (factory *identifierFactory) cleanPart(part string, visibility Visibility) string {\n\tclean := filterRegex.ReplaceAllLiteralString(part, \" \")\n\tcleanWords := sliceIntoWords(clean)\n\tcaseCorrectedWords := make([]string, 0, len(cleanWords))\n\tfor ix, word := range cleanWords {\n\t\tvar w string\n\t\tif ix == 0 && visibility == NotExported {\n\t\t\tw = strings.ToLower(word)\n\t\t} else {\n\t\t\t// Disable lint: the suggested \"replacement\" for this in /x/cases has fundamental\n\t\t\t// differences in how it works (e.g. 'JSON' becomes 'Json'; we don’t want that).\n\t\t\t// Furthermore, the cases (ha) that it \"fixes\" are not relevant to us\n\t\t\t// (something about better handling of various punctuation characters;\n\t\t\t// our words are punctuation-free).\n\t\t\t//nolint:staticcheck\n\t\t\tw = strings.Title(word)\n\t\t}\n\n\t\tcaseCorrectedWords = append(caseCorrectedWords, w)\n\t}\n\n\treturn strings.Join(caseCorrectedWords, \"\")\n}", "func cleanConditionally(e *goquery.Selection, tag string) {\n\tisList := tag == \"ul\" || tag == \"ol\"\n\n\te.Find(tag).Each(func(i int, node *goquery.Selection) {\n\t\t// First check if we're in a data table, in which case don't remove it\n\t\tif ancestor, hasTag := hasAncestorTag(node, \"table\", -1); hasTag {\n\t\t\tif attr, _ := ancestor.Attr(dataTableAttr); attr == \"1\" {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// If it is table, remove data table marker\n\t\tif tag == \"table\" {\n\t\t\tnode.RemoveAttr(dataTableAttr)\n\t\t}\n\n\t\tcontentScore := 0.0\n\t\tweight := getClassWeight(node)\n\t\tif weight+contentScore < 0 {\n\t\t\tnode.Remove()\n\t\t\treturn\n\t\t}\n\n\t\t// If there are not very many commas, and the number of\n\t\t// non-paragraph elements is more than paragraphs or other\n\t\t// ominous signs, remove the element.\n\t\tnodeText := normalizeText(node.Text())\n\t\tnCommas := strings.Count(nodeText, \",\")\n\t\tnCommas += strings.Count(nodeText, \",\")\n\t\tif nCommas < 10 {\n\t\t\tp := node.Find(\"p\").Length()\n\t\t\timg := node.Find(\"img\").Length()\n\t\t\tli := node.Find(\"li\").Length() - 100\n\t\t\tinput := node.Find(\"input\").Length()\n\n\t\t\tembedCount := 0\n\t\t\tnode.Find(\"embed\").Each(func(i int, embed *goquery.Selection) {\n\t\t\t\tif !rxVideos.MatchString(embed.AttrOr(\"src\", \"\")) 
{\n\t\t\t\t\tembedCount++\n\t\t\t\t}\n\t\t\t})\n\n\t\t\tcontentLength := strLen(nodeText)\n\t\t\tlinkDensity := getLinkDensity(node)\n\t\t\t_, hasFigureAncestor := hasAncestorTag(node, \"figure\", 3)\n\n\t\t\thaveToRemove := (!isList && li > p) ||\n\t\t\t\t(img > 1 && float64(p)/float64(img) < 0.5 && !hasFigureAncestor) ||\n\t\t\t\t(float64(input) > math.Floor(float64(p)/3)) ||\n\t\t\t\t(!isList && contentLength < 25 && (img == 0 || img > 2) && !hasFigureAncestor) ||\n\t\t\t\t(!isList && weight < 25 && linkDensity > 0.2) ||\n\t\t\t\t(weight >= 25 && linkDensity > 0.5) ||\n\t\t\t\t((embedCount == 1 && contentLength < 75) || embedCount > 1)\n\n\t\t\tif haveToRemove {\n\t\t\t\tnode.Remove()\n\t\t\t}\n\t\t}\n\t})\n}", "func removeNoise(title string) string {\n\topening := strings.Index(title, \"(\")\n\tclosing := strings.Index(title, \")\")\n\t// text must be enclosed by round brackets\n\tif opening >= 0 && closing >= 0 && closing > opening {\n\t\tremove := false\n\t\t// fmt.Println(\"removing noise...\")\n\t\tnoise := strings.ToLower(title[opening+1 : closing])\n\t\tif len(noise) > 0 {\n\t\t\t// fmt.Println(\"noise:\", noise)\n\t\t\tif strings.Contains(noise, \"edit\") ||\n\t\t\t\tstrings.Contains(noise, \"mix\") ||\n\t\t\t\tstrings.Contains(noise, \"cdm\") ||\n\t\t\t\tstrings.Contains(noise, \"cut\") ||\n\t\t\t\tstrings.Contains(noise, \"rmx\") ||\n\t\t\t\tstrings.Contains(noise, \"cover\") {\n\t\t\t\tremove = true\n\t\t\t}\n\t\t}\n\t\tif remove {\n\t\t\ttitle = strings.ReplaceAll(title[:opening]+title[closing+1:], \" \", \" \")\n\t\t\ttitle = strings.TrimSpace(strings.ReplaceAll(title, \" .\", \"\"))\n\t\t\tif *debug {\n\t\t\t\tlogger.Info(\"removeNoise: %s\", title)\n\t\t\t}\n\t\t}\n\t}\n\treturn title\n}", "func delete() {\n fmt.Println(\"deleting iptables drop log rules\")\n\n lines := iptablesSave()\n\n outLines := make([]string, 0)\n\n count := 0\n\n for _, line := range lines {\n\n logMatch := logPattern.FindStringSubmatch(line)\n if ( logMatch != nil ){\n count++\n fmt.Printf(\"DELETE RULE: %s\\n\", line)\n } else {\n outLines = append(outLines, line)\n }\n }\n\n iptablesRestore(outLines)\n\n fmt.Printf(\"%d iptables log rules deleted\\n\", count)\n\n}", "func (p *Parser) Remove(pattern string) error {\n return p.json.Remove(pattern)\n}", "func (as *AllState) DeleteBreakByFile(fpath string, line int) bool {\n\tfor i, br := range as.Breaks {\n\t\tif br.FPath == fpath && br.Line == line {\n\t\t\tas.Breaks = append(as.Breaks[:i], as.Breaks[i+1:]...)\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (self *dependencies) remove(definition string) {\n for index, dependency := range *self {\n if dependency == definition {\n (*self) = append((*self)[:index], (*self)[index+1:]...)\n }\n }\n}", "func (tdl *toDoList) cleanupDoneTasks(fileName string) {\n\tif len(*tdl) == 0 {\n\t\tfmt.Println(\"To Do List empty\\nAdd new task\")\n\t\treturn\n\t}\n\tvar deleteIndexes []int\n\tfor i, task := range *tdl {\n\t\tif task.done == \"done\" {\n\t\t\tdeleteIndexes = append(deleteIndexes, i)\n\t\t}\n\t}\n\tif len(deleteIndexes) == 0 {\n\t\tfmt.Println(\"All tasks are incomplete. 
Nothing to do\")\n\t\treturn\n\t}\n\terr := toDoListToFile(fileName, *tdl, deleteIndexes...)\n\tif err != nil {\n\t\tfmt.Println(\"Error:\", err)\n\t\tos.Exit(1)\n\t} else {\n\t\tfmt.Println(\"All the completed tasks cleared from To Do List\")\n\t\treturn\n\t}\n}", "func (eng *Engine) Clean() {\n\tfor _, f := range eng.Junk {\n\t\tif err := os.Remove(f); err != nil {\n\t\t\teng.logf(\"clean: %v\\n\", err)\n\t\t}\n\t}\n}", "func removeComments(json string) string {\n\tlines := str.Lines(json)\n\t// remove single line comments\n\tlines = str.Map(lines, func(line string) string {\n\t\tif str.Match(line, `^\\s*//`) {\n\t\t\treturn \"\"\n\t\t}\n\t\treturn line\n\t})\n\treturn strings.Join(lines, \"\\n\")\n}", "func Clean(ctx context.Context, r runner.Runner) error {\n\titems := []struct {\n\t\tgetCmd, deleteCmd []string\n\t\twarning string\n\t}{{\n\t\tgetCmd: []string{\"docker\", \"ps\", \"-a\", \"-q\", \"--filter\", \"name=step_[0-9]+|cloudbuild_|metadata\"},\n\t\tdeleteCmd: []string{\"docker\", \"rm\", \"-f\"},\n\t\twarning: \"Warning: there are left over step containers from a previous build, cleaning them.\",\n\t}, {\n\t\tgetCmd: []string{\"docker\", \"network\", \"ls\", \"-q\", \"--filter\", \"name=cloudbuild\"},\n\t\tdeleteCmd: []string{\"docker\", \"network\", \"rm\"},\n\t\twarning: \"Warning: a network is left over from a previous build, cleaning it.\",\n\t}, {\n\t\tgetCmd: []string{\"docker\", \"volume\", \"ls\", \"-q\", \"--filter\", \"name=homevol|cloudbuild_\"},\n\t\tdeleteCmd: []string{\"docker\", \"volume\", \"rm\"},\n\t\twarning: \"Warning: there are left over step volumes from a previous build, cleaning it.\",\n\t}}\n\n\tfor _, item := range items {\n\t\tvar output bytes.Buffer\n\t\tif err := r.Run(ctx, item.getCmd, nil, &output, os.Stderr, \"\"); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tstr := strings.TrimSpace(output.String())\n\t\tif str == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tlog.Println(item.warning)\n\n\t\targs := strings.Split(str, \"\\n\")\n\t\tdeleteCmd := append(item.deleteCmd, args...)\n\t\tif err := r.Run(ctx, deleteCmd, nil, nil, os.Stderr, \"\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (r *Report) remove(item *summarize.ElementStr) {\n\tret := ReportItem{\n\t\tName: item.Name,\n\t\tBefore: item.String(),\n\t}\n\t// TODO: compress this table if possible after all diffs have been\n\t// accounted for.\n\tswitch item.Kind {\n\tcase \"library\", \"const\", \"bits\", \"enum\", \"struct\",\n\t\t\"table\", \"union\", \"protocol\", \"alias\",\n\t\t\"struct/member\", \"table/member\", \"bits/member\",\n\t\t\"enum/member\", \"union/member\", \"protocol/member\":\n\t\tret.Conclusion = APIBreaking\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"Report.remove: unknown kind: %+v\", item))\n\t}\n\tr.addToDiff(ret)\n}", "func Ignore() Option { return ignore{} }", "func MarkOrUnmarkTask(fileName string, id int, taskType TaskType){\r\n\r\n\tfiledata := ReadFile(fileName)\r\n\tfiledataarray := strings.Split(filedata, \"\\n\")\r\n\tif filedata == \"\" {\r\n\t\tfmt.Println(\"There are no available tasks in file\")\r\n\t\treturn\r\n\t}else if id > len(filedataarray) {\r\n\t\tfmt.Printf(\"The task id %d is not available. 
Run todolist list to get the list of available ids\", id)\r\n\t\treturn\r\n\t}\r\n postitiontomark := id-1\r\n\tcondition := CheckIfDone(filedataarray[postitiontomark])\r\n\tswitch taskType {\r\n\tcase done:\r\n\t\tif condition {\r\n\t\t\tfmt.Println(\"task is already done\")\r\n\t\t\treturn\r\n\t\t} else {\r\n\t\t\tfiledataarray[postitiontomark] = filedataarray[postitiontomark] + \"-> DONE\"\r\n\t\t}\t\r\n\tcase undone:\r\n\t\tif condition {\r\n\t\t\tfiledataarray[postitiontomark] = strings.Replace(filedataarray[postitiontomark], \"-> DONE\", \"\", -1)\r\n\t\t} else {\r\n\t\t\tfmt.Printf(\"task %d is not done yet.\", id)\r\n\t\t\treturn\r\n\t\t}\t\r\n\tdefault:\r\n\t\tfmt.Println(\"not a valid task type\")\t\r\n\t}\r\n\tupdatedfiledata := strings.Join(filedataarray, \"\\n\")\r\n\tCreateFile(fileName)\r\n\tWriteToFile(fileName,updatedfiledata)\r\n}", "func (f *blockRemove) Filter(inFile *hclwrite.File) (*hclwrite.File, error) {\n\ttypeName, labels, err := parseAddress(f.address)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmatched := findBlocks(inFile.Body(), typeName, labels)\n\n\tfor _, b := range matched {\n\t\tinFile.Body().RemoveBlock(b)\n\t}\n\n\treturn inFile, nil\n}", "func (pl *ProgramLine) RemoveUnusedLine() {\n\tif pl.previous != nil {\n\t\tpl.previous.next = pl.next\n\t} else {\n\t\tpl.parentBlock.firstLine = pl.next\n\t}\n\n\tif pl.next != nil {\n\t\tpl.next.previous = pl.previous\n\t} else {\n\t\tpl.parentBlock.lastLine = pl.previous\n\t}\n\n\tpl.forgetCallsOut()\n}", "func ReplaceLines(data map[string]string)(err error){\n sourceDownload := map[string]map[string]string{}\n sourceDownload[\"ruleset\"] = map[string]string{}\n sourceDownload[\"ruleset\"][\"sourceDownload\"] = \"\"\n sourceDownload,err = GetConf(sourceDownload)\n pathDownloaded := sourceDownload[\"ruleset\"][\"sourceDownload\"]\n if err != nil {\n logs.Error(\"ReplaceLines error loading data from main.conf: \"+ err.Error())\n return err\n }\n \n //split path \n splitPath := strings.Split(data[\"path\"], \"/\")\n pathSelected := splitPath[len(splitPath)-2]\n\n saved := false\n rulesFile, err := os.Create(\"_creating-new-file.txt\")\n defer rulesFile.Close()\n var validID = regexp.MustCompile(`sid:(\\d+);`)\n\n newFileDownloaded, err := os.Open(pathDownloaded + pathSelected + \"/rules/\" + \"drop.rules\")\n\n scanner := bufio.NewScanner(newFileDownloaded)\n for scanner.Scan() {\n for x := range data{\n sid := validID.FindStringSubmatch(scanner.Text())\n if (sid != nil) && (sid[1] == string(x)) {\n if data[x] == \"N/A\"{\n saved = true\n continue\n }else{\n _, err = rulesFile.WriteString(string(data[x])) \n _, err = rulesFile.WriteString(\"\\n\") \n saved = true\n continue\n }\n }\n }\n if !saved{\n _, err = rulesFile.WriteString(scanner.Text())\n _, err = rulesFile.WriteString(\"\\n\") \n }\n saved = false\n }\n\n input, err := ioutil.ReadFile(\"_creating-new-file.txt\")\n err = ioutil.WriteFile(\"rules/drop.rules\", input, 0644)\n\n _ = os.Remove(\"_creating-new-file.txt\")\n\n if err != nil {\n logs.Error(\"ReplaceLines error writting new lines: \"+ err.Error())\n return err\n }\n return nil\n}", "func RemoveMarkdownTag(s string) string {\n\tres := s\n\tres = listLeadersReg.ReplaceAllString(res, \"$1\")\n\n\tres = headerReg.ReplaceAllString(res, \"\\n\")\n\tres = strikeReg.ReplaceAllString(res, \"\")\n\tres = codeReg.ReplaceAllString(res, \"\")\n\n\tres = emphReg.ReplaceAllString(res, \"$1\")\n\tres = emphReg2.ReplaceAllString(res, \"$1\")\n\tres = emphReg3.ReplaceAllString(res, \"$1\")\n\tres = 
emphReg4.ReplaceAllString(res, \"$1\")\n\tres = htmlReg.ReplaceAllString(res, \"$1\")\n\tres = setextHeaderReg.ReplaceAllString(res, \"\")\n\tres = footnotesReg.ReplaceAllString(res, \"\")\n\tres = footnotes2Reg.ReplaceAllString(res, \"\")\n\tres = imagesReg.ReplaceAllString(res, \"\")\n\tres = linksReg.ReplaceAllString(res, \"$1\")\n\tres = blockquoteReg.ReplaceAllString(res, \" \")\n\tres = refLinkReg.ReplaceAllString(res, \"\")\n\tres = atxHeaderReg.ReplaceAllString(res, \"$1\")\n\tres = atxHeaderReg2.ReplaceAllString(res, \"$2\")\n\tres = atxHeaderReg3.ReplaceAllString(res, \"$2\")\n\tres = atxHeaderReg4.ReplaceAllString(res, \"\")\n\tres = atxHeaderReg5.ReplaceAllString(res, \"$1\")\n\tres = atxHeaderReg6.ReplaceAllString(res, \"\\n\\n\")\n\treturn res\n}", "func UnlessMarked(err error, m *Marker, fn func(err error)) {\n\tUnless(err, RuleMarked(m), fn)\n}", "func (t *Ticket) Eliminate(words []string) []string {\n\tresults := []string{}\n\tfor _, word := range words {\n\t\tswitch word {\n\t\tcase \"Reserve\":\n\t\t\tresults = append(results, word)\n\t\tcase \"Field\":\n\t\t\tresults = append(results, word)\n\t\tcase \"Box\":\n\t\t\tresults = append(results, word)\n\t\tcase \"Top\":\n\t\t\tresults = append(results, word)\n\t\tcase \"Deck\":\n\t\t\tresults = append(results, word)\n\t\tcase \"Loge\":\n\t\t\tresults = append(results, word)\n\t\tcase \"Right\":\n\t\t\tresults = append(results, word)\n\t\tcase \"Left\":\n\t\t\tresults = append(results, word)\n\t\tcase \"Pavilion\":\n\t\t\tresults = append(results, word)\n\t\tcase \"Dugout\":\n\t\t\tresults = append(results, word)\n\t\tcase \"Club\":\n\t\t\tresults = append(results, word)\n\t\tcase \"Baseline\":\n\t\t\tresults = append(results, word)\n\t\t}\n\t}\n\n\treturn results\n}", "func deleteMarkdown(markdowns []*stashItem, target *stashItem) ([]*stashItem, error) {\n\tindex := -1\n\n\t// Operate on a copy to avoid any pointer weirdness\n\tmds := make([]*stashItem, len(markdowns))\n\tcopy(mds, markdowns)\n\n\tfor i, v := range mds {\n\t\tif v.Identifier() == target.Identifier() {\n\t\t\tindex = i\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif index == -1 {\n\t\terr := fmt.Errorf(\"could not find markdown to delete\")\n\t\tif debug {\n\t\t\tlog.Println(err)\n\t\t}\n\t\treturn nil, err\n\t}\n\n\treturn append(mds[:index], mds[index+1:]...), nil\n}", "func comment() {\r\n\t// ini komentar single line\r\n}", "func generatedFilter(path string, info os.FileInfo) bool {\n\tif strings.Contains(info.Name(), \"generated\") {\n\t\treturn false\n\t}\n\n\tf, err := os.Open(path + \"/\" + info.Name())\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Failed opening file %s: %v\", path+\"/\"+info.Name(), err))\n\t}\n\tdefer f.Close()\n\n\tscanner := bufio.NewScanner(f)\n\tscanner.Scan()\n\tline := scanner.Text()\n\n\tif strings.Contains(line, \"generated\") || strings.Contains(line, \"GENERATED\") {\n\t\treturn false\n\t}\n\treturn true\n}", "func StripInternalComments(schema []byte) ([]byte, error) {\n\tvar (\n\t\tscanner = bufio.NewScanner(bytes.NewReader(schema))\n\t\tout []byte\n\t\tre = regexp.MustCompile(\"^ *#!\")\n\t)\n\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tif !re.MatchString(line) {\n\t\t\tout = append(out, []byte(line+\"\\n\")...)\n\t\t}\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}", "func fn3() { // want fn3:`Deprecated: Don't use this\\.`\n}", "func if_section_start_make_pending(b bool){\nfor loc= 0;loc<len(buffer)&&unicode.IsSpace(buffer[loc]);loc++{}\nif 
len(buffer)>=2&&buffer[0]=='@'&&(unicode.IsSpace(buffer[1])||buffer[1]=='*'){\nchange_pending= b\n}\n}", "func IgnoreEmptyLines(ignoreEmptyLines bool) ConfigFunc {\n\treturn func(c *Config) {\n\t\tc.IgnoreEmptyLines = ignoreEmptyLines\n\t}\n}", "func (r *readability) cleanConditionally(e *goquery.Selection, tag string) {\n\tif e == nil {\n\t\treturn\n\t}\n\n\tisList := tag == \"ul\" || tag == \"ol\"\n\n\te.Find(tag).Each(func(i int, node *goquery.Selection) {\n\t\tcontentScore := 0.0\n\t\tweight := r.getClassWeight(node)\n\t\tif weight+contentScore < 0 {\n\t\t\tnode.Remove()\n\t\t\treturn\n\t\t}\n\n\t\t// If there are not very many commas, and the number of\n\t\t// non-paragraph elements is more than paragraphs or other\n\t\t// ominous signs, remove the element.\n\t\tnodeText := normalizeText(node.Text())\n\t\tnCommas := strings.Count(nodeText, \",\")\n\t\tnCommas += strings.Count(nodeText, \",\")\n\t\tif nCommas < 10 {\n\t\t\tp := node.Find(\"p\").Length()\n\t\t\timg := node.Find(\"img\").Length()\n\t\t\tli := node.Find(\"li\").Length() - 100\n\t\t\tinput := node.Find(\"input\").Length()\n\n\t\t\tembedCount := 0\n\t\t\tnode.Find(\"embed\").Each(func(i int, embed *goquery.Selection) {\n\t\t\t\tif !videos.MatchString(embed.AttrOr(\"src\", \"\")) {\n\t\t\t\t\tembedCount++\n\t\t\t\t}\n\t\t\t})\n\n\t\t\tlinkDensity := r.getLinkDensity(node)\n\t\t\tcontentLength := strLen(normalizeText(node.Text()))\n\t\t\thaveToRemove := (!isList && li > p) ||\n\t\t\t\t(img > 1 && float64(p)/float64(img) < 0.5 && !r.hasAncestorTag(node, \"figure\")) ||\n\t\t\t\t(float64(input) > math.Floor(float64(p)/3)) ||\n\t\t\t\t(!isList && contentLength < 25 && (img == 0 || img > 2) && !r.hasAncestorTag(node, \"figure\")) ||\n\t\t\t\t(!isList && weight < 25 && linkDensity > 0.2) ||\n\t\t\t\t(weight >= 25 && linkDensity > 0.5) ||\n\t\t\t\t((embedCount == 1 && contentLength < 75) || embedCount > 1)\n\n\t\t\tif haveToRemove {\n\t\t\t\tnode.Remove()\n\t\t\t}\n\t\t}\n\t})\n}", "func StripComments(line string) string {\n\tnewLine := line\n\tfor i, a := range line {\n\t\tif a == '#' {\n\t\t\tnewLine = newLine[:i]\n\t\t\tbreak\n\t\t}\n\t}\n\treturn newLine\n}", "func (entry *POEntry) mustApplyBlock(s *Scanner, pluralCount int) {\n\tentry.checkObsolete(s)\n\n\tswitch [2]string{s.Border, s.Prefix} {\n\tcase [2]string{\"#\", \"\"}, [2]string{\"# \", \"\"}:\n\t\tif entry.TComment != \"\" {\n\t\t\tentry.TComment += \"\\n\"\n\t\t}\n\t\tentry.TComment += s.Buffer.String()\n\tcase [2]string{\"#. 
\", \"\"}:\n\t\tentry.mustBeEmpty(s, entry.EComment)\n\t\tentry.EComment = s.Buffer.String()\n\tcase [2]string{\"#: \", \"\"}:\n\t\tentry.mustBeEmpty(s, entry.Reference)\n\t\tentry.Reference = s.Buffer.String()\n\tcase [2]string{\"#, \", \"\"}:\n\t\tentry.mustBeEmpty(s, entry.Flags.String())\n\t\tentry.Flags.Parse(s.Buffer.String())\n\tcase [2]string{prevBorder, \"msgctxt \"}:\n\t\tentry.mustBeEmpty(s, entry.PrevMsgCtxt)\n\t\tentry.PrevMsgCtxt = s.Buffer.String()\n\tcase [2]string{prevBorder, \"msgid \"}:\n\t\tentry.mustBeEmpty(s, entry.PrevMsgID)\n\t\tentry.PrevMsgID = s.Buffer.String()\n\tcase [2]string{prevBorder, \"msgid_plural \"}:\n\t\tentry.mustBeEmpty(s, entry.PrevMsgIDP)\n\t\tentry.PrevMsgIDP = s.Buffer.String()\n\tcase [2]string{\"\", \"msgctxt \"},\n\t\t[2]string{\"#~ \", \"msgctxt \"}:\n\t\tentry.mustBeEmpty(s, entry.MsgCtxt)\n\t\tentry.MsgCtxt = s.Buffer.String()\n\tcase [2]string{\"\", \"msgid \"},\n\t\t[2]string{\"#~ \", \"msgid \"}:\n\t\tentry.mustBeEmpty(s, entry.MsgID)\n\t\tentry.MsgID = s.Buffer.String()\n\tcase [2]string{\"\", \"msgid_plural \"},\n\t\t[2]string{\"#~ \", \"msgid_plural \"}:\n\t\tentry.mustBeEmpty(s, entry.MsgIDP)\n\t\tentry.MsgIDP = s.Buffer.String()\n\tcase [2]string{\"\", \"msgstr \"},\n\t\t[2]string{\"#~ \", \"msgstr \"}:\n\t\tentry.mustBeEmpty(s, entry.MsgStr)\n\t\tentry.MsgStr = s.Buffer.String()\n\tdefault:\n\t\tentry.updateMsgStrP(s, pluralCount)\n\t}\n}", "func filter(in token.Token) bool {\n\tswitch in.Kind() {\n\tcase token.TK_SHEBANG:\n\tcase token.TK_SPACE:\n\tdefault:\n\t\treturn false\n\t}\n\n\treturn true\n}", "func stripBlockComments(lines []string) []string {\n\tvar outputLines []string\n\tblockComment := false // Keeps track of whether we're currently inside a /* block comment */.\n\n\tfor _, line := range lines {\n\t\tif !blockComment {\n\t\t\t// We are not currently inside a /* block comment */. Does this line have one or more\n\t\t\t// single-line block comments?\n\t\t\tmatch := singleLineBlockCommentRegexp.FindStringSubmatch(line)\n\t\t\tfor len(match) > 0 {\n\t\t\t\t// Remove the single-line block-comment and proceed as if it was never there.\n\t\t\t\tline = match[1] + match[2]\n\t\t\t\tmatch = singleLineBlockCommentRegexp.FindStringSubmatch(line)\n\t\t\t}\n\n\t\t\t// Does a multi-line block-comment start on the current line?\n\t\t\tmatch = blockCommentStartRegexp.FindStringSubmatch(line)\n\t\t\tif len(match) > 0 {\n\t\t\t\t// Block comment found. Keep the portion of the line that precedes the\n\t\t\t\t// \"/*\" characters.\n\t\t\t\tblockComment = true\n\t\t\t\toutputLines = append(outputLines, match[1])\n\t\t\t} else {\n\t\t\t\t// No block comment found. We can keep the current line as-is.\n\t\t\t\toutputLines = append(outputLines, line)\n\t\t\t}\n\t\t} else {\n\t\t\t// We are currently inside a /* block comment */. Does it end on the current line?\n\t\t\tmatch := blockCommentEndRegexp.FindStringSubmatch(line)\n\t\t\tif len(match) > 0 {\n\t\t\t\t// Found the end of the block comment. Keep the portion of the line that succeeds\n\t\t\t\t// the \"*/\" characters.\n\t\t\t\tblockComment = false\n\t\t\t\toutputLines = append(outputLines, match[1])\n\t\t\t} else {\n\t\t\t\t// We are still inside a block comment. 
The current line is replaced with a blank\n\t\t\t\t// line.\n\t\t\t\toutputLines = append(outputLines, \"\")\n\t\t\t}\n\t\t}\n\t}\n\n\treturn outputLines\n}", "func (s *Srt) DeleteIf(judge func(int, *Subtitle) bool) int {\n\tvar deleted int = 0\n\ts.ForEach(func(i int, sub *Subtitle) bool {\n\t\tif judge(i, sub) {\n\t\t\tsub.MarkAsDeleted()\n\t\t\tdeleted++\n\t\t\ts.count--\n\t\t}\n\t\treturn true\n\t})\n\treturn deleted\n}", "func LicenseFilter(input string) string {\n\tvar index int\n\tbuf := bufio.NewReader(strings.NewReader(input))\n\tfor {\n\t\tline, err := buf.ReadString('\\n')\n\t\tif !strings.HasPrefix(line, \"#\") {\n\t\t\treturn input[index:]\n\t\t}\n\t\tindex += len(line)\n\t\tif err == io.EOF {\n\t\t\treturn input[index:]\n\t\t}\n\t}\n}", "func skipLog(t *testing.T) {\n\t//t.Skip(fmt.Sprintf(\"skipping %s\", r.CurrentFunc(1)))\n\tt.Skip(fmt.Sprintf(\"skipping %s\", r.CurrentFunc(2)))\n}", "func (i *ignore) ignoreBlock(node *parser.Node, from string) {\n\tif _, ok := i.from[from]; ok {\n\t\ti.lines = append(i.lines, model.Range(node.StartLine, node.EndLine)...)\n\t}\n}", "func IgnoringNewLines() StringOpt {\n\treturn func(c *AssertableString) {\n\t\tc.actual = c.actual.AddDecorator(values.RemoveNewLines)\n\t}\n}", "func (t *Tokeniser) ignore() {\n\tt.start = t.pos\n\tt.prevLine = t.line\n\tt.prevCol = t.col\n}", "func removeLineColor(list *tview.List, id int) {\n\tcurrentText, currentSecondary := list.GetItemText(id)\n\tre := regexp.MustCompile(`^\\[[a-zA-Z0-9:-]+\\]`)\n\ttmp := re.ReplaceAllString(currentText, \"${1}\")\n\tlist.SetItemText(id, tmp, currentSecondary)\n\n}", "func Remove(base string) (err error) {\n\thooksFolder := filepath.Join(base, hooksPath())\n\n\t// The hooks folder must exist.\n\texists, err := helper.DoesPathExistErr(hooksFolder)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif !exists {\n\t\treturn fmt.Errorf(\"the hooks folder does not exist\")\n\t}\n\n\tif err = EnsureRemoveComment(preCommitFilepath(base), defaultBashComment); err != nil {\n\t\treturn\n\t}\n\n\tif err = EnsureRemoveComment(postCommitFilepath(base), defaultBashComment); err != nil {\n\t\treturn\n\t}\n\n\tklog.InfoS(\"Removed gogit replace lines\",\n\t\t\"from-pre-commit\", preCommitFilepath(base),\n\t\t\"from-post-commit\", postCommitFilepath(base),\n\t)\n\n\treturn nil\n}" ]
[ "0.54466265", "0.5368636", "0.5209538", "0.51757854", "0.51733", "0.510984", "0.5052854", "0.5028639", "0.501462", "0.49272683", "0.49216142", "0.48630762", "0.4812344", "0.4799068", "0.47983843", "0.4777912", "0.47583553", "0.4754255", "0.46990237", "0.4693532", "0.4676753", "0.46708086", "0.46526894", "0.4607296", "0.46027607", "0.45696995", "0.45658904", "0.4550466", "0.45282844", "0.44956166", "0.44926247", "0.4489812", "0.44890213", "0.44863003", "0.44745353", "0.44739473", "0.44681302", "0.44519737", "0.44515654", "0.44445065", "0.44309145", "0.44272", "0.4421753", "0.44185922", "0.44176376", "0.44167808", "0.44153643", "0.4412703", "0.44118845", "0.43929073", "0.43919885", "0.4390396", "0.43819407", "0.43772304", "0.43725988", "0.4370498", "0.43614924", "0.4360481", "0.4352564", "0.43303472", "0.43252409", "0.43075895", "0.43062708", "0.43062696", "0.4299342", "0.42986587", "0.42957285", "0.42910987", "0.42653322", "0.42637527", "0.42604074", "0.4257323", "0.4255206", "0.42523405", "0.42491895", "0.42428812", "0.4226952", "0.42202178", "0.4218893", "0.4215281", "0.42125076", "0.42067516", "0.4205223", "0.4198501", "0.4198203", "0.41970822", "0.41890562", "0.41890478", "0.41861737", "0.41845137", "0.4183563", "0.41824815", "0.41758198", "0.41681483", "0.41654432", "0.41635564", "0.41577345", "0.4156832", "0.4155295", "0.41534987" ]
0.6194988
0
Function to write a string into a file, with a filename parameter
func WriteToFile(filename string, data string) error {
	file, err := os.Create(filename)
	if err != nil {
		return err
	}
	defer file.Close()

	_, err = io.WriteString(file, data)
	if err != nil {
		return err
	}
	return file.Sync()
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func FileWriteString(f *os.File, s string) (int, error)", "func _file_write(call otto.FunctionCall) otto.Value {\n\tfilepath, _ := call.Argument(0).ToString()\n\tdata, _ := call.Argument(1).ToString()\n\tif !fileExists(filepath) {\n\t\tos.Create(filepath)\n\t}\n\n\tfile, err := os.OpenFile(filepath, os.O_APPEND|os.O_WRONLY, 0644)\n\tif err != nil {\n\t\tjsThrow(call, err)\n\t}\n\n\tdefer file.Close()\n\n\t_, err = file.WriteString(data)\n\tif err != nil {\n\t\tjsThrow(call, err)\n\t}\n\treturn otto.Value{}\n}", "func StringToFile(fileName string, stringToWrite string) {\n\n\tfmt.Println(\"Writing: \" + fileName)\n\tf, err := os.Create(fileName)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tn, err := io.WriteString(f, stringToWrite)\n\tif err != nil {\n\t\tfmt.Println(n, err)\n\t}\n\tf.Close()\n}", "func WriteStringToFile(text, path string) error {\n\tf, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0644)\n\tdefer f.Close()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tf.WriteString(text)\n\treturn nil\n}", "func WriteStringToFile(filename string, c []uint8) error {\n\tf, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0755)\n\tCheck(err)\n\tfile, err := f.WriteString(string(c))\n\tCheck(err)\n\tf.Sync()\n\n\tfmt.Printf(\"wrote %d bytes\\n\", file)\n\treturn err\n}", "func write_file(outFile, contents string) error {\n\n\terr := ioutil.WriteFile(outFile, []byte(contents), 0644)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn nil\n}", "func WriteStringToFile(path string, s string) error {\n\treturn WriteToFile(path, strings.NewReader(s))\n}", "func WriteStringToFile(str string, destDir string, resourceName string, suffix string) error {\n\terr := os.MkdirAll(destDir, os.ModePerm)\n\tFatalIf(err)\n\tname := fmt.Sprintf(\"%s_%s\", resourceName, suffix)\n\ttempPattern := fmt.Sprintf(\"_%s_%s\", resourceName, suffix)\n\tglog.V(6).Infof(\"Writing file %s\", name)\n\ttempFile, err := ioutil.TempFile(destDir, tempPattern)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Error creating temp file in %s for resource %s\",\n\t\t\tdestDir, name)\n\t}\n\tw := bufio.NewWriter(tempFile)\n\t_, err = w.WriteString(str)\n\tif err != nil {\n\t\ttempFile.Close()\n\t\tos.Remove(tempFile.Name())\n\t\treturn errors.Wrapf(err, \"Error writing bytes to file %s\",\n\t\t\ttempFile.Name())\n\t}\n\terr = w.Flush()\n\tif err != nil {\n\t\ttempFile.Close()\n\t\tos.Remove(tempFile.Name())\n\t\treturn errors.Wrapf(err, \"Error flushing buffer\")\n\t}\n\terr = tempFile.Sync()\n\tif err != nil {\n\t\ttempFile.Close()\n\t\tos.Remove(tempFile.Name())\n\t\treturn errors.Wrapf(err, \"Error syncing file\")\n\t}\n\terr = os.Rename(tempFile.Name(), path.Join(destDir, name))\n\tif err != nil {\n\t\ttempFile.Close()\n\t\tos.Remove(tempFile.Name())\n\t\treturn err\n\t}\n\treturn nil\n}", "func writeFile(fileName, message string) {\n\tfile, err := os.Create(fileName)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer file.Close()\n\tfile.WriteString(message)\n}", "func WriteFileString(input string, file string, overwrite bool) error {\r\n\treturn WriteFile([]byte(input), file, overwrite)\r\n}", "func lisp_write_file(filename string, text string) {\n\tfd, err := os.Create(filename)\n\tif (err != nil) {\n\t\tlprint(\"Could not create file %s\", filename)\n\t\treturn\n\t}\n\t_, err = fd.WriteString(text)\n\tif (err != nil) {\n\t\tlprint(\"Could not write string to file %s\", filename)\n\t\treturn\n\t}\n\tfd.Close()\n}", "func (f *Files) WriteFileString(name, data string) error {\n\treturn f.WriteFile(name, 
[]byte(data))\n}", "func (t *testRunner) writeString(file, data string) {\n\tt.Helper()\n\n\tnewf, err := os.CreateTemp(t.dir, \"\")\n\trequire.NoError(t, err)\n\n\t_, err = newf.WriteString(data)\n\trequire.NoError(t, err)\n\trequire.NoError(t, newf.Close())\n\n\terr = os.Rename(newf.Name(), file)\n\trequire.NoError(t, err)\n}", "func writeFile(fileName, data string) error {\n\tf, err := os.Create(fileName)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = f.WriteString(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tf.Sync()\n\treturn nil\n}", "func (ft *DTDFormatter) writeToFile(filepath string, s string) error {\n\tf, err := os.OpenFile(filepath, os.O_APPEND|os.O_WRONLY, 0700)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\t_, err = io.WriteString(f, s)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn f.Sync()\n}", "func appendStringToFile(path, text string) error {\n\tf, err := os.OpenFile(path, os.O_APPEND|os.O_WRONLY, os.ModeAppend)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\t_, err = f.WriteString(text)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func writeToFile(destFile *os.File, text string) {\n\n\t_, err2 := destFile.WriteString(text + \"\\n\")\n\n\tif err2 != nil {\n\t\tlog.Fatal(err2)\n\t}\n}", "func writeFile(iptvline string){\n\n\t//check if file exists\n\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\t// create file if not exists\n\t\tif os.IsNotExist(err) {\n\t\t\tvar file, err = os.Create(path)\n\t\t\tif isError(err) { return }\n\t\t\tdefer file.Close()\n\t\t}\n\t}\n\n\n\tfileHandle, _ := os.OpenFile(path, os.O_APPEND, 0666)\n\twriter := bufio.NewWriter(fileHandle)\n\tdefer fileHandle.Close()\n\n\tfmt.Fprintln(writer, iptvline)\n\twriter.Flush()\n}", "func writeFile(data string, fileName string) error {\n\tprint(&msg{Message: \"writeFile(): Entering\", LogLevel: \"debug\"})\n\tdefer print(&msg{Message: \"writeFile(): Returning\", LogLevel: \"debug\"})\n\n\tif err := ioutil.WriteFile(fileName, []byte(data), 0600); err != nil {\n\t\treturn fmt.Errorf(\"writeFile(): Couldn't write file %s. Error %s\", fileName, err.Error())\n\t}\n\n\tprint(&msg{Message: fmt.Sprintf(\"writeFile(): Wrote %s\", fileName), LogLevel: \"info\"})\n\treturn nil\n}", "func writeFile(file string, content string) {\n d1 := []byte(content)\n err := ioutil.WriteFile(file, d1, 0644)\n if err!=nil {\n os.Stderr.WriteString(err.Error() + \"\\n\")\n }\n}", "func (s *Spectrum) WriteFile(path string, fmt string, perm os.FileMode) error {\n\tvar strFunc func() string\n\tif fmt == \"ascii\" {\n\t\tstrFunc = s.String\n\t}\n\t// if fmt == \"tsv\" { strFunc = s.TSVString }\n\t// if fmt == \"csv\" { strFunc = s.CSVString }\n\t// if fmt == \"matlab\" { strFunc = s.MATLABString }\n\t// if fmt == \"json\" { strFunc = s.JSONString }\n\treturn ioutil.WriteFile(path, []byte(strFunc()), perm)\n}", "func write(fileName,content string) {\n\tf, err := os.Create(fileName)\n\tif err!=nil{\n\t\tmessage.FormatError(func() {\n\t\t\tpanic(fmt.Sprintf(\"Create %v fail! error:%v\",fileName,err.Error()))\n\t\t})\n\t}\n\n\tw := bufio.NewWriter(f)\n\t_, err = w.WriteString(content)\n\tif err!=nil{\n\t\tmessage.FormatError(func() {\n\t\t\tpanic(fmt.Sprintf(\"Write %v fail! 
error:%v\",fileName,err.Error()))\n\t\t})\n\t}\n\tw.Flush()\n\tf.Close()\n\n\tmessage.Success(fmt.Sprintf(\"Created %v\",fileName))\n}", "func makeFileString(baseDir, fileName string, s string) string {\n\treturn makeFile(baseDir, fileName, []byte(s))\n}", "func spewFile(path string, data string, perm os.FileMode) {\n\t_ = os.WriteFile(path, []byte(data+\"\\n\"), perm)\n}", "func createStringWriter(jsonOutputPath string) func(string, bool) {\n\t// Open the JSON file we will start writing to\n\tf, err := os.Create(jsonOutputPath)\n\t// Check for err, gracefully error\n\tcheck(err)\n\n\t// Return the function that will be used to write to the JSON file we decalred above\n\treturn func(data string, close bool) {\n\t\t// Write to the JSON file\n\t\t_, err := f.WriteString(data)\n\t\t// Check for error, gracefully handle\n\t\tcheck(err)\n\t\t// If close == true, then there's no more data left to write to close the file\n\t\tif close {\n\t\t\tf.Close()\n\t\t}\n\t}\n}", "func writeToFile(fileName string, content string) error {\n\n\tlog.Printf(\"Writing '%s' into file %s\", content, fileName)\n\n\t// try opening the file\n\tf, err := os.OpenFile(fileName, os.O_WRONLY, os.ModePerm)\n\tif err != nil {\n\t\tlog.Printf(\"Error by opening %s: %v\", fileName, err)\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\t// write to file\n\t_, err = f.Write([]byte(content))\n\tif err != nil {\n\t\tlog.Printf(\"Error by writing to %s: %v\", fileName, err)\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func FileName(f *os.File,) string", "func WriteTextFile(filePath, contents string) error {\n return WriteBinaryFile(filePath, []byte(contents))\n}", "func writetofile(somethingToWrite string) {\r\n\r\n\t// https://golang.org/pkg/os/#OpenFile\r\n\r\n\t// If the file doesn't exist, create it, or append to the file\r\n\tf, err := os.OpenFile(\"./output.txt\", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)\r\n\tif err != nil {\r\n\t\tlog.Fatal(err)\r\n\t}\r\n\t//if _, err := f.Write([]byte(\"appended some data\\n\")); err != nil {\r\n\tif _, err := f.Write([]byte(somethingToWrite + \"\\n\")); err != nil {\r\n\t\tlog.Fatal(err)\r\n\t}\r\n\tif err := f.Close(); err != nil {\r\n\t\tlog.Fatal(err)\r\n\t}\r\n}", "func genFile(filePath, fileText string) error {\n\tfw, err := os.OpenFile(filePath, syscall.O_CREAT, 0644)\n\tdefer fw.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = fw.Write([]byte(fileText))\n\treturn err\n}", "func writeFile(contents []byte, filename string) {\n\terr := ioutil.WriteFile(filename, contents, 0644)\n\tcheckIfError(err)\n}", "func writeToFile(fileName, line string) {\n\tf, err := os.OpenFile(fileName, os.O_APPEND|os.O_WRONLY, 0600)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer f.Close()\n\tif _, err = f.WriteString(line); err != nil {\n\t\tpanic(err)\n\t}\n}", "func AppendToFile(newTask, fileName string){\r\n\r\n\tfile, err := os.OpenFile(fileName, os.O_APPEND|os.O_WRONLY, 0644)\r\n if err != nil {\r\n log.Println(err)\r\n }\r\n defer file.Close()\r\n if _, err := file.WriteString(newTask); err != nil {\r\n log.Fatal(err)\r\n }\r\n}", "func generateFromString(fullpath, src string) {\n\tfullpath = filepath.FromSlash(fullpath)\n\tfile, err := os.Create(fullpath)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error: cannot generate file from string %s.\\n\"+\n\t\t\t\"The error is: %v\", fullpath, err)\n\t}\n\n\t_, err = file.WriteString(src)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error: cannot write from string %s.\\n\"+\n\t\t\t\"The error is: %v\", fullpath, err)\n\t}\n\n\tlog.Successf(\"Created %v\\n\", 
fullpath)\n}", "func writeFile(tmplStr, suffix string, spec *vfsSpec) error {\n\n\terrFmt := \"write file: %v\\n\"\n\n\tif suffix != \"\" {\n\t\tsuffix = \"_\" + suffix\n\t}\n\n\tfilename := fmt.Sprintf(\"%s/%s%s.go\", spec.Package, spec.Package, suffix)\n\n\tout, err := os.Create(filename)\n\tif err != nil {\n\t\treturn fmt.Errorf(errFmt, err)\n\t}\n\tdefer out.Close()\n\n\ttmpl, err := template.New(\"\").Funcs(fnMap).Parse(tmplStr)\n\tif err != nil {\n\t\treturn fmt.Errorf(errFmt, err)\n\t}\n\n\ttmpl, err = tmpl.Parse(publicInterfaceTmplStr)\n\tif err != nil {\n\t\treturn fmt.Errorf(errFmt, err)\n\t}\n\n\tbuf := new(bytes.Buffer)\n\terr = tmpl.Execute(buf, spec)\n\tif err != nil {\n\t\treturn fmt.Errorf(errFmt, err)\n\t}\n\n\tdata, err := format.Source(buf.Bytes())\n\tif err != nil {\n\t\treturn fmt.Errorf(errFmt, err)\n\t}\n\n\tif err := ioutil.WriteFile(filename, data, os.FileMode(0644)); err != nil {\n\t\treturn fmt.Errorf(errFmt, err)\n\t}\n\n\tpwd, err := filepath.Abs(\".\")\n\tif err != nil {\n\t\treturn fmt.Errorf(errFmt, err)\n\t}\n\n\tfmt.Printf(\"ESSENCE: file written: %s/%s\\n\", pwd, filename)\n\n\treturn nil\n}", "func writeToFile(outputDir string, name string, data string, append bool) error {\n\toutfileName := strings.Join([]string{outputDir, name}, string(filepath.Separator))\n\n\terr := ensureDirectoryForFile(outfileName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tf, err := createOrOpenFile(outfileName, append)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer f.Close()\n\n\t_, err = f.WriteString(fmt.Sprintf(\"---\\n# Source: %s\\n%s\\n\", name, data))\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Printf(\"wrote %s\\n\", outfileName)\n\treturn nil\n}", "func OverwriteString(filename, s string) error {\n\treturn Overwrite(filename, []byte(s))\n}", "func writeFile(w [samp_len]int) {\r\n\r\n var s string = \"\"\r\n var tmp string\r\n\r\n data, err := ioutil.ReadFile(fileName)\r\n check(err)\r\n tmp = string(data)\r\n\r\n for _, value := range w {\r\n s += fmt.Sprintf(\"%d\\n\", value)\r\n }\r\n\r\n tmp = s + tmp\r\n\r\n f, err := os.OpenFile(fileName,os.O_CREATE|os.O_WRONLY, 0755) // Opens file with permission and appends new values\r\n check(err)\r\n defer f.Close()\r\n\r\n _, err = f.WriteString(tmp)\r\n check(err)\r\n\r\n\r\n\r\n}", "func FileWrite(f *os.File, b []byte) (int, error)", "func AppendStringToFile(path, text string) error {\n\tf, err := os.OpenFile(path, os.O_APPEND|os.O_WRONLY, os.ModeAppend)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\t_, err = f.WriteString(\"\\n\" + text)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func addToFile(text, path, fileName string) {\n\tfullPath := createFullPath(path, fileName)\n\t//Make sure that the file exists\n\tif !fileExists(path, fileName) {\n\t\tcreateFile(path, fileName)\n\t}\n\tfile, err := os.OpenFile(fullPath, os.O_APPEND, 0666)\n\tcheckError(err)\n\t_, err = file.WriteString(text)\n\tcheckError(err)\n}", "func WriteToFile(f string ,d []byte) {\n err := ioutil.WriteFile(f, d, 0644)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func WriteToFile(filename, content string) {\n\tf, err := os.Create(filename)\n\tMustCheck(err)\n\tdefer CloseFile(f)\n\t_, err = f.WriteString(content)\n\tMustCheck(err)\n}", "func WriteFile(input interface{}, filename string) error {\n\tvar (\n\t\tbytes []byte\n\t\terr error\n\t)\n\tif bytes, err = Write(input); err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(filename, bytes, 0666)\n}", "func writeSourceFile(path string, file 
sourceFile) error {\n\tbody := strings.Join(file.parts, \"\")\n\treturn ioutil.WriteFile(path, []byte(body), 0666)\n}", "func WriteToFile(fileName, filedata string){\r\n\terr := ioutil.WriteFile(fileName, []byte(filedata), 0644)\r\n\t\tif err != nil {\r\n\t\t\tlog.Fatal(err)\r\n\t\t}\r\n\t\treturn\r\n}", "func writeToFile(name string, data []byte) {\n\tf, err := os.Create(name)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer f.Close()\n\n\t_, err = f.Write(data)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func (c *Collection) writeFile(out string) {\n\tf, _ := os.Create(c.Config.OutputFile)\n\n\tw := bufio.NewWriter(f)\n\tw.WriteString(out)\n\n\tw.Flush()\n}", "func writePlan(planFileName string, details *detect.FunctionDetails) error {\n\tplanFile, err := os.OpenFile(planFileName, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)\n\tif err != nil {\n\t\tlog.Println(\"failed to open the plan file for writing\", os.Args[2], err)\n\t\tprintSupportedFunctionsAndExit()\n\t}\n\tdefer planFile.Close()\n\n\t// Replace the placeholders with valid values\n\treplacedPlan := strings.Replace(string(planFileFormat), \"PACKAGE\", details.Package, 1)\n\treplacedPlan = strings.Replace(replacedPlan, \"HTTP_GO_FUNCTION\", details.Name, 1)\n\tif _, err := planFile.WriteString(replacedPlan); err != nil {\n\t\tprintSupportedFunctionsAndExit()\n\t}\n\treturn nil\n}", "func writeFile(values *[]*model.SampleStream, fileNum uint) error {\n\tif len(*values) == 0 {\n\t\treturn nil\n\t}\n\tfilename := fmt.Sprintf(\"%s.%05d\", *out, fileNum)\n\tvaluesJSON, err := json.Marshal(values)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(filename, valuesJSON, 0644)\n}", "func (hook *StackHook) fileWriteRaw(msg string) error {\n\tvar (\n\t\tfd *os.File\n\t\tpath string\n\t\terr error\n\t)\n\n\tpath = hook.path\n\n\t// rotate check\n\tif hook.rotationTime > 0 {\n\t\tnow := time.Now()\n\t\tif now.After(hook.lastRotate.Add(hook.rotationTime)) {\n\t\t\t// rotate now\n\t\t\tif err := os.Rename(path, hook.genFilename()); err != nil {\n\t\t\t\tlog.Println(\"failed to rename logfile:\", path, err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\thook.lastRotate = now\n\t\t}\n\t}\n\n\tdir := filepath.Dir(path)\n\tos.MkdirAll(dir, os.ModePerm)\n\n\tfd, err = os.OpenFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)\n\tif err != nil {\n\t\tlog.Println(\"failed to open logfile:\", path, err)\n\t\treturn err\n\t}\n\tdefer fd.Close()\n\n\tfd.Write([]byte(msg))\n\treturn nil\n}", "func WriteString(line string, filename string) error {\n\treturn WriteStrings([]string{line}, filename, \"\")\n}", "func Write(filename, data string) error {\n\tf, err := os.Create(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\t_, err = f.WriteString(data)\n\treturn err\n}", "func writeToFile(output string, data []byte) error {\n\toutFile, err := os.Create(output)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbytesOut, err := outFile.Write(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Fprintf(os.Stderr, \"%d bytes written successfully to `%s`\\n\", bytesOut, output)\n\terr = outFile.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func write_file(m map[string]string, filename string) {\n jsonString, err := json.Marshal(m)\n if err != nil {\n fmt.Println(err)\n }\n\n err = ioutil.WriteFile(filename, jsonString, 0644)\n}", "func writeToFile(payload []byte) {\n\tdir, _ := filepath.Abs(filepath.Dir(os.Args[0]))\n\tfilepath := []string{dir, \"export.json\"}\n\n\tdestination := strings.Join(filepath, 
\"\\\\\")\n\n\tfmt.Println(\"Writing data to file\", destination)\n\terr := ioutil.WriteFile(destination, payload, 0644)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfmt.Println(\"Done\")\n}", "func writeFile(dst, content string, c *testing.T) {\n\tc.Helper()\n\t// Create subdirectories if necessary\n\tassert.Assert(c, os.MkdirAll(path.Dir(dst), 0o700) == nil)\n\tf, err := os.OpenFile(dst, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o700)\n\tassert.NilError(c, err)\n\tdefer f.Close()\n\t// Write content (truncate if it exists)\n\t_, err = io.Copy(f, strings.NewReader(content))\n\tassert.NilError(c, err)\n}", "func ResetString2File(path, content string) error {\n\tfile, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0664)\n\tif err != nil {\n\t\tglog.Errorf(\"Can not open file: %s\", path)\n\t\treturn err\n\t}\n\tdefer file.Close()\n\n\tfile.WriteString(content)\n\treturn nil\n}", "func WriteFile() {\n\tvar text = \"text content\"\n\tvar data = []byte(text)\n\tioutil.WriteFile(\"tmp\", data, 0600)\n}", "func WriteToFile(output string, b bytes.Buffer) error {\n\tf, err := os.Create(output)\n\tb.WriteTo(f)\n\treturn err\n}", "func WriteToFile(fileName string, data string) error {\n\tfile, err := os.Create(fileName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\n\t_, err = io.WriteString(file, data)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn file.Sync()\n}", "func WriteFile(filename string, v interface{}) error {\n\tfile, err := os.Create(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\terr = NewEncoder(file).Encode(v)\n\tif err != nil {\n\t\treturn errors.New(\"error while writing \" + filename + \": \" + err.Error())\n\t}\n\treturn nil\n}", "func (realFS) WriteFile(name string, c []byte) error {\n\treturn ioutil.WriteFile(name, c, 0666)\n}", "func writeFile(path string, content []byte) error {\n\treturn ioutil.WriteFile(path, content, 0644)\n}", "func (d deck) saveToFile(fileName string) error {\n\t//folosesc functia din package respectiv, apoi ca al doilea arg\n\t// convertesc in []byte ce rezulta din functia CUSTOM toString()\n\treturn ioutil.WriteFile(fileName, []byte(d.toString()), 0666)\n}", "func writeFile(t *testing.T, fName, desc string, val []byte) (rval bool) {\n\tt.Helper()\n\n\trval = true\n\tvar err error\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tt.Logf(\"\\t: Couldn't write to the %s file\", desc)\n\t\t\tt.Error(\"\\t: \", err)\n\t\t\trval = false\n\t\t}\n\t}()\n\n\tt.Logf(\"Updating/Creating the %s file: %q\", desc, fName)\n\terr = os.WriteFile(fName, val, pBits)\n\tif os.IsNotExist(err) {\n\t\tdir := path.Dir(fName)\n\t\tif dir == \".\" {\n\t\t\treturn\n\t\t}\n\t\terr = os.MkdirAll(dir, dirPBits)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\terr = os.WriteFile(fName, val, pBits)\n\t}\n\treturn\n}", "func writeFile(f *os.File) {\n\tfmt.Println(\"writing\")\n\tfmt.Fprintln(f, \"data\")\n}", "func (fs *Fs) WriteString(file *os.File, string string) (int, error) {\n\treturn file.WriteString(string) // #nosec G304\n}", "func WriteFile(d *defs, fileName string, objName string, bflags attrVal) {\n objType, attrLen := getMaxAttr(objName)\n f, err := os.Create(fileName); if err != nil {\n fmt.Println(err)\n f.Close()\n return\n }\n defer func(){\n err := f.Close(); if err != nil {\n fmt.Println(\"Failed to close file \", err)\n }\n if bflags.Has(\"color\"){\n fmt.Printf(\"%vWrite%v: created a new config file '%v'\\n\",Green,RST,fileName)\n }else {\n fmt.Printf(\"Write: created a new config file '%v'\\n\",fileName)\n }\n 
}()\n // write nagios objects to a file\n for _, def := range *d {\n formatDef := formatObjDef(def, objType, attrLen)\n f.WriteString(formatDef)\n }\n}", "func WriteFile(scope *Scope, filename tf.Output, contents tf.Output) (o *tf.Operation) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"WriteFile\",\n\t\tInput: []tf.Input{\n\t\t\tfilename, contents,\n\t\t},\n\t}\n\treturn scope.AddOperation(opspec)\n}", "func writeFile(filename string, data []byte, perm os.FileMode) error {\n\tf, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm)\n\tif err != nil {\n\t\treturn err\n\t}\n\tn, err := f.Write(data)\n\tif err == nil && n < len(data) {\n\t\terr = io.ErrShortWrite\n\t}\n\tif err1 := f.Close(); err == nil {\n\t\terr = err1\n\t}\n\treturn err\n}", "func writeToRevisionFile(strSlice []string, filename string) error {\n\tfile, err := os.Create(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\tfor idx, str := range strSlice {\n\t\tif idx == len(strSlice)-1 {\n\t\t\t// do not add new line to last line\n\t\t\tfile.WriteString(str)\n\t\t} else {\n\t\t\tfile.WriteString(str + \"\\n\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func writeFile(v string) {\n\t// 打开文件\n\tfilePtr, err := os.OpenFile(\"mqtt.json\", os.O_CREATE|os.O_WRONLY, os.ModePerm)\n\tif err != nil {\n\t\tmqtt.ERROR.Println(err)\n\t}\n\n\tdefer filePtr.Close()\n\n\ttype Data struct {\n\t\tDeviceID string `JSON:\"deviceID\"` //设备id\n\t\tTimestamp string `JSON:\"timestamp\"` //时间戳\n\t\tFields map[string]string `JSON:\"fields\"` //标签\n\t}\n\tvar data Data\n\tif err := json.Unmarshal([]byte(v), &data); err == nil {\n\n\t\t// 创建Json编码器\n\t\tencoder := json.NewEncoder(filePtr)\n\t\terr = encoder.Encode(data)\n\t\tif err != nil {\n\t\t\tmqtt.ERROR.Println(\"writeFile failed\", err.Error())\n\t\t} else {\n\t\t\tmqtt.ERROR.Println(\"writeFile success\")\n\t\t}\n\t} else {\n\t\tmqtt.ERROR.Println(err)\n\t}\n\n}", "func WriteFile(root, fileName, content string) {\n\tfileName, f, err := OpenFile(root, fileName)\n\n\tif err != nil {\n\t\tlogrus.Errorf(\"Failed to write file: %s %v\", fileName, err)\n\t\treturn\n\t}\n\t_, err = f.WriteString(content)\n\tif err != nil {\n\t\tlogrus.Errorf(\"Failed to write content to file, %v\", err)\n\t}\n\t_ = f.Close()\n}", "func (a *Adapter) toFile(message string) {\n\n\tif !a.cfg.File {\n\t\treturn\n\t}\n\n\t_, err := a.lf.WriteString(message + \"\\n\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n}", "func writeToFile(level LogLevel, message string, values ...interface{}) {\n\tnow := time.Now().UTC()\n\tmessage = fmt.Sprintf(message, values...)\n\thours, minutes, seconds := now.Clock()\n\tmessage = fmt.Sprintf(\"[%d:%d:%d] {%s} %s\\n\", hours, minutes, seconds, level, message)\n\tsingletonFileLock.Lock()\n\tdefer singletonFileLock.Unlock()\n\t_, _ = singletonLogFile.WriteString(message)\n}", "func (t *testRunner) appendString(file, data string) {\n\tt.Helper()\n\n\tf, err := os.OpenFile(file, os.O_WRONLY|os.O_APPEND, 0)\n\trequire.NoError(t, err)\n\tdefer f.Close()\n\n\t_, err = f.WriteString(data)\n\trequire.NoError(t, err)\n}", "func FileWriter(cfg FileConfig) (Writer, error) {\n\treturn func(msg *message.Message) error {\n\t\t// path + year + month + day + topic\n\t\tdir := path.Join(cfg.Path, getDate(\"2006\"), getDate(\"01\"), getDate(\"02\"), msg.Topic())\n\n\t\terr := os.MkdirAll(dir, os.ModePerm)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// hour + minutes + seconds + prefix + messageId + suffix\n\t\tname := 
fmt.Sprintf(\"%s-%s\", getDate(\"15.04.05\"), *msg.MessageId)\n\n\t\tf, err := os.Create(path.Join(dir, name))\n\t\tif err != nil {\n\t\t\tif os.IsExist(err) {\n\t\t\t\tclog.Warn(\"Message already registered\")\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tdefer f.Close()\n\n\t\tfmt.Fprint(f, *msg.Body)\n\t\treturn nil\n\t}, nil\n}", "func writeFile(filename string, content []byte, mode os.FileMode) error {\n\tif options.dryRun {\n\t\tglog.Infof(\"dry-run: filename: %s, content:\", filename)\n\t\tfmt.Printf(\"%s\\n\", string(content))\n\t\treturn nil\n\t}\n\tglog.V(3).Infof(\"saving the file: %s\", filename)\n\n\terr := ensureDir(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn ioutil.WriteFile(filename, content, mode)\n}", "func writeToken(filePath string, token string) {\n\t// Check if file exists\n\tif _, err := os.Stat(filePath); os.IsNotExist(err) {\n\t\t// Doesn't exist; lets create it\n\t\terr = os.MkdirAll(filepath.Dir(filePath), 0700)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tb := []byte(token)\n\tif err := ioutil.WriteFile(filePath, b, 0600); err != nil {\n\t\treturn\n\t}\n}", "func WriteFile(fp string, src string) (string, error) {\n\t// Check if fp (filepath) exists or not\n\tnp, err := IsExist(fp)\n\tif err != nil {\n\t\t// If some kinda error\n\t\treturn \"\", err\n\t}\n\n\t// If file does not exist (np == \"\")\n\tif np == \"\" {\n\t\tfpDir, _ := path.Split(fp) // Directory name of fp\n\t\terr := os.MkdirAll(fpDir, os.ModePerm) // Creating directory\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\terr = CopyData(src, fp) // Writing file\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\treturn fp, nil\n\t}\n\n\t// If a file already a file exist then return a \"dup:err\" error\n\treturn \"\", errors.New(\"dup:err\")\n}", "func WriteToFile(fileName string, bytes []byte) error {\n\tf, err := os.Create(fileName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() error {\n\t\terr := f.Close()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}()\n\n\t_, errorWrite := f.Write(bytes)\n\tif errorWrite != nil {\n\t\treturn errorWrite\n\t}\n\n\treturn nil\n}", "func statsToFile(stats string, dir string) error {\n\n\t// Create a directory using current date\n\t// get current date\n\tdt := time.Now()\n\n\t// forge base dir\n\tbasedir := fmt.Sprintf(\"%s/%s\", dir, dt.Format(\"2006-01-02\"))\n\t// create base dir if not exists\n\tif _, err := os.Stat(basedir); os.IsNotExist(err) {\n\t\tos.Mkdir(basedir, 0755)\n\t}\n\n\t// Create file\n\tf, err := os.Create(fmt.Sprintf(\"%s/stats\", basedir))\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Ensure file is closed at the end of the func\n\tdefer f.Close()\n\n\t// Write stuff and return err\n\t_, err = f.WriteString(stats)\n\n\treturn err\n\n}", "func writeToFile(stud Student) {\r\n\tf, err := os.Create(\"./studentinfo.txt\")\r\n\tif err != nil {\r\n\t\tfmt.Println(err)\r\n\t\treturn\r\n\t}\r\n\tl, err := f.WriteString(stud.toString())\r\n\tif err != nil {\r\n\t\tfmt.Println(err)\r\n\t\tf.Close()\r\n\t\treturn\r\n\t}\r\n\tfmt.Println(l, \"info written successfully\")\r\n\terr = f.Close()\r\n\tif err != nil {\r\n\t\tfmt.Println(err)\r\n\t\treturn\r\n\t}\r\n}", "func writeFile(path, contents string) error {\n\tdir := filepath.Dir(path)\n\tif err := os.MkdirAll(dir, 0755); err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(path, []byte(contents), 0644)\n}", "func writeFile(fileName string, data []byte) error {\n\tfile, err := os.Create(fileName)\n\tif err != nil {\n\t\treturn 
fmt.Errorf(\"File creation failed: %v\", err)\n\t}\n\tdefer file.Close()\n\t_, err = file.Write(data)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error while writting response to file: %v\", err)\n\t}\n\treturn nil\n}", "func createFile( filename string) {\n f, err := os.Create(filename)\n u.ErrNil(err, \"Unable to create file\")\n defer f.Close()\n log.Printf(\"Created %s\\n\", f.Name())\n}", "func replaceFile(filePath, lines string) {\n\tbytesToWrite := []byte(lines) //data written\n\terr := ioutil.WriteFile(filePath, bytesToWrite, 0644) //filename, byte array (binary representation), and 0644 which represents permission number. (0-777) //will create a new text file if that text file does not exist yet\n\tif isError(err) {\n\t\tfmt.Println(\"Error Writing to file:\", filePath, \"=\", err)\n\t\treturn\n\t}\n}", "func overrideFile(filename, newcontent string) {\n\tfile, fileErr := os.Create(filename) //open given file\n\tif fileErr != nil {\n\t\tfmt.Println(fileErr)\n\t}\n\tfile.WriteString(newcontent) //write the new content to the file\n\tfile.Close()\n\treturn\n}", "func (t *Type1) WriteFile(filename string) error {\n\tw, err := os.Create(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer w.Close()\n\treturn t.Write(w)\n}", "func (tf *Temp) WriteString(contents string) (int, error) {\n\ttf.Lock()\n\tdefer tf.Unlock()\n\n\twritten, err := tf.file.WriteString(contents)\n\treturn written, ex.New(err)\n}", "func WriteTemplateToFile(tpl string, config interface{}, writepath string, filemode os.FileMode) error {\n\tvar tplbuffer bytes.Buffer\n\tvar packageTemplate = template.Must(template.New(\"\").Parse(tpl))\n\terr := packageTemplate.Execute(&tplbuffer, config)\n\tif err != nil {\n\t\tlog.Warnf(\"Unable to translate template %q to string using the data %v\", tpl, config)\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(writepath, tplbuffer.Bytes(), filemode)\n\tif err != nil {\n\t\tlog.Warnf(\"Error writing file at %s : %s\", writepath, err)\n\t\treturn err\n\t}\n\treturn nil\n}", "func writeMapFile(filename string, target interface{}) error {\n\tf, err := os.Create(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err = toml.NewEncoder(f).Encode(target); err != nil {\n\t\treturn err\n\t}\n\tif err := f.Close(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func createFile(bytes []byte, filepath string) error {\n\terr := ioutil.WriteFile(filepath, bytes, 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func FileWriteAt(f *os.File, b []byte, off int64) (int, error)", "func writeFile(path string, occ []occurrence) error {\n\t// First write to temp file, then move to final destination.\n\t// Not doing this runs the risk of overwriting an existing\n\t// file with half a file due to a crash.\n\ttmp, err := ioutil.TempFile(filepath.Dir(path), \"annotator\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer func() {\n\t\ttmp.Close()\n\t\tif err == nil {\n\t\t\terr = os.Rename(tmp.Name(), path)\n\t\t}\n\t\tif err != nil {\n\t\t\tos.Remove(tmp.Name())\n\t\t}\n\t}()\n\n\tvar w io.WriteCloser = tmp\n\tif filepath.Ext(path) == \".gz\" {\n\t\tw, _ = gzip.NewWriterLevel(tmp, gzip.BestCompression)\n\t\tdefer w.Close()\n\t}\n\n\tfor i := range occ {\n\t\tif err = json.NewEncoder(w).Encode(&occ[i]); err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn err\n}", "func (r *RealRunner) WriteFile(path, contents string) error {\n\t// Note: os.Create uses mode=0666.\n\treturn ioutil.WriteFile(path, []byte(contents), 0666)\n}", "func WriteNewDataOnFile(path string, data []byte)(err 
error){\n err = ioutil.WriteFile(path, data, 0644)\n if err != nil {logs.Error(\"Error WriteNewData\"); return err}\n \n return nil\n}", "func generateFile(fileName string, content string) {\n\tif content == \"\" {\n\t\tcontent = \"#empty\"\n\t}\n\tif _, err := os.Stat(fileName); err == nil {\n\t\tfmt.Printf(\"File exists\\n\")\n\t} else {\n\t\td := []byte(content)\n\t\tcheck(ioutil.WriteFile(fileName, d, 0644))\n\t}\n\n}", "func createFile(path string, fileName string, panicTrigger bool, content string) {\n\t_, err := os.Stat(path + \"/\" + fileName)\n\tif os.IsNotExist(err) {\n\t\tfile, err := os.Create(path + \"/\" + fileName)\n\t\tdefer file.Close()\n\t\tif err != nil {\n\t\t\tif panicTrigger {\n\t\t\t\tpanic(err.Error())\n\t\t\t} else {\n\t\t\t\tlog.Println(err.Error())\n\t\t\t}\n\t\t} else {\n\t\t\tfile, err := os.OpenFile(path+\"/\"+fileName, os.O_RDWR, 0644)\n\t\t\tdefer file.Close()\n\t\t\tif err != nil {\n\t\t\t\tif panicTrigger {\n\t\t\t\t\tpanic(err.Error())\n\t\t\t\t} else {\n\t\t\t\t\tlog.Println(err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t\t_, err = file.WriteString(content)\n\t\t\tif err != nil {\n\t\t\t\tif panicTrigger {\n\t\t\t\t\tpanic(err.Error())\n\t\t\t\t} else {\n\t\t\t\t\tlog.Panicln(err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t\terr = file.Sync()\n\t\t\tif err != nil {\n\t\t\t\tif panicTrigger {\n\t\t\t\t\tpanic(err.Error())\n\t\t\t\t} else {\n\t\t\t\t\tlog.Panicln(err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}" ]
[ "0.7139199", "0.6933927", "0.67930186", "0.67593706", "0.65849555", "0.654152", "0.6540923", "0.6525228", "0.64944446", "0.6472575", "0.645946", "0.6423754", "0.6371545", "0.6369693", "0.6349962", "0.633454", "0.63281596", "0.6317388", "0.62762797", "0.6273116", "0.62696135", "0.62284267", "0.6226549", "0.6217713", "0.6205086", "0.6198021", "0.6194577", "0.61928946", "0.61900055", "0.6186568", "0.6144561", "0.6064273", "0.60588276", "0.6049406", "0.60298955", "0.6014631", "0.6003659", "0.596778", "0.5964908", "0.5917131", "0.5912626", "0.59114045", "0.5907236", "0.58912265", "0.58761126", "0.58749396", "0.58646387", "0.5834653", "0.5829464", "0.57788914", "0.5776426", "0.5773057", "0.5766537", "0.57478577", "0.57460874", "0.573676", "0.5731317", "0.57052255", "0.57011265", "0.56996447", "0.56708854", "0.56705", "0.5666301", "0.56647575", "0.56629", "0.5659526", "0.5657158", "0.5654821", "0.5644076", "0.5640422", "0.5633876", "0.5629093", "0.56255484", "0.56207424", "0.56124336", "0.5608628", "0.55823857", "0.5579103", "0.5575146", "0.5567992", "0.5557486", "0.55510205", "0.5537675", "0.5535046", "0.5532339", "0.5529147", "0.55253315", "0.55121046", "0.55035174", "0.54993093", "0.5496313", "0.549226", "0.5488757", "0.5486278", "0.5483533", "0.54830885", "0.5481856", "0.54776734", "0.54771024", "0.54744923" ]
0.5798089
49
NewWorkload constructs and returns a workload representation from an application reference.
func NewWorkload(cluster *kubernetes.Cluster, app models.AppRef) *Workload { return &Workload{cluster: cluster, app: app} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewWorkloadDefinition(ctx *pulumi.Context,\n\tname string, args *WorkloadDefinitionArgs, opts ...pulumi.ResourceOption) (*WorkloadDefinition, error) {\n\tif args == nil {\n\t\targs = &WorkloadDefinitionArgs{}\n\t}\n\targs.ApiVersion = pulumi.StringPtr(\"core.oam.dev/v1alpha2\")\n\targs.Kind = pulumi.StringPtr(\"WorkloadDefinition\")\n\tvar resource WorkloadDefinition\n\terr := ctx.RegisterResource(\"kubernetes:core.oam.dev/v1alpha2:WorkloadDefinition\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func CreateWorkload(pce PCE, workload Workload) (Workload, APIResponse, error) {\n\tvar newWL Workload\n\tvar api APIResponse\n\tvar err error\n\n\t// Build the API URL\n\tapiURL, err := url.Parse(\"https://\" + pceSanitization(pce.FQDN) + \":\" + strconv.Itoa(pce.Port) + \"/api/v2/orgs/\" + strconv.Itoa(pce.Org) + \"/workloads\")\n\tif err != nil {\n\t\treturn newWL, api, fmt.Errorf(\"create workload - %s\", err)\n\t}\n\n\t// Call the API\n\tworkloadJSON, err := json.Marshal(workload)\n\tif err != nil {\n\t\treturn newWL, api, fmt.Errorf(\"create workload - %s\", err)\n\t}\n\tapi, err = apicall(\"POST\", apiURL.String(), pce, workloadJSON, false)\n\tif err != nil {\n\t\treturn newWL, api, fmt.Errorf(\"create workload - %s\", err)\n\t}\n\n\t// Marshal JSON\n\tjson.Unmarshal([]byte(api.RespBody), &newWL)\n\n\treturn newWL, api, nil\n}", "func (c *Client) CreateWorkload(ctx context.Context, params *CreateWorkloadInput, optFns ...func(*Options)) (*CreateWorkloadOutput, error) {\n\tif params == nil {\n\t\tparams = &CreateWorkloadInput{}\n\t}\n\n\tresult, metadata, err := c.invokeOperation(ctx, \"CreateWorkload\", params, optFns, addOperationCreateWorkloadMiddlewares)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tout := result.(*CreateWorkloadOutput)\n\tout.ResultMetadata = metadata\n\treturn out, nil\n}", "func (in *Workload) DeepCopy() *Workload {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Workload)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func New(name string, ts Templates, o ...Option) *xpworkloadv1alpha1.KubernetesApplication {\n\topts := &options{\n\t\tnamespace: corev1.NamespaceDefault,\n\t\tcs: &metav1.LabelSelector{}, // The empty selector selects all clusters.\n\t}\n\n\tfor _, apply := range o {\n\t\tapply(opts)\n\t}\n\n\ta := &xpworkloadv1alpha1.KubernetesApplication{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: opts.namespace,\n\t\t\tName: name,\n\t\t\tLabels: opts.labels,\n\t\t\tAnnotations: opts.annotations,\n\t\t\tOwnerReferences: opts.owners,\n\t\t},\n\t\tSpec: xpworkloadv1alpha1.KubernetesApplicationSpec{\n\t\t\tResourceSelector: &metav1.LabelSelector{MatchLabels: opts.labels},\n\t\t\tClusterSelector: opts.cs,\n\t\t\tResourceTemplates: make([]xpworkloadv1alpha1.KubernetesApplicationResourceTemplate, len(ts)),\n\t\t},\n\t\tStatus: xpworkloadv1alpha1.KubernetesApplicationStatus{\n\t\t\tCluster: opts.cluster,\n\t\t},\n\t}\n\n\tfor i, t := range ts {\n\t\tsecrets := opts.secrets.Get(t)\n\t\trt := xpworkloadv1alpha1.KubernetesApplicationResourceTemplate{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t// TODO(negz): Handle the case in which we have templates for\n\t\t\t\t// two resources with the same kind and name but different API\n\t\t\t\t// versions. 
The below format string will result in a name\n\t\t\t\t// conflict.\n\t\t\t\tName: strings.ToLower(fmt.Sprintf(\"%s-%s-%s\", name, t.GetKind(), t.GetName())),\n\t\t\t\tLabels: opts.labels,\n\t\t\t\tAnnotations: opts.annotations,\n\t\t\t},\n\t\t\tSpec: xpworkloadv1alpha1.KubernetesApplicationResourceSpec{\n\t\t\t\tTemplate: t,\n\t\t\t\tSecrets: make([]corev1.LocalObjectReference, len(secrets)),\n\t\t\t},\n\t\t}\n\n\t\tfor i, name := range secrets {\n\t\t\trt.Spec.Secrets[i] = corev1.LocalObjectReference{Name: name}\n\t\t}\n\n\t\ta.Spec.ResourceTemplates[i] = rt\n\t}\n\n\treturn a\n}", "func (s *service) CreateWorkload(id string, payload io.Reader, contentType string) (string, error) {\n\tprefix := async.StoragePath(s.clusterUID, s.apiName)\n\tlog := s.logger.With(zap.String(\"id\", id), zap.String(\"contentType\", contentType))\n\n\tpayloadPath := async.PayloadPath(prefix, id)\n\tlog.Debug(\"uploading payload\", zap.String(\"path\", payloadPath))\n\tif err := s.storage.Upload(payloadPath, payload, contentType); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tlog.Debug(\"sending message to queue\")\n\tif err := s.queue.SendMessage(id, id); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tstatusPath := fmt.Sprintf(\"%s/%s/status/%s\", prefix, id, async.StatusInQueue)\n\tlog.Debug(fmt.Sprintf(\"setting status to %s\", async.StatusInQueue))\n\tif err := s.storage.Upload(statusPath, strings.NewReader(\"\"), \"text/plain\"); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn id, nil\n}", "func newWorkloadDeployer(in *WorkloadDeployerInput) (*workloadDeployer, error) {\n\tws, err := workspace.Use(afero.NewOsFs())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefaultSession, err := in.SessionProvider.Default()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"create default: %w\", err)\n\t}\n\tenvSession, err := in.SessionProvider.FromRole(in.Env.ManagerRoleARN, in.Env.Region)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"create env session with region %s: %w\", in.Env.Region, err)\n\t}\n\tdefaultSessEnvRegion, err := in.SessionProvider.DefaultWithRegion(in.Env.Region)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"create default session with region %s: %w\", in.Env.Region, err)\n\t}\n\tresources, err := cloudformation.New(defaultSession, cloudformation.WithProgressTracker(os.Stderr)).GetAppResourcesByRegion(in.App, in.Env.Region)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"get application %s resources from region %s: %w\", in.App.Name, in.Env.Region, err)\n\t}\n\n\tvar addons stackBuilder\n\taddons, err = addon.ParseFromWorkload(in.Name, ws)\n\tif err != nil {\n\t\tvar notFoundErr *addon.ErrAddonsNotFound\n\t\tif !errors.As(err, &notFoundErr) {\n\t\t\treturn nil, fmt.Errorf(\"parse addons stack for workload %s: %w\", in.Name, err)\n\t\t}\n\t\taddons = nil // so that we can check for no addons with nil comparison\n\t}\n\n\trepoName := RepoName(in.App.Name, in.Name)\n\trepository := repository.NewWithURI(\n\t\tecr.New(defaultSessEnvRegion), repoName, resources.RepositoryURLs[in.Name])\n\tstore := config.NewSSMStore(identity.New(defaultSession), ssm.New(defaultSession), aws.StringValue(defaultSession.Config.Region))\n\tenvDescriber, err := describe.NewEnvDescriber(describe.NewEnvDescriberConfig{\n\t\tApp: in.App.Name,\n\t\tEnv: in.Env.Name,\n\t\tConfigStore: store,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmft, err := envDescriber.Manifest()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"read the manifest used to deploy environment %s: %w\", in.Env.Name, err)\n\t}\n\tenvConfig, 
err := manifest.UnmarshalEnvironment(mft)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unmarshal the manifest used to deploy environment %s: %w\", in.Env.Name, err)\n\t}\n\n\tcfn := cloudformation.New(envSession, cloudformation.WithProgressTracker(os.Stderr))\n\n\tlabeledTermPrinter := func(fw syncbuffer.FileWriter, bufs []*syncbuffer.LabeledSyncBuffer, opts ...syncbuffer.LabeledTermPrinterOption) LabeledTermPrinter {\n\t\treturn syncbuffer.NewLabeledTermPrinter(fw, bufs, opts...)\n\t}\n\tdocker := dockerengine.New(exec.NewCmd())\n\treturn &workloadDeployer{\n\t\tname: in.Name,\n\t\tapp: in.App,\n\t\tenv: in.Env,\n\t\timage: in.Image,\n\t\tresources: resources,\n\t\tworkspacePath: ws.Path(),\n\t\tfs: afero.NewOsFs(),\n\t\ts3Client: s3.New(envSession),\n\t\taddons: addons,\n\t\trepository: repository,\n\t\tdeployer: cfn,\n\t\ttmplGetter: cfn,\n\t\tendpointGetter: envDescriber,\n\t\tspinner: termprogress.NewSpinner(log.DiagnosticWriter),\n\t\ttemplateFS: template.New(),\n\t\tenvVersionGetter: in.EnvVersionGetter,\n\t\toverrider: in.Overrider,\n\t\tdocker: docker,\n\t\tcustomResources: in.customResources,\n\t\tdefaultSess: defaultSession,\n\t\tdefaultSessWithEnvRegion: defaultSessEnvRegion,\n\t\tenvSess: envSession,\n\t\tstore: store,\n\t\tenvConfig: envConfig,\n\t\tlabeledTermPrinter: labeledTermPrinter,\n\n\t\tmft: in.Mft,\n\t\trawMft: in.RawMft,\n\t}, nil\n}", "func (in *WorkloadSpec) DeepCopy() *WorkloadSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(WorkloadSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func WorkloadNew(homeDirectory string, org string) {\n\n\t// Verify that env vars are set properly and determine the working directory.\n\tdir, err := VerifyEnvironment(homeDirectory, false, false, \"\")\n\tif err != nil {\n\t\tcliutils.Fatal(cliutils.CLI_INPUT_ERROR, \"'%v %v' %v\", WORKLOAD_COMMAND, WORKLOAD_CREATION_COMMAND, err)\n\t}\n\n\tif org == \"\" && os.Getenv(DEVTOOL_HZN_ORG) == \"\" {\n\t\tcliutils.Fatal(cliutils.CLI_INPUT_ERROR, \"'%v %v' must specify either --org or set the %v environment variable.\", WORKLOAD_COMMAND, WORKLOAD_CREATION_COMMAND, DEVTOOL_HZN_ORG)\n\t}\n\n\t// Create the working directory.\n\tif err := CreateWorkingDir(dir); err != nil {\n\t\tcliutils.Fatal(cliutils.CLI_INPUT_ERROR, \"'%v %v' %v\", WORKLOAD_COMMAND, WORKLOAD_CREATION_COMMAND, err)\n\t}\n\n\t// If there are any horizon metadata files already in the directory then we wont create any files.\n\tcmd := fmt.Sprintf(\"%v %v\", WORKLOAD_COMMAND, WORKLOAD_CREATION_COMMAND)\n\tFileNotExist(dir, cmd, USERINPUT_FILE, UserInputExists)\n\tFileNotExist(dir, cmd, WORKLOAD_DEFINITION_FILE, WorkloadDefinitionExists)\n\t//FileNotExist(dir, cmd, DEPENDENCIES_FILE, DependenciesExists)\n\n\tif org == \"\" {\n\t\torg = os.Getenv(DEVTOOL_HZN_ORG)\n\t}\n\n\t// Create the metadata files.\n\tif err := CreateUserInputs(dir, true, false, org); err != nil {\n\t\tcliutils.Fatal(cliutils.CLI_GENERAL_ERROR, \"'%v %v' %v\", WORKLOAD_COMMAND, WORKLOAD_CREATION_COMMAND, err)\n\t} else if err := CreateWorkloadDefinition(dir, org); err != nil {\n\t\tcliutils.Fatal(cliutils.CLI_GENERAL_ERROR, \"'%v %v' %v\", WORKLOAD_COMMAND, WORKLOAD_CREATION_COMMAND, err)\n\t}\n\t// } else if err := CreateDependencies(dir); err != nil {\n\t// \tcliutils.Fatal(cliutils.CLI_GENERAL_ERROR, \"'%v %v' %v\", WORKLOAD_COMMAND, WORKLOAD_CREATION_COMMAND, err)\n\t// }\n\n\tfmt.Printf(\"Created horizon metadata files in %v. 
Edit these files to define and configure your new %v.\\n\", dir, WORKLOAD_COMMAND)\n\n}", "func (h *H) AddWorkload(name, project string) {\n\th.InstalledWorkloads[name] = project\n}", "func produceWorkloadInfo(opts string) (*pb.WorkloadInfo, error) {\n\tninputs := FlexVolumeInputs{}\n\terr := json.Unmarshal([]byte(opts), &ninputs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\twlInfo := pb.WorkloadInfo{\n\t\tAttrs: &pb.WorkloadInfo_WorkloadAttributes{\n\t\t\tUid: ninputs.UID,\n\t\t\tWorkload: ninputs.Name,\n\t\t\tNamespace: ninputs.Namespace,\n\t\t\tServiceaccount: ninputs.ServiceAccount,\n\t\t},\n\t\tWorkloadpath: ninputs.UID,\n\t}\n\treturn &wlInfo, nil\n}", "func createStartWorkload(vCpus int, memMB int, diskMB int) *payloads.Start {\n\tvar work payloads.Start\n\n\twork.Start.InstanceUUID = \"c73322e8-d5fe-4d57-874c-dcee4fd368cd\"\n\twork.Start.ImageUUID = \"b265f62b-e957-47fd-a0a2-6dc261c7315c\"\n\n\treqVcpus := payloads.RequestedResource{\n\t\tType: \"vcpus\",\n\t\tValue: vCpus,\n\t\tMandatory: true,\n\t}\n\treqMem := payloads.RequestedResource{\n\t\tType: \"mem_mb\",\n\t\tValue: memMB,\n\t\tMandatory: true,\n\t}\n\twork.Start.RequestedResources = append(work.Start.RequestedResources, reqVcpus)\n\twork.Start.RequestedResources = append(work.Start.RequestedResources, reqMem)\n\n\t//TODO: add EstimatedResources\n\n\twork.Start.FWType = payloads.EFI\n\twork.Start.InstancePersistence = payloads.Host\n\n\treturn &work\n}", "func NewWorkloadV1(conn *grpc.ClientConn, logger log.Logger) workload.ServiceWorkloadV1Client {\n\n\tvar lAbortMigrationEndpoint endpoint.Endpoint\n\t{\n\t\tlAbortMigrationEndpoint = grpctransport.NewClient(\n\t\t\tconn,\n\t\t\t\"workload.WorkloadV1\",\n\t\t\t\"AbortMigration\",\n\t\t\tworkload.EncodeGrpcReqWorkload,\n\t\t\tworkload.DecodeGrpcRespWorkload,\n\t\t\t&workload.Workload{},\n\t\t\tgrpctransport.ClientBefore(trace.ToGRPCRequest(logger)),\n\t\t\tgrpctransport.ClientBefore(dummyBefore),\n\t\t).Endpoint()\n\t\tlAbortMigrationEndpoint = trace.ClientEndPoint(\"WorkloadV1:AbortMigration\")(lAbortMigrationEndpoint)\n\t}\n\tvar lAutoAddEndpointEndpoint endpoint.Endpoint\n\t{\n\t\tlAutoAddEndpointEndpoint = grpctransport.NewClient(\n\t\t\tconn,\n\t\t\t\"workload.WorkloadV1\",\n\t\t\t\"AutoAddEndpoint\",\n\t\t\tworkload.EncodeGrpcReqEndpoint,\n\t\t\tworkload.DecodeGrpcRespEndpoint,\n\t\t\t&workload.Endpoint{},\n\t\t\tgrpctransport.ClientBefore(trace.ToGRPCRequest(logger)),\n\t\t\tgrpctransport.ClientBefore(dummyBefore),\n\t\t).Endpoint()\n\t\tlAutoAddEndpointEndpoint = trace.ClientEndPoint(\"WorkloadV1:AutoAddEndpoint\")(lAutoAddEndpointEndpoint)\n\t}\n\tvar lAutoAddWorkloadEndpoint endpoint.Endpoint\n\t{\n\t\tlAutoAddWorkloadEndpoint = grpctransport.NewClient(\n\t\t\tconn,\n\t\t\t\"workload.WorkloadV1\",\n\t\t\t\"AutoAddWorkload\",\n\t\t\tworkload.EncodeGrpcReqWorkload,\n\t\t\tworkload.DecodeGrpcRespWorkload,\n\t\t\t&workload.Workload{},\n\t\t\tgrpctransport.ClientBefore(trace.ToGRPCRequest(logger)),\n\t\t\tgrpctransport.ClientBefore(dummyBefore),\n\t\t).Endpoint()\n\t\tlAutoAddWorkloadEndpoint = trace.ClientEndPoint(\"WorkloadV1:AutoAddWorkload\")(lAutoAddWorkloadEndpoint)\n\t}\n\tvar lAutoDeleteEndpointEndpoint endpoint.Endpoint\n\t{\n\t\tlAutoDeleteEndpointEndpoint = 
grpctransport.NewClient(\n\t\t\tconn,\n\t\t\t\"workload.WorkloadV1\",\n\t\t\t\"AutoDeleteEndpoint\",\n\t\t\tworkload.EncodeGrpcReqEndpoint,\n\t\t\tworkload.DecodeGrpcRespEndpoint,\n\t\t\t&workload.Endpoint{},\n\t\t\tgrpctransport.ClientBefore(trace.ToGRPCRequest(logger)),\n\t\t\tgrpctransport.ClientBefore(dummyBefore),\n\t\t).Endpoint()\n\t\tlAutoDeleteEndpointEndpoint = trace.ClientEndPoint(\"WorkloadV1:AutoDeleteEndpoint\")(lAutoDeleteEndpointEndpoint)\n\t}\n\tvar lAutoDeleteWorkloadEndpoint endpoint.Endpoint\n\t{\n\t\tlAutoDeleteWorkloadEndpoint = grpctransport.NewClient(\n\t\t\tconn,\n\t\t\t\"workload.WorkloadV1\",\n\t\t\t\"AutoDeleteWorkload\",\n\t\t\tworkload.EncodeGrpcReqWorkload,\n\t\t\tworkload.DecodeGrpcRespWorkload,\n\t\t\t&workload.Workload{},\n\t\t\tgrpctransport.ClientBefore(trace.ToGRPCRequest(logger)),\n\t\t\tgrpctransport.ClientBefore(dummyBefore),\n\t\t).Endpoint()\n\t\tlAutoDeleteWorkloadEndpoint = trace.ClientEndPoint(\"WorkloadV1:AutoDeleteWorkload\")(lAutoDeleteWorkloadEndpoint)\n\t}\n\tvar lAutoGetEndpointEndpoint endpoint.Endpoint\n\t{\n\t\tlAutoGetEndpointEndpoint = grpctransport.NewClient(\n\t\t\tconn,\n\t\t\t\"workload.WorkloadV1\",\n\t\t\t\"AutoGetEndpoint\",\n\t\t\tworkload.EncodeGrpcReqEndpoint,\n\t\t\tworkload.DecodeGrpcRespEndpoint,\n\t\t\t&workload.Endpoint{},\n\t\t\tgrpctransport.ClientBefore(trace.ToGRPCRequest(logger)),\n\t\t\tgrpctransport.ClientBefore(dummyBefore),\n\t\t).Endpoint()\n\t\tlAutoGetEndpointEndpoint = trace.ClientEndPoint(\"WorkloadV1:AutoGetEndpoint\")(lAutoGetEndpointEndpoint)\n\t}\n\tvar lAutoGetWorkloadEndpoint endpoint.Endpoint\n\t{\n\t\tlAutoGetWorkloadEndpoint = grpctransport.NewClient(\n\t\t\tconn,\n\t\t\t\"workload.WorkloadV1\",\n\t\t\t\"AutoGetWorkload\",\n\t\t\tworkload.EncodeGrpcReqWorkload,\n\t\t\tworkload.DecodeGrpcRespWorkload,\n\t\t\t&workload.Workload{},\n\t\t\tgrpctransport.ClientBefore(trace.ToGRPCRequest(logger)),\n\t\t\tgrpctransport.ClientBefore(dummyBefore),\n\t\t).Endpoint()\n\t\tlAutoGetWorkloadEndpoint = trace.ClientEndPoint(\"WorkloadV1:AutoGetWorkload\")(lAutoGetWorkloadEndpoint)\n\t}\n\tvar lAutoLabelEndpointEndpoint endpoint.Endpoint\n\t{\n\t\tlAutoLabelEndpointEndpoint = grpctransport.NewClient(\n\t\t\tconn,\n\t\t\t\"workload.WorkloadV1\",\n\t\t\t\"AutoLabelEndpoint\",\n\t\t\tworkload.EncodeGrpcReqLabel,\n\t\t\tworkload.DecodeGrpcRespEndpoint,\n\t\t\t&workload.Endpoint{},\n\t\t\tgrpctransport.ClientBefore(trace.ToGRPCRequest(logger)),\n\t\t\tgrpctransport.ClientBefore(dummyBefore),\n\t\t).Endpoint()\n\t\tlAutoLabelEndpointEndpoint = trace.ClientEndPoint(\"WorkloadV1:AutoLabelEndpoint\")(lAutoLabelEndpointEndpoint)\n\t}\n\tvar lAutoLabelWorkloadEndpoint endpoint.Endpoint\n\t{\n\t\tlAutoLabelWorkloadEndpoint = grpctransport.NewClient(\n\t\t\tconn,\n\t\t\t\"workload.WorkloadV1\",\n\t\t\t\"AutoLabelWorkload\",\n\t\t\tworkload.EncodeGrpcReqLabel,\n\t\t\tworkload.DecodeGrpcRespWorkload,\n\t\t\t&workload.Workload{},\n\t\t\tgrpctransport.ClientBefore(trace.ToGRPCRequest(logger)),\n\t\t\tgrpctransport.ClientBefore(dummyBefore),\n\t\t).Endpoint()\n\t\tlAutoLabelWorkloadEndpoint = trace.ClientEndPoint(\"WorkloadV1:AutoLabelWorkload\")(lAutoLabelWorkloadEndpoint)\n\t}\n\tvar lAutoListEndpointEndpoint endpoint.Endpoint\n\t{\n\t\tlAutoListEndpointEndpoint = 
grpctransport.NewClient(\n\t\t\tconn,\n\t\t\t\"workload.WorkloadV1\",\n\t\t\t\"AutoListEndpoint\",\n\t\t\tworkload.EncodeGrpcReqListWatchOptions,\n\t\t\tworkload.DecodeGrpcRespEndpointList,\n\t\t\t&workload.EndpointList{},\n\t\t\tgrpctransport.ClientBefore(trace.ToGRPCRequest(logger)),\n\t\t\tgrpctransport.ClientBefore(dummyBefore),\n\t\t).Endpoint()\n\t\tlAutoListEndpointEndpoint = trace.ClientEndPoint(\"WorkloadV1:AutoListEndpoint\")(lAutoListEndpointEndpoint)\n\t}\n\tvar lAutoListWorkloadEndpoint endpoint.Endpoint\n\t{\n\t\tlAutoListWorkloadEndpoint = grpctransport.NewClient(\n\t\t\tconn,\n\t\t\t\"workload.WorkloadV1\",\n\t\t\t\"AutoListWorkload\",\n\t\t\tworkload.EncodeGrpcReqListWatchOptions,\n\t\t\tworkload.DecodeGrpcRespWorkloadList,\n\t\t\t&workload.WorkloadList{},\n\t\t\tgrpctransport.ClientBefore(trace.ToGRPCRequest(logger)),\n\t\t\tgrpctransport.ClientBefore(dummyBefore),\n\t\t).Endpoint()\n\t\tlAutoListWorkloadEndpoint = trace.ClientEndPoint(\"WorkloadV1:AutoListWorkload\")(lAutoListWorkloadEndpoint)\n\t}\n\tvar lAutoUpdateEndpointEndpoint endpoint.Endpoint\n\t{\n\t\tlAutoUpdateEndpointEndpoint = grpctransport.NewClient(\n\t\t\tconn,\n\t\t\t\"workload.WorkloadV1\",\n\t\t\t\"AutoUpdateEndpoint\",\n\t\t\tworkload.EncodeGrpcReqEndpoint,\n\t\t\tworkload.DecodeGrpcRespEndpoint,\n\t\t\t&workload.Endpoint{},\n\t\t\tgrpctransport.ClientBefore(trace.ToGRPCRequest(logger)),\n\t\t\tgrpctransport.ClientBefore(dummyBefore),\n\t\t).Endpoint()\n\t\tlAutoUpdateEndpointEndpoint = trace.ClientEndPoint(\"WorkloadV1:AutoUpdateEndpoint\")(lAutoUpdateEndpointEndpoint)\n\t}\n\tvar lAutoUpdateWorkloadEndpoint endpoint.Endpoint\n\t{\n\t\tlAutoUpdateWorkloadEndpoint = grpctransport.NewClient(\n\t\t\tconn,\n\t\t\t\"workload.WorkloadV1\",\n\t\t\t\"AutoUpdateWorkload\",\n\t\t\tworkload.EncodeGrpcReqWorkload,\n\t\t\tworkload.DecodeGrpcRespWorkload,\n\t\t\t&workload.Workload{},\n\t\t\tgrpctransport.ClientBefore(trace.ToGRPCRequest(logger)),\n\t\t\tgrpctransport.ClientBefore(dummyBefore),\n\t\t).Endpoint()\n\t\tlAutoUpdateWorkloadEndpoint = trace.ClientEndPoint(\"WorkloadV1:AutoUpdateWorkload\")(lAutoUpdateWorkloadEndpoint)\n\t}\n\tvar lFinalSyncMigrationEndpoint endpoint.Endpoint\n\t{\n\t\tlFinalSyncMigrationEndpoint = grpctransport.NewClient(\n\t\t\tconn,\n\t\t\t\"workload.WorkloadV1\",\n\t\t\t\"FinalSyncMigration\",\n\t\t\tworkload.EncodeGrpcReqWorkload,\n\t\t\tworkload.DecodeGrpcRespWorkload,\n\t\t\t&workload.Workload{},\n\t\t\tgrpctransport.ClientBefore(trace.ToGRPCRequest(logger)),\n\t\t\tgrpctransport.ClientBefore(dummyBefore),\n\t\t).Endpoint()\n\t\tlFinalSyncMigrationEndpoint = trace.ClientEndPoint(\"WorkloadV1:FinalSyncMigration\")(lFinalSyncMigrationEndpoint)\n\t}\n\tvar lFinishMigrationEndpoint endpoint.Endpoint\n\t{\n\t\tlFinishMigrationEndpoint = grpctransport.NewClient(\n\t\t\tconn,\n\t\t\t\"workload.WorkloadV1\",\n\t\t\t\"FinishMigration\",\n\t\t\tworkload.EncodeGrpcReqWorkload,\n\t\t\tworkload.DecodeGrpcRespWorkload,\n\t\t\t&workload.Workload{},\n\t\t\tgrpctransport.ClientBefore(trace.ToGRPCRequest(logger)),\n\t\t\tgrpctransport.ClientBefore(dummyBefore),\n\t\t).Endpoint()\n\t\tlFinishMigrationEndpoint = trace.ClientEndPoint(\"WorkloadV1:FinishMigration\")(lFinishMigrationEndpoint)\n\t}\n\tvar lStartMigrationEndpoint endpoint.Endpoint\n\t{\n\t\tlStartMigrationEndpoint = 
grpctransport.NewClient(\n\t\t\tconn,\n\t\t\t\"workload.WorkloadV1\",\n\t\t\t\"StartMigration\",\n\t\t\tworkload.EncodeGrpcReqWorkload,\n\t\t\tworkload.DecodeGrpcRespWorkload,\n\t\t\t&workload.Workload{},\n\t\t\tgrpctransport.ClientBefore(trace.ToGRPCRequest(logger)),\n\t\t\tgrpctransport.ClientBefore(dummyBefore),\n\t\t).Endpoint()\n\t\tlStartMigrationEndpoint = trace.ClientEndPoint(\"WorkloadV1:StartMigration\")(lStartMigrationEndpoint)\n\t}\n\treturn workload.EndpointsWorkloadV1Client{\n\t\tClient: workload.NewWorkloadV1Client(conn),\n\n\t\tAbortMigrationEndpoint: lAbortMigrationEndpoint,\n\t\tAutoAddEndpointEndpoint: lAutoAddEndpointEndpoint,\n\t\tAutoAddWorkloadEndpoint: lAutoAddWorkloadEndpoint,\n\t\tAutoDeleteEndpointEndpoint: lAutoDeleteEndpointEndpoint,\n\t\tAutoDeleteWorkloadEndpoint: lAutoDeleteWorkloadEndpoint,\n\t\tAutoGetEndpointEndpoint: lAutoGetEndpointEndpoint,\n\t\tAutoGetWorkloadEndpoint: lAutoGetWorkloadEndpoint,\n\t\tAutoLabelEndpointEndpoint: lAutoLabelEndpointEndpoint,\n\t\tAutoLabelWorkloadEndpoint: lAutoLabelWorkloadEndpoint,\n\t\tAutoListEndpointEndpoint: lAutoListEndpointEndpoint,\n\t\tAutoListWorkloadEndpoint: lAutoListWorkloadEndpoint,\n\t\tAutoUpdateEndpointEndpoint: lAutoUpdateEndpointEndpoint,\n\t\tAutoUpdateWorkloadEndpoint: lAutoUpdateWorkloadEndpoint,\n\t\tFinalSyncMigrationEndpoint: lFinalSyncMigrationEndpoint,\n\t\tFinishMigrationEndpoint: lFinishMigrationEndpoint,\n\t\tStartMigrationEndpoint: lStartMigrationEndpoint,\n\t}\n}", "func NewWorkbookOperation()(*WorkbookOperation) {\n m := &WorkbookOperation{\n Entity: *NewEntity(),\n }\n return m\n}", "func (conf *Config) ApplyWorkload(client *pilosa.Client, wl *workloadSpec) (err error) {\n\tif conf.Time {\n\t\tbefore := time.Now()\n\t\tfmt.Printf(\" beginning workload %s\\n\", wl.Name)\n\t\tdefer func() {\n\t\t\tafter := time.Now()\n\t\t\tvar completed = \"completed\"\n\t\t\tif err != nil {\n\t\t\t\tcompleted = \"failed\"\n\t\t\t}\n\t\t\tfmt.Printf(\" workload %s %s in %v\\n\", wl.Name, completed, after.Sub(before))\n\t\t}()\n\t}\n\terr = conf.ApplyTasks(client, wl.Tasks)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (in *PodSpecWorkload) DeepCopy() *PodSpecWorkload {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(PodSpecWorkload)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (r *Finder) Workload(ref *base.Ref) (object interface{}, err error) {\n\tworkload := &Workload{}\n\terr = r.ByRef(workload, *ref)\n\tif err == nil {\n\t\tref.ID = workload.ID\n\t\tref.Name = workload.Name\n\t\tobject = workload\n\t}\n\n\treturn\n}", "func NewWorkloadAgent() *cobra.Command {\n\to := spoke.NewWorkloadAgentOptions()\n\tcmd := controllercmd.\n\t\tNewControllerCommandConfig(\"work-agent\", version.Get(), o.RunWorkloadAgent).\n\t\tNewCommand()\n\tcmd.Use = \"agent\"\n\tcmd.Short = \"Start the Cluster Registration Agent\"\n\n\to.AddFlags(cmd)\n\treturn cmd\n}", "func newTaskBuilder(b *jobBuilder, name string) *taskBuilder {\n\tparts, err := b.jobNameSchema.ParseJobName(name)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn &taskBuilder{\n\t\tjobBuilder: b,\n\t\tparts: parts,\n\t\tName: name,\n\t\tSpec: &specs.TaskSpec{},\n\t\trecipeProperties: map[string]string{},\n\t}\n}", "func NewApplication(ctx context.Context, flags *cliFlags) (*application, func(), error) {\n\twire.Build(\n\t\twire.FieldsOf(new(*cliFlags), \"Log\", \"Census\", \"MySQL\", \"Event\", 
\"Orc8r\"),\n\t\tlog.Provider,\n\t\tnewApplication,\n\t\tnewTenancy,\n\t\tnewHealthChecks,\n\t\tnewMySQLTenancy,\n\t\tnewAuthURL,\n\t\tmysql.Open,\n\t\tevent.Set,\n\t\tgraphhttp.NewServer,\n\t\twire.Struct(new(graphhttp.Config), \"*\"),\n\t\tgraphgrpc.NewServer,\n\t\twire.Struct(new(graphgrpc.Config), \"*\"),\n\t)\n\treturn nil, nil, nil\n}", "func (in *WorkloadBuild) DeepCopy() *WorkloadBuild {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(WorkloadBuild)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func NewApplication(ctx *pulumi.Context,\n\tname string, args *ApplicationArgs, opts ...pulumi.ResourceOption) (*Application, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.RuntimeEnvironment == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'RuntimeEnvironment'\")\n\t}\n\tif args.ServiceExecutionRole == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ServiceExecutionRole'\")\n\t}\n\tvar resource Application\n\terr := ctx.RegisterResource(\"aws:kinesisanalyticsv2/application:Application\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func New(config config.Config) Workloads {\n\treturn Workloads{\n\t\tclient: http.NewClient(config),\n\t\tlogger: config.GetLogger(),\n\t}\n}", "func New() (interfaces.AsyncWork, error) {\n\treturn NewWithContext(context.Background())\n}", "func (c mockK8sClient) GetWorkload(args args.KubernetesArgs) (*kubernetes.Workload, error) {\n\treturn c.GetWorkloadNoError(args), nil\n}", "func NewWork(b *Block) *ProofOfWork {\n\tpow := &ProofOfWork{b}\n\treturn pow\n}", "func GetWorkloadDefinition(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *WorkloadDefinitionState, opts ...pulumi.ResourceOption) (*WorkloadDefinition, error) {\n\tvar resource WorkloadDefinition\n\terr := ctx.ReadResource(\"kubernetes:core.oam.dev/v1alpha2:WorkloadDefinition\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewLongRunningOperation()(*LongRunningOperation) {\n m := &LongRunningOperation{\n Entity: *NewEntity(),\n }\n return m\n}", "func NewWorkloadV1Backend(conn *grpc.ClientConn, logger log.Logger) workload.ServiceWorkloadV1Client {\n\tcl := NewWorkloadV1(conn, logger)\n\tcl = workload.LoggingWorkloadV1MiddlewareClient(logger)(cl)\n\treturn cl\n}", "func NewWorkloadControllerFactory(ctx context.Context, client client.Client, rolloutSpec *v1alpha1.RolloutPlan,\n\ttargetWorkload, sourceWorkload *unstructured.Unstructured) *WorkloadControllerFactory {\n\treturn &WorkloadControllerFactory{\n\t\tclient: client,\n\t\trolloutSpec: rolloutSpec,\n\t\ttargetWorkload: targetWorkload,\n\t\tsourceWorkload: sourceWorkload,\n\t}\n}", "func (c mockK8sClient) GetWorkloadNoError(args args.KubernetesArgs) *kubernetes.Workload {\n\treturn &kubernetes.Workload{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: args.Name,\n\t\t\tNamespace: args.Namespace,\n\t\t},\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: args.Type,\n\t\t},\n\t\tFriendlyName: fmt.Sprintf(\"%s/%s\", strings.ToLower(args.Type), args.Name),\n\t\tPodSelector: &metav1.LabelSelector{\n\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\"release\": args.Name,\n\t\t\t},\n\t\t},\n\t\tPodTemplateSpec: &corev1.PodTemplateSpec{\n\t\t\tSpec: corev1.PodSpec{},\n\t\t},\n\t}\n}", "func NewApplication()(*Application) {\n m := &Application{\n DirectoryObject: *NewDirectoryObject(),\n }\n 
odataTypeValue := \"#microsoft.graph.application\";\n m.SetOdataType(&odataTypeValue);\n return m\n}", "func NewApplication(ctx *pulumi.Context,\n\tname string, args *ApplicationArgs, opts ...pulumi.ResourceOption) (*Application, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.ResourceGroupName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ResourceGroupName'\")\n\t}\n\taliases := pulumi.Aliases([]pulumi.Alias{\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:servicefabricmesh/v20180701preview:Application\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:servicefabricmesh:Application\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:servicefabricmesh:Application\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:servicefabricmesh/v20180901preview:Application\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:servicefabricmesh/v20180901preview:Application\"),\n\t\t},\n\t})\n\topts = append(opts, aliases)\n\tvar resource Application\n\terr := ctx.RegisterResource(\"azure-native:servicefabricmesh/v20180701preview:Application\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func New(descr string) App {\n\treturn &app{descr: descr}\n}", "func New(\n\tclient client.Client,\n\ttaskQueue string,\n\toptions Options,\n) Worker {\n\treturn internal.NewWorker(client, taskQueue, options)\n}", "func NewSynchronizationJob()(*SynchronizationJob) {\n m := &SynchronizationJob{\n Entity: *NewEntity(),\n }\n return m\n}", "func (in *PodSpecWorkloadSpec) DeepCopy() *PodSpecWorkloadSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(PodSpecWorkloadSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func newWorkloadAPIClient(agentAddress string, timeout time.Duration) workload.X509Client {\n\taddr := &net.UnixAddr{\n\t\tNet: \"unix\",\n\t\tName: agentAddress,\n\t}\n\tconfig := &workload.X509ClientConfig{\n\t\tAddr: addr,\n\t\tTimeout: timeout,\n\t}\n\treturn workload.NewX509Client(config)\n}", "func CreateWorkloadconfig(cfg *WorkloadConfig,\n\texistingDevice *persistence.ExchangeDevice,\n\terrorhandler ErrorHandler,\n\tgetWorkload exchange.WorkloadHandler,\n\tdb *bolt.DB) (bool, *persistence.WorkloadConfig) {\n\n\tglog.V(5).Infof(apiLogString(fmt.Sprintf(\"WorkloadConfig POST input: %v\", cfg)))\n\n\t// If the device is already set to use the service model, then return an error.\n\tif existingDevice.IsServiceBased() {\n\t\treturn errorhandler(NewAPIUserInputError(\"The node is configured to use services, cannot configure a workload.\", \"workload\")), nil\n\t}\n\n\t// Validate the input strings. 
The variables map can be empty if the device owner wants\n\t// the workload to use all default values, so we wont validate that map.\n\tif cfg.WorkloadURL == \"\" {\n\t\treturn errorhandler(NewAPIUserInputError(\"not specified\", \"workload_url\")), nil\n\t}\n\n\t// If version is omitted, the default is all versions.\n\tif cfg.Version == \"\" {\n\t\tcfg.Version = \"0.0.0\"\n\t}\n\n\tif !policy.IsVersionString(cfg.Version) && !policy.IsVersionExpression(cfg.Version) {\n\t\treturn errorhandler(NewAPIUserInputError(fmt.Sprintf(\"workload_version %v is not a valid version string or expression\", cfg.Version), \"workload_version\")), nil\n\t}\n\n\t// Convert the input version to a full version expression if it is not already a full expression.\n\tvExp, verr := policy.Version_Expression_Factory(cfg.Version)\n\tif verr != nil {\n\t\treturn errorhandler(NewAPIUserInputError(fmt.Sprintf(\"workload_version %v error converting to full version expression, error: %v\", cfg.Version, verr), \"workload_version\")), nil\n\t}\n\n\t// Use the device org if not explicitly specified. We cant verify whether or not the org exists because the node\n\t// we are running on might not have authority to read other orgs in the exchange.\n\torg := cfg.Org\n\tif cfg.Org == \"\" {\n\t\torg = existingDevice.Org\n\t}\n\n\t// Reject the POST if there is already a config for this workload and version range\n\texistingCfg, err := persistence.FindWorkloadConfig(db, cfg.WorkloadURL, org, vExp.Get_expression())\n\tif err != nil {\n\t\treturn errorhandler(NewSystemError(fmt.Sprintf(\"Unable to read workloadconfig object, error %v\", err))), nil\n\t} else if existingCfg != nil {\n\t\treturn errorhandler(NewConflictError(\"workloadconfig already exists\")), nil\n\t}\n\n\t// Get the workload metadata from the exchange\n\tworkloadDef, _, err := getWorkload(cfg.WorkloadURL, org, vExp.Get_expression(), cutil.ArchString())\n\tif err != nil || workloadDef == nil {\n\t\treturn errorhandler(NewAPIUserInputError(fmt.Sprintf(\"unable to find the workload definition using %v %v %v %v in the exchange.\", cfg.WorkloadURL, org, vExp.Get_expression(), cutil.ArchString()), \"workload_url\")), nil\n\t}\n\n\t// Only the UserInputAttribute is supported. 
It must include a value for all non-default userInputs in the workload definition.\n\tworkloadAttributeVerifier := func(attr persistence.Attribute) (bool, error) {\n\n\t\t// Verfiy that all non-defaulted userInput variables in the workload definition are specified in a mapped attribute\n\t\t// of this service invocation.\n\t\tif attr.GetMeta().Type == \"UserInputAttributes\" {\n\n\t\t\t// Loop through each input variable and verify that it is defined in the workload's user input section, and that the\n\t\t\t// type matches.\n\t\t\tfor varName, varValue := range attr.GetGenericMappings() {\n\t\t\t\tglog.V(5).Infof(apiLogString(fmt.Sprintf(\"WorkloadConfig checking input variable: %v\", varName)))\n\t\t\t\tif ui := workloadDef.GetUserInputName(varName); ui != nil {\n\t\t\t\t\tif err := cutil.VerifyWorkloadVarTypes(varValue, ui.Type); err != nil {\n\t\t\t\t\t\treturn errorhandler(NewAPIUserInputError(fmt.Sprintf(\"WorkloadConfig variable %v is %v\", varName, err), \"variables\")), nil\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\treturn errorhandler(NewAPIUserInputError(fmt.Sprintf(\"unable to find the workload config variable %v in workload definition %v %v %v %v\", varName, cfg.WorkloadURL, org, vExp.Get_expression(), cutil.ArchString()), \"variables\")), nil\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Loop through each userInput variable in the workload definition to make sure variables without default values have been set.\n\t\t\tfor _, ui := range workloadDef.UserInputs {\n\t\t\t\tglog.V(5).Infof(apiLogString(fmt.Sprintf(\"WorkloadConfig checking workload userInput: %v\", ui)))\n\t\t\t\tif _, ok := attr.GetGenericMappings()[ui.Name]; !ok && ui.DefaultValue == \"\" {\n\t\t\t\t\t// User Input variable is not defined in the workload config request and doesnt have a default, that's a problem.\n\t\t\t\t\treturn errorhandler(NewAPIUserInputError(fmt.Sprintf(\"WorkloadConfig does not set %v, which has no default value\", ui.Name), \"variables\")), nil\n\t\t\t\t}\n\t\t\t}\n\n\t\t} else {\n\t\t\treturn errorhandler(NewAPIUserInputError(fmt.Sprintf(\"attribute %v is not supported on workload/config\", attr.GetMeta().Type), \"workload.[attribute]\")), nil\n\t\t}\n\n\t\treturn false, nil\n\t}\n\n\t// Verify the input attributes and convert to persistent attributes. 
If a UserInputAttribute is not returned for workloads that need user input,\n\t// there is a problem.\n\tvar attributes []persistence.Attribute\n\tvar inputErrWritten bool\n\n\tattributes, inputErrWritten, err = toPersistedAttributes(errorhandler, false, existingDevice, cfg.Attributes, []AttributeVerifier{workloadAttributeVerifier})\n\tif !inputErrWritten && err != nil {\n\t\treturn errorhandler(NewSystemError(fmt.Sprintf(\"Failure validating attributes: %v\", err))), nil\n\t} else if inputErrWritten {\n\t\treturn true, nil\n\t} else if workloadDef.NeedsUserInput() {\n\t\tuia := attributesContains(attributes, \"\", \"UserInputAttributes\")\n\t\tif uia == nil {\n\t\t\treturn errorhandler(NewAPIUserInputError(fmt.Sprintf(\"workload requires userInput variables to be set, but there are no UserInputAttributes\"), \"workload.[attribute].UserInputAttributes\")), nil\n\t\t}\n\t}\n\n\t// Persist the workload configuration to the database\n\tglog.V(5).Infof(apiLogString(fmt.Sprintf(\"WorkloadConfig persisting variables: %v (%T)\", attributes, attributes)))\n\n\twc, err := persistence.NewWorkloadConfig(db, cfg.WorkloadURL, org, vExp.Get_expression(), attributes)\n\tif err != nil {\n\t\tglog.Error(apiLogString(err))\n\t\treturn errorhandler(NewSystemError(fmt.Sprintf(\"Unable to save workloadconfig object, error: %v\", err))), nil\n\t}\n\n\t// Indicate that this node is workload/microservice based.\n\tif _, err := existingDevice.SetWorkloadBased(db); err != nil {\n\t\treturn errorhandler(NewSystemError(fmt.Sprintf(\"Error setting workload mode on device object: %v\", err))), nil\n\t}\n\n\treturn false, wc\n}", "func NewProgramControl()(*ProgramControl) {\n m := &ProgramControl{\n Entity: *NewEntity(),\n }\n return m\n}", "func NewWorkbook()(*Workbook) {\n m := &Workbook{\n Entity: *NewEntity(),\n }\n return m\n}", "func newApp(infile, outfile string) *App {\n\treturn &App{\n\t\tAddressFile: infile,\n\t\tGeoDecodeFile: outfile,\n\t\tClient: &http.Client{},\n\t}\n}", "func NewRef(ws string, path string, task string) Ref {\n\trefStr := path\n\n\tif ws != \"\" {\n\t\trefStr = ws + \":\" + path\n\t}\n\n\tif task != \"\" {\n\t\trefStr = refStr + \":\" + task\n\t}\n\n\treturn Ref(refStr)\n}", "func (r workloadEndpoints) Create(ctx context.Context, res *apiv3.WorkloadEndpoint, opts options.SetOptions) (*apiv3.WorkloadEndpoint, error) {\r\n\tif res != nil {\r\n\t\t// Since we're about to default some fields, take a (shallow) copy of the input data\r\n\t\t// before we do so.\r\n\t\tresCopy := *res\r\n\t\tres = &resCopy\r\n\t}\r\n\tif err := r.assignOrValidateName(res); err != nil {\r\n\t\treturn nil, err\r\n\t} else if err := validator.Validate(res); err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\tr.updateLabelsForStorage(res)\r\n\tout, err := r.client.resources.Create(ctx, opts, apiv3.KindWorkloadEndpoint, res)\r\n\tif out != nil {\r\n\t\treturn out.(*apiv3.WorkloadEndpoint), err\r\n\t}\r\n\treturn nil, err\r\n}", "func (in *WorkloadType) DeepCopy() *WorkloadType {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(WorkloadType)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (rt *SimpleRolloutTrait) GetWorkloadReference() runtimev1alpha1.TypedReference {\n\treturn rt.Spec.WorkloadReference\n}", "func New(config Config) (*Worker, error) {\n\tw := &Worker{hasLimits: config.Limits != nil, jobs: map[string]map[string]*Job{}}\n\t// Only use limited runner when resource limits are set\n\tif w.hasLimits {\n\t\tvar err error\n\t\tif w.runner, err = newLimitedRunner(config.Limits); err != nil {\n\t\t\treturn nil, 
fmt.Errorf(\"creating limited runner: %w\", err)\n\t\t}\n\t} else {\n\t\tw.runner = newRunner()\n\t}\n\treturn w, nil\n}", "func (d *staticSiteDeployer) DeployWorkload(in *DeployWorkloadInput) (ActionRecommender, error) {\n\tconf, err := d.stackConfiguration(&in.StackRuntimeConfiguration)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := d.deploy(in.Options, svcStackConfigurationOutput{conf: conf}); err != nil {\n\t\treturn nil, err\n\t}\n\treturn noopActionRecommender{}, nil\n}", "func NewV1WorkloadSpec() *V1WorkloadSpec {\n\tthis := V1WorkloadSpec{}\n\treturn &this\n}", "func New(ringWeight int) LoadBalancer {\n\t// TODO: Implement this!\n\tnewLB := new(loadBalancer)\n\tnewLB.sortedNames = make([]MMENode, 0)\n\tnewLB.weight = ringWeight\n\tnewLB.hashRing = NewRing()\n\tif 7 == 2 {\n\t\tfmt.Println(ringWeight)\n\t}\n\treturn newLB\n}", "func newApp(name string) (app *App, err error) {\n\tapp = &App{\n\t\tName: name,\n\t\tID: uuid.NewV5(namespace, \"org.homealone.\"+name).String(),\n\t\thandler: make(map[queue.Topic]message.Handler),\n\t\tdebug: *debug,\n\t\tfilterMessages: true,\n\t}\n\tapp.Log = log.NewLogger().With(log.Fields{\"app\": name, \"id\": app.ID})\n\treturn app, errors.Wrap(err, \"newApp failed\")\n}", "func newScenario(name string) *Instruction {\n\treturn &Instruction{\n\t\tType: ScenarioInst,\n\t\tName: name,\n\t\tVersion: &Version{},\n\t}\n}", "func NewBookingWorkTimeSlot()(*BookingWorkTimeSlot) {\n m := &BookingWorkTimeSlot{\n }\n m.backingStore = ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n m.SetAdditionalData(make(map[string]any))\n return m\n}", "func NewLake(ctx *pulumi.Context,\n\tname string, args *LakeArgs, opts ...pulumi.ResourceOption) (*Lake, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.LakeId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'LakeId'\")\n\t}\n\treplaceOnChanges := pulumi.ReplaceOnChanges([]string{\n\t\t\"lakeId\",\n\t\t\"location\",\n\t\t\"project\",\n\t})\n\topts = append(opts, replaceOnChanges)\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Lake\n\terr := ctx.RegisterResource(\"google-native:dataplex/v1:Lake\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewProgram(params *utils.Params, in, out circuit.IO,\n\tconsts map[string]ConstantInst, steps []Step) (*Program, error) {\n\n\tprog := &Program{\n\t\tParams: params,\n\t\tInputs: in,\n\t\tOutputs: out,\n\t\tConstants: consts,\n\t\tSteps: steps,\n\t\twires: make(map[string]*wireAlloc),\n\t\tfreeWires: make(map[types.Size][][]*circuits.Wire),\n\t}\n\n\t// Inputs into wires.\n\tfor idx, arg := range in {\n\t\tif len(arg.Name) == 0 {\n\t\t\targ.Name = fmt.Sprintf(\"arg{%d}\", idx)\n\t\t}\n\t\twires, err := prog.Wires(arg.Name, types.Size(arg.Size))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tprog.InputWires = append(prog.InputWires, wires...)\n\t}\n\n\treturn prog, nil\n}", "func New(ctx context.Context) *backend {\n\treturn &backend{\n\t\theaps: make(map[string]*taskHeap),\n\t\tbyID: make(map[uuid.UUID]*hItem),\n\t\tnw: subq.New(),\n\t}\n}", "func NewBandApp(\n\tlogger log.Logger, db dbm.DB, traceStore io.Writer, loadLatest bool,\n\tinvCheckPeriod uint, skipUpgradeHeights map[int64]bool, home string,\n\tdisableFeelessReports bool, owasmCacheSize uint32, baseAppOptions ...func(*bam.BaseApp),\n) *BandApp {\n\tcdc := MakeCodec()\n\tbApp := 
bam.NewBaseApp(AppName, logger, db, auth.DefaultTxDecoder(cdc), baseAppOptions...)\n\tbApp.SetCommitMultiStoreTracer(traceStore)\n\tbApp.SetAppVersion(version.Version)\n\tkeys := sdk.NewKVStoreKeys(\n\t\tbam.MainStoreKey, auth.StoreKey, supply.StoreKey, staking.StoreKey, mint.StoreKey,\n\t\tdistr.StoreKey, slashing.StoreKey, gov.StoreKey, params.StoreKey, upgrade.StoreKey,\n\t\tevidence.StoreKey, oracle.StoreKey,\n\t)\n\ttKeys := sdk.NewTransientStoreKeys(params.TStoreKey)\n\tapp := &BandApp{\n\t\tBaseApp: bApp,\n\t\tcdc: cdc,\n\t\tinvCheckPeriod: invCheckPeriod,\n\t\tkeys: keys,\n\t\ttKeys: tKeys,\n\t}\n\towasmVM, err := api.NewVm(owasmCacheSize)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t// Initialize params keeper and module subspaces.\n\tapp.ParamsKeeper = params.NewKeeper(cdc, keys[params.StoreKey], tKeys[params.TStoreKey])\n\tauthSubspace := app.ParamsKeeper.Subspace(auth.DefaultParamspace)\n\tbankSubspace := app.ParamsKeeper.Subspace(bank.DefaultParamspace)\n\tstakingSubspace := app.ParamsKeeper.Subspace(staking.DefaultParamspace)\n\tmintSubspace := app.ParamsKeeper.Subspace(mint.DefaultParamspace)\n\tdistrSubspace := app.ParamsKeeper.Subspace(distr.DefaultParamspace)\n\tslashingSubspace := app.ParamsKeeper.Subspace(slashing.DefaultParamspace)\n\tevidenceSubspace := app.ParamsKeeper.Subspace(evidence.DefaultParamspace)\n\tgovSubspace := app.ParamsKeeper.Subspace(gov.DefaultParamspace).WithKeyTable(gov.ParamKeyTable())\n\tcrisisSubspace := app.ParamsKeeper.Subspace(crisis.DefaultParamspace)\n\toracleSubspace := app.ParamsKeeper.Subspace(oracle.DefaultParamspace)\n\t// Add module keepers.\n\tapp.AccountKeeper = auth.NewAccountKeeper(cdc, keys[auth.StoreKey], authSubspace, auth.ProtoBaseAccount)\n\tapp.BankKeeper = bank.NewBaseKeeper(app.AccountKeeper, bankSubspace, app.ModuleAccountAddrs())\n\tapp.SupplyKeeper = supply.NewKeeper(cdc, keys[supply.StoreKey], app.AccountKeeper, app.BankKeeper, maccPerms)\n\t// wrappedSupplyKeeper overrides burn token behavior to instead transfer to community pool.\n\twrappedSupplyKeeper := bandsupply.WrapSupplyKeeperBurnToCommunityPool(app.SupplyKeeper)\n\tstakingKeeper := staking.NewKeeper(cdc, keys[staking.StoreKey], &wrappedSupplyKeeper, stakingSubspace)\n\tapp.MintKeeper = mint.NewKeeper(cdc, keys[mint.StoreKey], mintSubspace, &stakingKeeper, app.SupplyKeeper, auth.FeeCollectorName)\n\tapp.DistrKeeper = distr.NewKeeper(cdc, keys[distr.StoreKey], distrSubspace, &stakingKeeper, app.SupplyKeeper, auth.FeeCollectorName, app.ModuleAccountAddrs())\n\t// DistrKeeper must be set afterward due to the circular reference between supply-staking-distr.\n\twrappedSupplyKeeper.SetDistrKeeper(&app.DistrKeeper)\n\tapp.CrisisKeeper = crisis.NewKeeper(crisisSubspace, invCheckPeriod, app.SupplyKeeper, auth.FeeCollectorName)\n\tapp.SlashingKeeper = slashing.NewKeeper(cdc, keys[slashing.StoreKey], &stakingKeeper, slashingSubspace)\n\tapp.UpgradeKeeper = upgrade.NewKeeper(skipUpgradeHeights, keys[upgrade.StoreKey], cdc)\n\tapp.OracleKeeper = oracle.NewKeeper(cdc, keys[oracle.StoreKey], filepath.Join(viper.GetString(cli.HomeFlag), \"files\"), auth.FeeCollectorName, oracleSubspace, app.SupplyKeeper, &stakingKeeper, app.DistrKeeper, owasmVM)\n\t// Register the proposal types.\n\tgovRouter := gov.NewRouter()\n\tgovRouter.\n\t\tAddRoute(gov.RouterKey, gov.ProposalHandler).\n\t\tAddRoute(params.RouterKey, params.NewParamChangeProposalHandler(app.ParamsKeeper)).\n\t\tAddRoute(distr.RouterKey, 
distr.NewCommunityPoolSpendProposalHandler(app.DistrKeeper)).\n\t\tAddRoute(upgrade.RouterKey, upgrade.NewSoftwareUpgradeProposalHandler(app.UpgradeKeeper))\n\tapp.GovKeeper = gov.NewKeeper(cdc, keys[gov.StoreKey], govSubspace, app.SupplyKeeper, &stakingKeeper, govRouter)\n\t// Create evidence keeper with evidence router.\n\tevidenceKeeper := evidence.NewKeeper(cdc, keys[evidence.StoreKey], evidenceSubspace, &stakingKeeper, app.SlashingKeeper)\n\tevidenceRouter := evidence.NewRouter()\n\tevidenceKeeper.SetRouter(evidenceRouter)\n\tapp.EvidenceKeeper = *evidenceKeeper\n\t// Register the staking hooks. NOTE: stakingKeeper above is passed by reference, so that it will contain these hooks.\n\tapp.StakingKeeper = *stakingKeeper.SetHooks(staking.NewMultiStakingHooks(app.DistrKeeper.Hooks(), app.SlashingKeeper.Hooks()))\n\t// Create the module manager. NOTE: Any module instantiated in the module manager that is later modified must be passed by reference here.\n\tapp.mm = module.NewManager(\n\t\tgenutil.NewAppModule(app.AccountKeeper, app.StakingKeeper, app.DeliverTx),\n\t\tauth.NewAppModule(app.AccountKeeper),\n\t\tbank.NewAppModule(app.BankKeeper, app.AccountKeeper),\n\t\tsupply.NewAppModule(app.SupplyKeeper, app.AccountKeeper),\n\t\tcrisis.NewAppModule(&app.CrisisKeeper),\n\t\tgov.NewAppModule(app.GovKeeper, app.AccountKeeper, app.SupplyKeeper),\n\t\tmint.NewAppModule(app.MintKeeper),\n\t\tslashing.NewAppModule(app.SlashingKeeper, app.AccountKeeper, app.StakingKeeper),\n\t\tdistr.NewAppModule(app.DistrKeeper, app.AccountKeeper, app.SupplyKeeper, app.StakingKeeper),\n\t\tstaking.NewAppModule(app.StakingKeeper, app.AccountKeeper, app.SupplyKeeper),\n\t\tupgrade.NewAppModule(app.UpgradeKeeper),\n\t\tevidence.NewAppModule(app.EvidenceKeeper),\n\t\toracle.NewAppModule(app.OracleKeeper),\n\t)\n\t// NOTE: Oracle module must occur before distr as it takes some fee to distribute to active oracle validators.\n\t// NOTE: During begin block slashing happens after distr.BeginBlocker so that there is nothing left\n\t// over in the validator fee pool, so as to keep the CanWithdrawInvariant invariant.\n\tapp.mm.SetOrderBeginBlockers(\n\t\tupgrade.ModuleName, mint.ModuleName, oracle.ModuleName, distr.ModuleName, slashing.ModuleName,\n\t\tevidence.ModuleName, staking.ModuleName,\n\t)\n\tapp.mm.SetOrderEndBlockers(\n\t\tcrisis.ModuleName, gov.ModuleName, staking.ModuleName, oracle.ModuleName,\n\t)\n\t// NOTE: The genutils module must occur after staking so that pools are\n\t// properly initialized with tokens from genesis accounts.\n\tapp.mm.SetOrderInitGenesis(\n\t\tauth.ModuleName, distr.ModuleName, staking.ModuleName, bank.ModuleName, supply.ModuleName,\n\t\tslashing.ModuleName, gov.ModuleName, mint.ModuleName, oracle.ModuleName, crisis.ModuleName,\n\t\tgenutil.ModuleName, evidence.ModuleName,\n\t)\n\tapp.mm.RegisterInvariants(&app.CrisisKeeper)\n\tapp.mm.RegisterRoutes(app.Router(), app.QueryRouter())\n\t// Initialize stores.\n\tapp.MountKVStores(keys)\n\tapp.MountTransientStores(tKeys)\n\t// initialize BaseApp.\n\tapp.SetInitChainer(app.InitChainer)\n\tapp.SetBeginBlocker(app.BeginBlocker)\n\tanteHandler := ante.NewAnteHandler(app.AccountKeeper, app.SupplyKeeper, auth.DefaultSigVerificationGasConsumer)\n\tif !disableFeelessReports {\n\t\tanteHandler = bandante.NewFeelessReportsAnteHandler(anteHandler, app.OracleKeeper)\n\t}\n\tapp.SetAnteHandler(anteHandler)\n\tapp.SetEndBlocker(app.EndBlocker)\n\tif loadLatest {\n\t\terr := app.LoadLatestVersion(app.keys[bam.MainStoreKey])\n\t\tif err != nil 
{\n\t\t\ttmos.Exit(err.Error())\n\t\t}\n\t}\n\treturn app\n}", "func New(job Job) *Worker {\n\treturn &Worker{\n\t\tjob: job,\n\t}\n}", "func newFromValue(clusterClient kubernetes.Interface, client k8s.Interface, wfr *v1alpha1.WorkflowRun, namespace string) (Operator, error) {\n\tf, err := client.CycloneV1alpha1().Workflows(namespace).Get(context.TODO(), wfr.Spec.WorkflowRef.Name, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &operator{\n\t\tclusterClient: clusterClient,\n\t\tclient: client,\n\t\trecorder: common.GetEventRecorder(client, common.EventSourceWfrController),\n\t\twf: f,\n\t\twfr: wfr,\n\t}, nil\n}", "func (in *WorkloadList) DeepCopy() *WorkloadList {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(WorkloadList)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func New(timeout time.Duration, someActionTitle string) *Worker {\n\treturn &Worker{\n\t\ttimeout: timeout,\n\t\taction: someActionTitle,\n\t}\n}", "func NewApplication(ldr loader.Loader, fSys fs.FileSystem) (*Application, error) {\n\tcontent, err := ldr.Load(constants.KustomizationFileName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar m types.Kustomization\n\terr = unmarshal(content, &m)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Application{kustomization: &m, ldr: ldr, fSys: fSys}, nil\n}", "func (rm *resourceManager) newCreateRequestPayload(\n\tctx context.Context,\n\tr *resource,\n) (*svcsdk.CreateModelBiasJobDefinitionInput, error) {\n\tres := &svcsdk.CreateModelBiasJobDefinitionInput{}\n\n\tif r.ko.Spec.JobDefinitionName != nil {\n\t\tres.SetJobDefinitionName(*r.ko.Spec.JobDefinitionName)\n\t}\n\tif r.ko.Spec.JobResources != nil {\n\t\tf1 := &svcsdk.MonitoringResources{}\n\t\tif r.ko.Spec.JobResources.ClusterConfig != nil {\n\t\t\tf1f0 := &svcsdk.MonitoringClusterConfig{}\n\t\t\tif r.ko.Spec.JobResources.ClusterConfig.InstanceCount != nil {\n\t\t\t\tf1f0.SetInstanceCount(*r.ko.Spec.JobResources.ClusterConfig.InstanceCount)\n\t\t\t}\n\t\t\tif r.ko.Spec.JobResources.ClusterConfig.InstanceType != nil {\n\t\t\t\tf1f0.SetInstanceType(*r.ko.Spec.JobResources.ClusterConfig.InstanceType)\n\t\t\t}\n\t\t\tif r.ko.Spec.JobResources.ClusterConfig.VolumeKMSKeyID != nil {\n\t\t\t\tf1f0.SetVolumeKmsKeyId(*r.ko.Spec.JobResources.ClusterConfig.VolumeKMSKeyID)\n\t\t\t}\n\t\t\tif r.ko.Spec.JobResources.ClusterConfig.VolumeSizeInGB != nil {\n\t\t\t\tf1f0.SetVolumeSizeInGB(*r.ko.Spec.JobResources.ClusterConfig.VolumeSizeInGB)\n\t\t\t}\n\t\t\tf1.SetClusterConfig(f1f0)\n\t\t}\n\t\tres.SetJobResources(f1)\n\t}\n\tif r.ko.Spec.ModelBiasAppSpecification != nil {\n\t\tf2 := &svcsdk.ModelBiasAppSpecification{}\n\t\tif r.ko.Spec.ModelBiasAppSpecification.ConfigURI != nil {\n\t\t\tf2.SetConfigUri(*r.ko.Spec.ModelBiasAppSpecification.ConfigURI)\n\t\t}\n\t\tif r.ko.Spec.ModelBiasAppSpecification.Environment != nil {\n\t\t\tf2f1 := map[string]*string{}\n\t\t\tfor f2f1key, f2f1valiter := range r.ko.Spec.ModelBiasAppSpecification.Environment {\n\t\t\t\tvar f2f1val string\n\t\t\t\tf2f1val = *f2f1valiter\n\t\t\t\tf2f1[f2f1key] = &f2f1val\n\t\t\t}\n\t\t\tf2.SetEnvironment(f2f1)\n\t\t}\n\t\tif r.ko.Spec.ModelBiasAppSpecification.ImageURI != nil {\n\t\t\tf2.SetImageUri(*r.ko.Spec.ModelBiasAppSpecification.ImageURI)\n\t\t}\n\t\tres.SetModelBiasAppSpecification(f2)\n\t}\n\tif r.ko.Spec.ModelBiasBaselineConfig != nil {\n\t\tf3 := &svcsdk.ModelBiasBaselineConfig{}\n\t\tif r.ko.Spec.ModelBiasBaselineConfig.BaseliningJobName != nil 
{\n\t\t\tf3.SetBaseliningJobName(*r.ko.Spec.ModelBiasBaselineConfig.BaseliningJobName)\n\t\t}\n\t\tif r.ko.Spec.ModelBiasBaselineConfig.ConstraintsResource != nil {\n\t\t\tf3f1 := &svcsdk.MonitoringConstraintsResource{}\n\t\t\tif r.ko.Spec.ModelBiasBaselineConfig.ConstraintsResource.S3URI != nil {\n\t\t\t\tf3f1.SetS3Uri(*r.ko.Spec.ModelBiasBaselineConfig.ConstraintsResource.S3URI)\n\t\t\t}\n\t\t\tf3.SetConstraintsResource(f3f1)\n\t\t}\n\t\tres.SetModelBiasBaselineConfig(f3)\n\t}\n\tif r.ko.Spec.ModelBiasJobInput != nil {\n\t\tf4 := &svcsdk.ModelBiasJobInput{}\n\t\tif r.ko.Spec.ModelBiasJobInput.EndpointInput != nil {\n\t\t\tf4f0 := &svcsdk.EndpointInput{}\n\t\t\tif r.ko.Spec.ModelBiasJobInput.EndpointInput.EndTimeOffset != nil {\n\t\t\t\tf4f0.SetEndTimeOffset(*r.ko.Spec.ModelBiasJobInput.EndpointInput.EndTimeOffset)\n\t\t\t}\n\t\t\tif r.ko.Spec.ModelBiasJobInput.EndpointInput.EndpointName != nil {\n\t\t\t\tf4f0.SetEndpointName(*r.ko.Spec.ModelBiasJobInput.EndpointInput.EndpointName)\n\t\t\t}\n\t\t\tif r.ko.Spec.ModelBiasJobInput.EndpointInput.FeaturesAttribute != nil {\n\t\t\t\tf4f0.SetFeaturesAttribute(*r.ko.Spec.ModelBiasJobInput.EndpointInput.FeaturesAttribute)\n\t\t\t}\n\t\t\tif r.ko.Spec.ModelBiasJobInput.EndpointInput.InferenceAttribute != nil {\n\t\t\t\tf4f0.SetInferenceAttribute(*r.ko.Spec.ModelBiasJobInput.EndpointInput.InferenceAttribute)\n\t\t\t}\n\t\t\tif r.ko.Spec.ModelBiasJobInput.EndpointInput.LocalPath != nil {\n\t\t\t\tf4f0.SetLocalPath(*r.ko.Spec.ModelBiasJobInput.EndpointInput.LocalPath)\n\t\t\t}\n\t\t\tif r.ko.Spec.ModelBiasJobInput.EndpointInput.ProbabilityAttribute != nil {\n\t\t\t\tf4f0.SetProbabilityAttribute(*r.ko.Spec.ModelBiasJobInput.EndpointInput.ProbabilityAttribute)\n\t\t\t}\n\t\t\tif r.ko.Spec.ModelBiasJobInput.EndpointInput.ProbabilityThresholdAttribute != nil {\n\t\t\t\tf4f0.SetProbabilityThresholdAttribute(*r.ko.Spec.ModelBiasJobInput.EndpointInput.ProbabilityThresholdAttribute)\n\t\t\t}\n\t\t\tif r.ko.Spec.ModelBiasJobInput.EndpointInput.S3DataDistributionType != nil {\n\t\t\t\tf4f0.SetS3DataDistributionType(*r.ko.Spec.ModelBiasJobInput.EndpointInput.S3DataDistributionType)\n\t\t\t}\n\t\t\tif r.ko.Spec.ModelBiasJobInput.EndpointInput.S3InputMode != nil {\n\t\t\t\tf4f0.SetS3InputMode(*r.ko.Spec.ModelBiasJobInput.EndpointInput.S3InputMode)\n\t\t\t}\n\t\t\tif r.ko.Spec.ModelBiasJobInput.EndpointInput.StartTimeOffset != nil {\n\t\t\t\tf4f0.SetStartTimeOffset(*r.ko.Spec.ModelBiasJobInput.EndpointInput.StartTimeOffset)\n\t\t\t}\n\t\t\tf4.SetEndpointInput(f4f0)\n\t\t}\n\t\tif r.ko.Spec.ModelBiasJobInput.GroundTruthS3Input != nil {\n\t\t\tf4f1 := &svcsdk.MonitoringGroundTruthS3Input{}\n\t\t\tif r.ko.Spec.ModelBiasJobInput.GroundTruthS3Input.S3URI != nil {\n\t\t\t\tf4f1.SetS3Uri(*r.ko.Spec.ModelBiasJobInput.GroundTruthS3Input.S3URI)\n\t\t\t}\n\t\t\tf4.SetGroundTruthS3Input(f4f1)\n\t\t}\n\t\tres.SetModelBiasJobInput(f4)\n\t}\n\tif r.ko.Spec.ModelBiasJobOutputConfig != nil {\n\t\tf5 := &svcsdk.MonitoringOutputConfig{}\n\t\tif r.ko.Spec.ModelBiasJobOutputConfig.KMSKeyID != nil {\n\t\t\tf5.SetKmsKeyId(*r.ko.Spec.ModelBiasJobOutputConfig.KMSKeyID)\n\t\t}\n\t\tif r.ko.Spec.ModelBiasJobOutputConfig.MonitoringOutputs != nil {\n\t\t\tf5f1 := []*svcsdk.MonitoringOutput{}\n\t\t\tfor _, f5f1iter := range r.ko.Spec.ModelBiasJobOutputConfig.MonitoringOutputs {\n\t\t\t\tf5f1elem := &svcsdk.MonitoringOutput{}\n\t\t\t\tif f5f1iter.S3Output != nil {\n\t\t\t\t\tf5f1elemf0 := &svcsdk.MonitoringS3Output{}\n\t\t\t\t\tif f5f1iter.S3Output.LocalPath != nil 
{\n\t\t\t\t\t\tf5f1elemf0.SetLocalPath(*f5f1iter.S3Output.LocalPath)\n\t\t\t\t\t}\n\t\t\t\t\tif f5f1iter.S3Output.S3UploadMode != nil {\n\t\t\t\t\t\tf5f1elemf0.SetS3UploadMode(*f5f1iter.S3Output.S3UploadMode)\n\t\t\t\t\t}\n\t\t\t\t\tif f5f1iter.S3Output.S3URI != nil {\n\t\t\t\t\t\tf5f1elemf0.SetS3Uri(*f5f1iter.S3Output.S3URI)\n\t\t\t\t\t}\n\t\t\t\t\tf5f1elem.SetS3Output(f5f1elemf0)\n\t\t\t\t}\n\t\t\t\tf5f1 = append(f5f1, f5f1elem)\n\t\t\t}\n\t\t\tf5.SetMonitoringOutputs(f5f1)\n\t\t}\n\t\tres.SetModelBiasJobOutputConfig(f5)\n\t}\n\tif r.ko.Spec.NetworkConfig != nil {\n\t\tf6 := &svcsdk.MonitoringNetworkConfig{}\n\t\tif r.ko.Spec.NetworkConfig.EnableInterContainerTrafficEncryption != nil {\n\t\t\tf6.SetEnableInterContainerTrafficEncryption(*r.ko.Spec.NetworkConfig.EnableInterContainerTrafficEncryption)\n\t\t}\n\t\tif r.ko.Spec.NetworkConfig.EnableNetworkIsolation != nil {\n\t\t\tf6.SetEnableNetworkIsolation(*r.ko.Spec.NetworkConfig.EnableNetworkIsolation)\n\t\t}\n\t\tif r.ko.Spec.NetworkConfig.VPCConfig != nil {\n\t\t\tf6f2 := &svcsdk.VpcConfig{}\n\t\t\tif r.ko.Spec.NetworkConfig.VPCConfig.SecurityGroupIDs != nil {\n\t\t\t\tf6f2f0 := []*string{}\n\t\t\t\tfor _, f6f2f0iter := range r.ko.Spec.NetworkConfig.VPCConfig.SecurityGroupIDs {\n\t\t\t\t\tvar f6f2f0elem string\n\t\t\t\t\tf6f2f0elem = *f6f2f0iter\n\t\t\t\t\tf6f2f0 = append(f6f2f0, &f6f2f0elem)\n\t\t\t\t}\n\t\t\t\tf6f2.SetSecurityGroupIds(f6f2f0)\n\t\t\t}\n\t\t\tif r.ko.Spec.NetworkConfig.VPCConfig.Subnets != nil {\n\t\t\t\tf6f2f1 := []*string{}\n\t\t\t\tfor _, f6f2f1iter := range r.ko.Spec.NetworkConfig.VPCConfig.Subnets {\n\t\t\t\t\tvar f6f2f1elem string\n\t\t\t\t\tf6f2f1elem = *f6f2f1iter\n\t\t\t\t\tf6f2f1 = append(f6f2f1, &f6f2f1elem)\n\t\t\t\t}\n\t\t\t\tf6f2.SetSubnets(f6f2f1)\n\t\t\t}\n\t\t\tf6.SetVpcConfig(f6f2)\n\t\t}\n\t\tres.SetNetworkConfig(f6)\n\t}\n\tif r.ko.Spec.RoleARN != nil {\n\t\tres.SetRoleArn(*r.ko.Spec.RoleARN)\n\t}\n\tif r.ko.Spec.StoppingCondition != nil {\n\t\tf8 := &svcsdk.MonitoringStoppingCondition{}\n\t\tif r.ko.Spec.StoppingCondition.MaxRuntimeInSeconds != nil {\n\t\t\tf8.SetMaxRuntimeInSeconds(*r.ko.Spec.StoppingCondition.MaxRuntimeInSeconds)\n\t\t}\n\t\tres.SetStoppingCondition(f8)\n\t}\n\tif r.ko.Spec.Tags != nil {\n\t\tf9 := []*svcsdk.Tag{}\n\t\tfor _, f9iter := range r.ko.Spec.Tags {\n\t\t\tf9elem := &svcsdk.Tag{}\n\t\t\tif f9iter.Key != nil {\n\t\t\t\tf9elem.SetKey(*f9iter.Key)\n\t\t\t}\n\t\t\tif f9iter.Value != nil {\n\t\t\t\tf9elem.SetValue(*f9iter.Value)\n\t\t\t}\n\t\t\tf9 = append(f9, f9elem)\n\t\t}\n\t\tres.SetTags(f9)\n\t}\n\n\treturn res, nil\n}", "func NewApplication(id int32, name string, order int32, type_ string, group ApplicationGroup, workspace ApplicationWorkspace) *Application {\n\tthis := Application{}\n\tthis.Id = id\n\tthis.Name = name\n\tthis.Order = order\n\tthis.Type = type_\n\tthis.Group = group\n\tthis.Workspace = workspace\n\treturn &this\n}", "func NewBusinessScenarioPlanner()(*BusinessScenarioPlanner) {\n m := &BusinessScenarioPlanner{\n Entity: *NewEntity(),\n }\n return m\n}", "func New(cfg *worker.Config, taskid string, devices []int) *Worker {\n\treturn &Worker{\n\t\ttaskid: taskid,\n\t\tdevices: devices,\n\t\tcfg: cfg,\n\t}\n}", "func New(p Params) (*Worker, error) {\n\tif p.SampleDir == \"\" {\n\t\treturn nil, fmt.Errorf(\"no sample directory set\")\n\t}\n\tif p.MeterAddr == \"\" {\n\t\treturn nil, fmt.Errorf(\"no meter address set\")\n\t}\n\tif p.Now == nil {\n\t\tp.Now = time.Now\n\t}\n\tif p.Interval == 0 {\n\t\tp.Interval = DefaultInterval\n\t}\n\tctx, cancel := 
context.WithCancel(context.Background())\n\tw := &Worker{\n\t\tp: p,\n\t\tctx: ctx,\n\t\tclose: cancel,\n\t}\n\tw.wg.Add(1)\n\tgo func() {\n\t\tif err := w.run(); err != nil {\n\t\t\tlog.Printf(\"sample worker for meter at %q failed: %v\", w.p.MeterAddr, err)\n\t\t}\n\t}()\n\treturn w, nil\n}", "func NewProgram(cfg *client.Config, parentName string) *tea.Program {\n\tm := NewModel(cfg)\n\tm.standalone = true\n\tm.parentName = parentName\n\treturn tea.NewProgram(m)\n}", "func newWorkoutPlanFromFile(infile string) (workoutplan, error) {\n\tmyplan := workoutplan{}\n\tdat, err := ioutil.ReadFile(infile)\n\tif err != nil {\n\t\treturn myplan, err\n\t}\n\terr = yaml.Unmarshal([]byte(dat), &myplan)\n\tif err != nil {\n\t\treturn myplan, err\n\t}\n\treturn myplan, err\n}", "func WorkloadPush(ctx context.Context, db *mongo.Database, w ...Workload) error {\n\tcol := db.Collection(queueCollection)\n\tdocs := make([]interface{}, 0, len(w))\n\tfor _, wl := range w {\n\t\tdocs = append(docs, wl)\n\t}\n\t_, err := col.InsertMany(ctx, docs)\n\n\treturn err\n}", "func NewApp(useMBTS, useSpecialEBmods bool) *App {\n\tapp := &App{\n\t\tworkers: []Worker{},\n\t\tdetector: nil,\n\t}\n\tapp.msg(\"Welcome to Go-TUCS (pid=%d). Building detector tree...\\n\", os.Getpid())\n\tapp.detector = TileCal(useMBTS, useSpecialEBmods)\n\tapp.msg(\"done.\\n\")\n\treturn app\n}", "func newSubScenario(name string) *Instruction {\n\treturn &Instruction{\n\t\tType: SubScenarioInst,\n\t\tName: name,\n\t\tVersion: &Version{},\n\t}\n}", "func (ot *openTelemetryWrapper) newResource(\n\twebEngineName,\n\twebEngineVersion string,\n) (*resource.Resource, error) {\n\treturn resource.Merge(resource.Default(), resource.NewSchemaless(\n\t\tsemconv.WebEngineName(webEngineName),\n\t\tsemconv.WebEngineVersion(webEngineVersion),\n\t))\n}", "func FetchWorkload(dynamicClient dynamic.Interface, restMapper meta.RESTMapper, resource workv1alpha1.ObjectReference) (*unstructured.Unstructured, error) {\n\tdynamicResource, err := restmapper.GetGroupVersionResource(restMapper,\n\t\tschema.FromAPIVersionAndKind(resource.APIVersion, resource.Kind))\n\tif err != nil {\n\t\tklog.Errorf(\"Failed to get GVR from GVK %s %s. Error: %v\", resource.APIVersion,\n\t\t\tresource.Kind, err)\n\t\treturn nil, err\n\t}\n\n\tworkload, err := dynamicClient.Resource(dynamicResource).Namespace(resource.Namespace).Get(context.TODO(),\n\t\tresource.Name, metav1.GetOptions{})\n\tif err != nil {\n\t\tklog.Errorf(\"Failed to get workload, kind: %s, namespace: %s, name: %s. 
Error: %v\",\n\t\t\tresource.Kind, resource.Namespace, resource.Name, err)\n\t\treturn nil, err\n\t}\n\n\treturn workload, nil\n}", "func (in *HealthService) GetWorkloadHealth(ctx context.Context, namespace, cluster, workload, rateInterval string, queryTime time.Time, w *models.Workload) (models.WorkloadHealth, error) {\n\tvar end observability.EndFunc\n\t_, end = observability.StartSpan(ctx, \"GetWorkloadHealth\",\n\t\tobservability.Attribute(\"package\", \"business\"),\n\t\tobservability.Attribute(\"namespace\", namespace),\n\t\tobservability.Attribute(\"workload\", workload),\n\t\tobservability.Attribute(\"rateInterval\", rateInterval),\n\t\tobservability.Attribute(\"queryTime\", queryTime),\n\t)\n\tdefer end()\n\n\t// Perf: do not bother fetching request rate if workload has no sidecar\n\tif !w.IstioSidecar && !w.IsGateway() {\n\t\treturn models.WorkloadHealth{\n\t\t\tWorkloadStatus: w.CastWorkloadStatus(),\n\t\t\tRequests: models.NewEmptyRequestHealth(),\n\t\t}, nil\n\t}\n\n\t// Add Telemetry info\n\trate, err := in.getWorkloadRequestsHealth(namespace, cluster, workload, rateInterval, queryTime, w)\n\treturn models.WorkloadHealth{\n\t\tWorkloadStatus: w.CastWorkloadStatus(),\n\t\tRequests: rate,\n\t}, err\n}", "func (db *Database) New(sid int, params *NewParams) (*Workout, error) {\n\t// Create a new Workout\n\tworkout := &Workout{\n\t\tSessionID: sid,\n\t\tExerciseID: params.ExerciseID,\n\t\tWeight: params.Weight,\n\t\tReps: params.Reps,\n\t\tSets: params.Sets,\n\t}\n\n\t// Create variable to hold the result.\n\tvar res sql.Result\n\tvar err error\n\n\t// Execute the query.\n\tif res, err = db.db.Exec(stmtInsert, workout.SessionID, workout.ExerciseID, workout.Weight, workout.Reps, workout.Sets); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Get last insert ID.\n\tid, err := res.LastInsertId()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Define workout.ID\n\tworkout.ID = int(id)\n\n\treturn workout, nil\n}", "func NewJob(opts BulkOptions) (string, error) {\n\ttpl := BulkTemps.NewJob\n\ts, err := RenderTemplate(tpl, opts)\n\tlog.Printf(\"job query: %s\", s)\n\treturn s, err\n}", "func NewWorkflow(ctx *pulumi.Context,\n\tname string, args *WorkflowArgs, opts ...pulumi.ResourceOption) (*Workflow, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.WorkflowId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'WorkflowId'\")\n\t}\n\treplaceOnChanges := pulumi.ReplaceOnChanges([]string{\n\t\t\"location\",\n\t\t\"project\",\n\t\t\"workflowId\",\n\t})\n\topts = append(opts, replaceOnChanges)\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Workflow\n\terr := ctx.RegisterResource(\"google-native:workflows/v1beta:Workflow\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewApplication(ctx *pulumi.Context,\n\tname string, args *ApplicationArgs, opts ...pulumi.ResourceOption) (*Application, error) {\n\tif args == nil {\n\t\targs = &ApplicationArgs{}\n\t}\n\n\treplaceOnChanges := pulumi.ReplaceOnChanges([]string{\n\t\t\"location\",\n\t\t\"project\",\n\t})\n\topts = append(opts, replaceOnChanges)\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Application\n\terr := ctx.RegisterResource(\"google-native:appengine/v1beta:Application\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewWithCapacity(workers, capacity int) Interface {\n\ti, _ := 
NewWithContext(context.Background(), workers, capacity)\n\treturn i\n}", "func NewWorkflow(ctx *pulumi.Context,\n\tname string, args *WorkflowArgs, opts ...pulumi.ResourceOption) (*Workflow, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.WorkflowId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'WorkflowId'\")\n\t}\n\tvar resource Workflow\n\terr := ctx.RegisterResource(\"google-native:workflows/v1beta:Workflow\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewLoadBalance(name string) LoadBalance {\n\treturn LBS[name]()\n}", "func newWorker(f resources.Feature, t resources.Targetable, m installmethod.Enum, a installaction.Enum, cb alterCommandCB) (*worker, fail.Error) {\n\tw := worker{\n\t\tfeature: f.(*Feature),\n\t\ttarget: t,\n\t\tmethod: m,\n\t\taction: a,\n\t\tcommandCB: cb,\n\t}\n\tswitch t.TargetType() {\n\tcase featuretargettype.Cluster:\n\t\tw.cluster = t.(*Cluster)\n\t// case featuretargettype.Node:\n\t// \tw.node = true\n\t// \tfallthrough\n\tcase featuretargettype.Host:\n\t\tw.host = t.(*Host)\n\t}\n\n\tif m != installmethod.None {\n\t\tw.rootKey = \"feature.install.\" + strings.ToLower(m.String()) + \".\" + strings.ToLower(a.String())\n\t\tif !f.(*Feature).Specs().IsSet(w.rootKey) {\n\t\t\tmsg := `syntax error in Feature '%s' specification file (%s):\n\t\t\t\tno key '%s' found`\n\t\t\treturn nil, fail.SyntaxError(msg, f.GetName(), f.GetDisplayFilename(), w.rootKey)\n\t\t}\n\t}\n\n\treturn &w, nil\n}", "func newFromName(clusterClient kubernetes.Interface, client k8s.Interface, wfr, namespace string) (Operator, error) {\n\tw, err := client.CycloneV1alpha1().WorkflowRuns(namespace).Get(context.TODO(), wfr, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &operator{\n\t\tclusterClient: clusterClient,\n\t\tclient: client,\n\t\trecorder: common.GetEventRecorder(client, common.EventSourceWfrController),\n\t\twfr: w,\n\t}, nil\n}", "func NewRunBook(ctx *pulumi.Context,\n\tname string, args *RunBookArgs, opts ...pulumi.ResourceOption) (*RunBook, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.AutomationAccountName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'AutomationAccountName'\")\n\t}\n\tif args.LogProgress == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'LogProgress'\")\n\t}\n\tif args.LogVerbose == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'LogVerbose'\")\n\t}\n\tif args.ResourceGroupName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ResourceGroupName'\")\n\t}\n\tif args.RunbookType == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'RunbookType'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource RunBook\n\terr := ctx.RegisterResource(\"azure:automation/runBook:RunBook\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewAnalysis(ctx *pulumi.Context,\n\tname string, args *AnalysisArgs, opts ...pulumi.ResourceOption) (*Analysis, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.AnalysisId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'AnalysisId'\")\n\t}\n\tif args.AwsAccountId == nil 
{\n\t\treturn nil, errors.New(\"invalid value for required argument 'AwsAccountId'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Analysis\n\terr := ctx.RegisterResource(\"aws-native:quicksight:Analysis\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func CurrentWorkload() int {\n\t// TODO\n\treturn 50\n}", "func NewWitness(ws WitnessStore, logURL string, logSigVerifier note.Verifier, pollInterval time.Duration) (*Witness, error) {\n\tgcpRaw, err := ws.RetrieveCP()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"new witness failed due to storage retrieval: %w\", err)\n\t}\n\tgcp := api.LogCheckpoint{\n\t\tEnvelope: gcpRaw,\n\t}\n\tif len(gcpRaw) > 0 {\n\t\tcp, err := api.ParseCheckpoint(gcpRaw, logSigVerifier)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to open stored checkpoint: %w\", err)\n\t\t}\n\t\tgcp = *cp\n\t}\n\n\treturn &Witness{\n\t\tws: ws,\n\t\tgcp: gcp,\n\t\tlogURL: logURL,\n\t\tlogSigVerifier: logSigVerifier,\n\t\tpollInterval: pollInterval,\n\t}, nil\n}", "func NewWorker(chunkStore ChunkStore, analysis Analysis) *Worker {\n\treturn &Worker{\n\t\tchunkStore: chunkStore,\n\t\tanalysis: analysis,\n\t}\n}", "func NewWorkflow(option WorkflowOption) (*Workflow, error) {\n\tblobsDir := filepath.Join(option.TargetDir, \"blobs\")\n\tif err := os.RemoveAll(blobsDir); err != nil {\n\t\treturn nil, errors.Wrap(err, \"Remove blob directory\")\n\t}\n\tif err := os.MkdirAll(blobsDir, 0755); err != nil {\n\t\treturn nil, errors.Wrap(err, \"Create blob directory\")\n\t}\n\n\tbackendConfig := fmt.Sprintf(`{\"dir\": \"%s\"}`, blobsDir)\n\tbuilder := NewBuilder(option.NydusImagePath)\n\n\tdebugJSONPath := filepath.Join(option.TargetDir, \"output.json\")\n\n\tif option.PrefetchDir == \"\" {\n\t\toption.PrefetchDir = \"/\"\n\t}\n\n\treturn &Workflow{\n\t\tWorkflowOption: option,\n\t\tblobsDir: blobsDir,\n\t\tbackendConfig: backendConfig,\n\t\tbuilder: builder,\n\t\tdebugJSONPath: debugJSONPath,\n\t}, nil\n}", "func NewApp(name string) *App {\n\treturn &App{Name: name, Labels: make(map[string]string)}\n}", "func New() App {\n\treturn App{}\n}", "func NewBuilder(name, resources string, opts ...BuilderOptionFunc) (*Builder, error) {\n\tb := &Builder{\n\t\tname: name,\n\t\tresourcesPath: resources,\n\t}\n\n\tfor _, opt := range opts {\n\t\tif err := opt(b); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif b.timeout == 0 {\n\t\tb.timeout = defaultTimeout\n\t}\n\n\tif b.dryRun && b.kustomizationFile == \"\" {\n\t\treturn nil, fmt.Errorf(\"kustomization file is required for dry-run\")\n\t}\n\n\tif !b.dryRun && b.client == nil {\n\t\treturn nil, fmt.Errorf(\"client is required for live run\")\n\t}\n\n\treturn b, nil\n}", "func NewMining(minter sdk.AccAddress, tally int64) Mining {\n\treturn Mining{\n\t\tMinter: minter,\n\t\tLastTime: 0,\n\t\tTally: tally,\n\t}\n}", "func NewWorkSummary() *WorkSummary {\n\treturn &WorkSummary{}\n}", "func New(workers int) *App {\n\ta := &App{}\n\n\ta.jobs = make(chan Job, workers)\n\n\ta.metrics = Metrics{\n\t\tpromauto.NewGauge(\n\t\t\tprometheus.GaugeOpts{\n\t\t\t\tName: \"observ_worker_utilization\",\n\t\t\t\tHelp: \"number of busy workers\",\n\t\t\t},\n\t\t),\n\t\tpromauto.NewGauge(\n\t\t\tprometheus.GaugeOpts{\n\t\t\t\tName: \"observ_worker_saturation\",\n\t\t\t\tHelp: \"number of queued jobs\",\n\t\t\t},\n\t\t),\n\t\tpromauto.NewCounter(\n\t\t\tprometheus.CounterOpts{\n\t\t\t\tName: \"observ_worker_errors\",\n\t\t\t\tHelp: \"failed job 
count\",\n\t\t\t},\n\t\t),\n\t}\n\n\tfor i := 0; i < workers; i++ {\n\t\ta.createWorker()\n\t}\n\treturn a\n}", "func (o ApplicationStatusServicesOutput) WorkloadDefinition() ApplicationStatusServicesWorkloaddefinitionPtrOutput {\n\treturn o.ApplyT(func(v ApplicationStatusServices) *ApplicationStatusServicesWorkloaddefinition {\n\t\treturn v.WorkloadDefinition\n\t}).(ApplicationStatusServicesWorkloaddefinitionPtrOutput)\n}", "func (t *UseCase_UseCase_UseCase) NewApplication(Application string) (*UseCase_UseCase_UseCase_Application, error){\n\n\t// Initialise the list within the receiver struct if it has not already been\n\t// created.\n\tif t.Application == nil {\n\t\tt.Application = make(map[string]*UseCase_UseCase_UseCase_Application)\n\t}\n\n\tkey := Application\n\n\t// Ensure that this key has not already been used in the\n\t// list. Keyed YANG lists do not allow duplicate keys to\n\t// be created.\n\tif _, ok := t.Application[key]; ok {\n\t\treturn nil, fmt.Errorf(\"duplicate key %v for list Application\", key)\n\t}\n\n\tt.Application[key] = &UseCase_UseCase_UseCase_Application{\n\t\tApplication: &Application,\n\t}\n\n\treturn t.Application[key], nil\n}", "func New(options ...Option) Application {\n\topts := &Options{}\n\tfor _, opt := range options {\n\t\topt(opts)\n\t}\n\n\tif opts.StartupTimeout == 0 {\n\t\topts.StartupTimeout = 1000\n\t}\n\tif opts.ShutdownTimeout == 0 {\n\t\topts.ShutdownTimeout = 5000\n\t}\n\n\tif opts.AutoMaxProcs == nil || *opts.AutoMaxProcs {\n\t\tprocsutil.EnableAutoMaxProcs()\n\t}\n\n\tconfig.AppendServiceTag(opts.Tags...)\n\n\tapp := &application{\n\t\tquit: make(chan os.Signal),\n\t\tstartupTimeout: opts.StartupTimeout,\n\t\tshutdownTimeout: opts.ShutdownTimeout,\n\t\tboxes: append(opts.Boxes, &boxMetric{}),\n\t}\n\n\tsignal.Notify(app.quit, syscall.SIGINT, syscall.SIGTERM)\n\n\treturn app\n}", "func NewApplication(ctx *pulumi.Context,\n\tname string, args *ApplicationArgs, opts ...pulumi.ResourceOption) (*Application, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.ApplicationName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ApplicationName'\")\n\t}\n\tif args.ClusterId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ClusterId'\")\n\t}\n\tif args.PackageType == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'PackageType'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Application\n\terr := ctx.RegisterResource(\"alicloud:edas/application:Application\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}" ]
[ "0.6290281", "0.6186775", "0.59179425", "0.57543576", "0.5292014", "0.52139693", "0.52035826", "0.5164044", "0.51574534", "0.5087843", "0.5084347", "0.50763017", "0.49866802", "0.498484", "0.49624205", "0.4953039", "0.49443993", "0.49021307", "0.48930544", "0.47866622", "0.47664785", "0.47631654", "0.47606543", "0.47568884", "0.47461355", "0.4718368", "0.47145802", "0.47012067", "0.46874923", "0.46730328", "0.46116167", "0.45947582", "0.458292", "0.45798042", "0.45744094", "0.4572663", "0.45636237", "0.4559875", "0.45418507", "0.4527233", "0.4524534", "0.45183536", "0.4511555", "0.4504097", "0.44962844", "0.4495054", "0.44929066", "0.44829223", "0.44757998", "0.44732887", "0.44614345", "0.44613624", "0.44457933", "0.4440733", "0.44297242", "0.44260815", "0.44196263", "0.44195068", "0.44149804", "0.44127375", "0.44017988", "0.4399362", "0.43928543", "0.43851155", "0.43796247", "0.43662295", "0.4366148", "0.43606266", "0.43507338", "0.43415126", "0.43388283", "0.43352813", "0.4327793", "0.43268204", "0.43202633", "0.43198994", "0.43168724", "0.4315665", "0.4310806", "0.4306111", "0.43050277", "0.4300102", "0.42999434", "0.4299797", "0.42834008", "0.42791006", "0.42761245", "0.42722514", "0.42633873", "0.4260376", "0.42581895", "0.42576212", "0.42566574", "0.42534176", "0.42533997", "0.4249739", "0.4233049", "0.422774", "0.42195457", "0.4216342" ]
0.78468955
0
BoundServicesChange imports the currently bound services into the deployment. It takes a ServiceList, not just names, as it has to create/retrieve the associated service binding secrets. It further takes a set of the old services. This enables incremental modification of the deployment (add, remove affected, instead of wholesale replacement).
func (a *Workload) BoundServicesChange(ctx context.Context, userName string, oldServices NameSet, newServices interfaces.ServiceList) error {
	app, err := Get(ctx, a.cluster, a.app)
	if err != nil {
		// Should not happen. Application was validated to exist
		// already somewhere by callers.
		return err
	}

	owner := metav1.OwnerReference{
		APIVersion: app.GetAPIVersion(),
		Kind:       app.GetKind(),
		Name:       app.GetName(),
		UID:        app.GetUID(),
	}

	bindings, err := ToBinds(ctx, newServices, a.app.Name, owner, userName)
	if err != nil {
		return err
	}

	// Create a name-keyed map from the new service slice for quick lookup and decision. No linear searches.
	new := map[string]struct{}{}
	for _, s := range newServices {
		new[s.Name()] = struct{}{}
	}

	// Read, modify and write the deployment
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		// Retrieve the latest version of Deployment before attempting update
		// RetryOnConflict uses exponential backoff to avoid exhausting the apiserver
		deployment, err := a.Deployment(ctx)
		if err != nil {
			return err
		}

		// The action is done in multiple iterations over the deployment's volumes and volumemounts.
		// The first iteration over each determines removed services (in old, not in new). The second
		// iteration, over the new services now, adds all which are not in old, i.e. actually new.
		newVolumes := []corev1.Volume{}
		newMounts := []corev1.VolumeMount{}

		for _, volume := range deployment.Spec.Template.Spec.Volumes {
			_, hasold := oldServices[volume.Name]
			_, hasnew := new[volume.Name]

			// Note that volumes which are not in old are passed and kept. These are the volumes
			// not related to services.
			if hasold && !hasnew {
				continue
			}

			newVolumes = append(newVolumes, volume)
		}

		// TODO: Iterate over containers and find the one matching the app name
		for _, mount := range deployment.Spec.Template.Spec.Containers[0].VolumeMounts {
			_, hasold := oldServices[mount.Name]
			_, hasnew := new[mount.Name]

			// Note that mounts which are not in old are passed and kept. These are the mounts
			// not related to services.
			if hasold && !hasnew {
				continue
			}

			newMounts = append(newMounts, mount)
		}

		for _, binding := range bindings {
			// Skip services which already exist
			if _, hasold := oldServices[binding.service]; hasold {
				continue
			}

			newVolumes = append(newVolumes, corev1.Volume{
				Name: binding.service,
				VolumeSource: corev1.VolumeSource{
					Secret: &corev1.SecretVolumeSource{
						SecretName: binding.resource,
					},
				},
			})

			newMounts = append(newMounts, corev1.VolumeMount{
				Name:      binding.service,
				ReadOnly:  true,
				MountPath: fmt.Sprintf("/services/%s", binding.service),
			})
		}

		// Write the changed set of mounts and volumes back to the deployment ...
		deployment.Spec.Template.Spec.Volumes = newVolumes
		deployment.Spec.Template.Spec.Containers[0].VolumeMounts = newMounts

		// ... and then the cluster.
		_, err = a.cluster.Kubectl.AppsV1().Deployments(a.app.Org).Update(
			ctx, deployment, metav1.UpdateOptions{})

		return err
	})
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (service *Service) HandelBoundServices(d9SecurityGroupID, policyType string, boundService BoundServicesRequest) (*CloudSecurityGroupResponse, *http.Response, error) {\n\tv := new(CloudSecurityGroupResponse)\n\trelativeURL := fmt.Sprintf(\"%s/%s/%s/%s\", awsSgResourcePath, d9SecurityGroupID, awsSgResourceServices, policyType)\n\n\tresp, err := service.Client.NewRequestDo(\"POST\", relativeURL, nil, boundService, v)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn v, resp, nil\n}", "func (l *LoadBalancerEmulator) PatchServices() ([]string, error) {\n\treturn l.applyOnLBServices(l.updateService)\n}", "func (s *tprStorage) ListServiceBindings() ([]*scmodel.ServiceBinding, error) {\n\tl, err := s.watcher.GetResourceClient(watch.ServiceBinding, \"default\").List(&v1.ListOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar ret []*scmodel.ServiceBinding\n\tfor _, i := range l.(*runtime.UnstructuredList).Items {\n\t\tvar tmp scmodel.ServiceBinding\n\t\terr := util.TPRObjectToSCObject(i, &tmp)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Failed to convert object: %v\\n\", err)\n\t\t\treturn nil, err\n\t\t}\n\t\tret = append(ret, &tmp)\n\t}\n\treturn ret, nil\n}", "func (c *cfService) ServiceBindings() ServiceBindings {\n\treturn newServiceBindingAPI(c.Client)\n}", "func (c *Config) AppendServices(newServices []*services.ServiceConfig) error {\n\tlog.Printf(\"Appending %d services.\\n\", len(newServices))\n\tif c.ServiceMap == nil {\n\t\tc.ServiceMap = make(map[string]*services.ServiceConfig)\n\t}\n\tfor _, s := range newServices {\n\t\tif _, found := c.ServiceMap[s.Name]; !found {\n\t\t\tc.ServiceMap[s.Name] = s\n\t\t\tc.Services = append(c.Services, *s)\n\t\t}\n\t}\n\treturn nil\n}", "func (m *BookingBusiness) SetServices(value []BookingServiceable)() {\n err := m.GetBackingStore().Set(\"services\", value)\n if err != nil {\n panic(err)\n }\n}", "func pushServices(ch services.ServicesChannel, definitions []Definition, adding bool) {\n\t// Iterate and convert the backend definitions into services\n\tfor _, definition := range definitions {\n\t\t// Attempt to convert the definition to a service request\n\t\tservice, err := definition.GetService()\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Failed to convert the definition: %s to a service, error: %s\", definition, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tvar event services.ServiceEvent\n\t\tevent.Service = service\n\t\tevent.Action = services.SERVICE_REMOVAL\n\t\tif adding {\n\t\t\tevent.Action = services.SERVICE_REQUEST\n\t\t}\n\n\t\t// We perform this in a go-routine not to allow a receiver from blocking us\n\t\tgo func() {\n\t\t\tch <- event\n\t\t}()\n\t}\n}", "func (sdk *SDK) Bind(namespace, bindingName, externalID, instanceName, secretName string,\n\tparams interface{}, secrets map[string]string) (*v1beta1.ServiceBinding, error) {\n\n\t// Manually defaulting the name of the binding\n\t// I'm not doing the same for the secret since the API handles defaulting that value.\n\tif bindingName == \"\" {\n\t\tbindingName = instanceName\n\t}\n\n\trequest := &v1beta1.ServiceBinding{\n\t\tObjectMeta: v1.ObjectMeta{\n\t\t\tName: bindingName,\n\t\t\tNamespace: namespace,\n\t\t},\n\t\tSpec: v1beta1.ServiceBindingSpec{\n\t\t\tExternalID: externalID,\n\t\t\tServiceInstanceRef: v1beta1.LocalObjectReference{\n\t\t\t\tName: instanceName,\n\t\t\t},\n\t\t\tSecretName: secretName,\n\t\t\tParameters: BuildParameters(params),\n\t\t\tParametersFrom: BuildParametersFrom(secrets),\n\t\t},\n\t}\n\n\tresult, err := 
sdk.ServiceCatalog().ServiceBindings(namespace).Create(request)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"bind request failed (%s)\", err)\n\t}\n\n\treturn result, nil\n}", "func Bind(\n\thost string,\n\tport int,\n\tusername string,\n\tpassword string,\n\tinstanceID string,\n\tparams map[string]string,\n) (string, map[string]interface{}, error) {\n\tbindingID := uuid.NewV4().String()\n\turl := fmt.Sprintf(\n\t\t\"%s/v2/service_instances/%s/service_bindings/%s\",\n\t\tgetBaseURL(host, port),\n\t\tinstanceID,\n\t\tbindingID,\n\t)\n\tbindingRequest := &api.BindingRequest{\n\t\tParameters: params,\n\t}\n\tjson, err := bindingRequest.ToJSON()\n\tif err != nil {\n\t\treturn \"\", nil, fmt.Errorf(\"error encoding request body: %s\", err)\n\t}\n\treq, err := http.NewRequest(\n\t\thttp.MethodPut,\n\t\turl,\n\t\tbytes.NewBuffer(json),\n\t)\n\tif err != nil {\n\t\treturn \"\", nil, fmt.Errorf(\"error building request: %s\", err)\n\t}\n\tif username != \"\" || password != \"\" {\n\t\taddAuthHeader(req, username, password)\n\t}\n\thttpClient := &http.Client{}\n\tresp, err := httpClient.Do(req)\n\tif err != nil {\n\t\treturn \"\", nil, fmt.Errorf(\"error executing bind call: %s\", err)\n\t}\n\tbodyBytes, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", nil, fmt.Errorf(\"error reading response body: %s\", err)\n\t}\n\tdefer resp.Body.Close() // nolint: errcheck\n\tif resp.StatusCode != http.StatusCreated {\n\t\treturn \"\", nil, fmt.Errorf(\n\t\t\t\"unanticipated http response code %d\",\n\t\t\tresp.StatusCode,\n\t\t)\n\t}\n\tbindingResponse := &api.BindingResponse{}\n\terr = api.GetBindingResponseFromJSON(bodyBytes, bindingResponse)\n\tif err != nil {\n\t\treturn \"\", nil, fmt.Errorf(\"error unmarshaling response body: %s\", err)\n\t}\n\tcredsMap, ok := bindingResponse.Credentials.(map[string]interface{})\n\tif !ok {\n\t\treturn \"\", nil, fmt.Errorf(\"error unmarshaling response body: %s\", err)\n\t}\n\treturn bindingID, credsMap, nil\n}", "func (r *app) ListServiceBindings(appGUID string) ([]ServiceBinding, error) {\n\trawURL := fmt.Sprintf(\"/v2/apps/%s/service_bindings\", appGUID)\n\treq := rest.GetRequest(rawURL)\n\thttpReq, err := req.Build()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpath := httpReq.URL.String()\n\tsb, err := listServiceBindingWithPath(r.client, path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn sb, nil\n}", "func (c *backingservices) List(opts kapi.ListOptions) (result *backingserviceapi.BackingServiceList, err error) {\n\tresult = &backingserviceapi.BackingServiceList{}\n\terr = c.r.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"backingservices\").\n\t\tVersionedParams(&opts, kapi.ParameterCodec).\n\t\tDo().\n\t\tInto(result)\n\treturn\n}", "func (b *AwsBroker) Bind(request *osb.BindRequest, c *broker.RequestContext) (*broker.BindResponse, error) {\n\tglog.V(10).Infof(\"request=%+v\", *request)\n\n\tbinding := &serviceinstance.ServiceBinding{\n\t\tID: request.BindingID,\n\t\tInstanceID: request.InstanceID,\n\t}\n\n\t// Get the binding params\n\tfor k, v := range request.Parameters {\n\t\tif strings.EqualFold(k, bindParamRoleName) {\n\t\t\tbinding.RoleName = paramValue(v)\n\t\t} else if strings.EqualFold(k, bindParamScope) {\n\t\t\tbinding.Scope = paramValue(v)\n\t\t} else {\n\t\t\tdesc := fmt.Sprintf(\"The parameter %s is not supported.\", k)\n\t\t\treturn nil, newHTTPStatusCodeError(http.StatusBadRequest, \"\", desc)\n\t\t}\n\t}\n\n\t// Verify that the binding doesn't already exist\n\tsb, err := 
b.db.DataStorePort.GetServiceBinding(binding.ID)\n\tif err != nil {\n\t\tdesc := fmt.Sprintf(\"Failed to get the service binding %s: %v\", binding.ID, err)\n\t\treturn nil, newHTTPStatusCodeError(http.StatusInternalServerError, \"\", desc)\n\t} else if sb != nil {\n\t\tif sb.Match(binding) {\n\t\t\tglog.Infof(\"Service binding %s already exists.\", binding.ID)\n\t\t\tresponse := broker.BindResponse{}\n\t\t\tresponse.Exists = true\n\t\t\treturn &response, nil\n\t\t}\n\t\tdesc := fmt.Sprintf(\"Service binding %s already exists but with different attributes.\", binding.ID)\n\t\treturn nil, newHTTPStatusCodeError(http.StatusConflict, \"\", desc)\n\t}\n\n\t// Get the service (this is only required because the USER_KEY_ID and\n\t// USER_SECRET_KEY credentials need to be prefixed with the service name for\n\t// backward compatibility)\n\tservice, err := b.db.DataStorePort.GetServiceDefinition(request.ServiceID)\n\tif err != nil {\n\t\tdesc := fmt.Sprintf(\"Failed to get the service %s: %v\", request.ServiceID, err)\n\t\treturn nil, newHTTPStatusCodeError(http.StatusInternalServerError, \"\", desc)\n\t} else if service == nil {\n\t\tdesc := fmt.Sprintf(\"The service %s was not found.\", request.ServiceID)\n\t\treturn nil, newHTTPStatusCodeError(http.StatusBadRequest, \"\", desc)\n\t}\n\n\t// Get the instance\n\tinstance, err := b.db.DataStorePort.GetServiceInstance(binding.InstanceID)\n\tif err != nil {\n\t\tdesc := fmt.Sprintf(\"Failed to get the service instance %s: %v\", binding.InstanceID, err)\n\t\treturn nil, newHTTPStatusCodeError(http.StatusInternalServerError, \"\", desc)\n\t} else if instance == nil {\n\t\tdesc := fmt.Sprintf(\"The service instance %s was not found.\", binding.InstanceID)\n\t\treturn nil, newHTTPStatusCodeError(http.StatusBadRequest, \"\", desc)\n\t}\n\n\tsess := b.GetSession(b.keyid, b.secretkey, b.region, b.accountId, b.profile, instance.Params)\n\n\t// Get the CFN stack outputs\n\tresp, err := b.Clients.NewCfn(sess).Client.DescribeStacks(&cloudformation.DescribeStacksInput{\n\t\tStackName: aws.String(instance.StackID),\n\t})\n\tif err != nil {\n\t\tdesc := fmt.Sprintf(\"Failed to describe the CloudFormation stack %s: %v\", instance.StackID, err)\n\t\treturn nil, newHTTPStatusCodeError(http.StatusInternalServerError, \"\", desc)\n\t}\n\n\t// Get the credentials from the CFN stack outputs\n\tcredentials, err := getCredentials(service, resp.Stacks[0].Outputs, b.Clients.NewSsm(sess))\n\tif err != nil {\n\t\tdesc := fmt.Sprintf(\"Failed to get the credentials from CloudFormation stack %s: %v\", instance.StackID, err)\n\t\treturn nil, newHTTPStatusCodeError(http.StatusInternalServerError, \"\", desc)\n\t}\n\n\tif binding.RoleName != \"\" {\n\t\tpolicyArn, err := getPolicyArn(resp.Stacks[0].Outputs, binding.Scope)\n\t\tif err != nil {\n\t\t\tdesc := fmt.Sprintf(\"The CloudFormation stack %s does not support binding with scope '%s': %v\", instance.StackID, binding.Scope, err)\n\t\t\treturn nil, newHTTPStatusCodeError(http.StatusBadRequest, \"\", desc)\n\t\t}\n\n\t\t// Attach the scoped policy to the role\n\t\t_, err = b.Clients.NewIam(sess).AttachRolePolicy(&iam.AttachRolePolicyInput{\n\t\t\tPolicyArn: aws.String(policyArn),\n\t\t\tRoleName: aws.String(binding.RoleName),\n\t\t})\n\t\tif err != nil {\n\t\t\tdesc := fmt.Sprintf(\"Failed to attach the policy %s to role %s: %v\", policyArn, binding.RoleName, err)\n\t\t\treturn nil, newHTTPStatusCodeError(http.StatusInternalServerError, \"\", desc)\n\t\t}\n\n\t\tbinding.PolicyArn = policyArn\n\t}\n\n\tif bindViaLambda(service) 
{\n\t\t// Copy instance and binding IDs into credentials to\n\t\t// be used as identifiers for resources we create in\n\t\t// lambda so that we can reference them when we unbind\n\t\t// (for example, you can build a unique path for an\n\t\t// IAM User with this information, and avoid the need\n\t\t// to have persist extra identifiers, or have users\n\t\t// provide them.\n\t\tcredentials[\"INSTANCE_ID\"] = binding.InstanceID\n\t\tcredentials[\"BINDING_ID\"] = binding.ID\n\n\t\t// Replace credentials with a derived set calculated by a lambda function\n\t\tcredentials, err = invokeLambdaBindFunc(sess, b.Clients.NewLambda, credentials, \"bind\")\n\t\tif err != nil {\n\t\t\treturn nil, newHTTPStatusCodeError(http.StatusInternalServerError, \"\", err.Error())\n\t\t}\n\t}\n\n\t// Store the binding\n\terr = b.db.DataStorePort.PutServiceBinding(*binding)\n\tif err != nil {\n\t\tdesc := fmt.Sprintf(\"Failed to store the service binding %s: %v\", binding.ID, err)\n\t\treturn nil, newHTTPStatusCodeError(http.StatusInternalServerError, \"\", desc)\n\t}\n\n\tb.metrics.Actions.With(\n\t\tprom.Labels{\n\t\t\t\"action\": \"bind\",\n\t\t\t\"service\": service.Name,\n\t\t\t\"plan\": \"\",\n\t\t}).Inc()\n\n\treturn &broker.BindResponse{\n\t\tBindResponse: osb.BindResponse{\n\t\t\tCredentials: credentials,\n\t\t},\n\t}, nil\n}", "func GetBindingsForService(storage Storage, serviceID string, t BindingDirection) ([]*model.ServiceBinding, error) {\n\tvar ret []*model.ServiceBinding\n\tbindings, err := storage.ListServiceBindings()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, b := range bindings {\n\t\tswitch t {\n\t\tcase Both:\n\t\t\tif b.From == serviceID || b.To == serviceID {\n\t\t\t\tret = append(ret, b)\n\t\t\t}\n\t\tcase From:\n\t\t\tif b.From == serviceID {\n\t\t\t\tret = append(ret, b)\n\t\t\t}\n\t\tcase To:\n\t\t\tif b.To == serviceID {\n\t\t\t\tret = append(ret, b)\n\t\t\t}\n\t\t}\n\t}\n\treturn ret, nil\n}", "func (am AppModule) RegisterServices(cfg module.Configurator) {\n}", "func SetServiceStatuses(svc k8sApiV1.Service, annotations map[string]string) {}", "func (r *Reconciler) bind(\n\tlogger *log.Log,\n\tbm *ServiceBinder,\n\tsbrStatus *v1alpha1.ServiceBindingRequestStatus,\n) (\n\treconcile.Result,\n\terror,\n) {\n\tlogger = logger.WithName(\"bind\")\n\n\tlogger.Info(\"Binding applications with intermediary secret...\")\n\treturn bm.Bind()\n}", "func (vm VcapServicesMap) Add(service VcapService) {\n\t// See the cloud-controller-ng source for the definition of how this is\n\t// built.\n\t// https://github.com/cloudfoundry/cloud_controller_ng/blob/65a75e6c97f49756df96e437e253f033415b2db1/app/presenters/system_environment/system_env_presenter.rb\n\tvm[service.Label] = append(vm[service.Label], service)\n}", "func loadBalancerServiceTagsModified(current, expected *corev1.Service) (bool, *corev1.Service) {\n\tignoredAnnotations := managedLoadBalancerServiceAnnotations.Union(sets.NewString(awsLBAdditionalResourceTags))\n\treturn loadBalancerServiceAnnotationsChanged(current, expected, ignoredAnnotations)\n}", "func (am AppModule) RegisterServices(cfg module.Configurator) {\n\ttypes.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.keeper))\n\ttypes.RegisterQueryServer(cfg.QueryServer(), keeper.NewQuerier(am.keeper))\n\n\tm := keeper.NewMigrator(am.keeper, am.legacySubspace)\n\tif err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to migrate x/%s from version 1 to 2: %v\", types.ModuleName, err))\n\t}\n\n\tif err := 
cfg.RegisterMigration(types.ModuleName, 2, m.Migrate2to3); err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to migrate x/%s from version 2 to 3: %v\", types.ModuleName, err))\n\t}\n\n\tif err := cfg.RegisterMigration(types.ModuleName, 3, m.Migrate3to4); err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to migrate x/%s from version 3 to 4: %v\", types.ModuleName, err))\n\t}\n}", "func (client *serviceManagerClient) ListBindings(q *Parameters) (*types.ServiceBindings, error) {\n\tbindings := &types.ServiceBindings{}\n\terr := client.list(&bindings.ServiceBindings, web.ServiceBindingsURL, q)\n\n\treturn bindings, err\n}", "func ListServicesOld() []string {\n\treturn dm.ListServicesOld()\n}", "func appendServiceFilter(lConfig []clientservingv1.ListConfig, client clientservingv1.KnServingClient, cmd *cobra.Command) ([]clientservingv1.ListConfig, error) {\n\tif !cmd.Flags().Changed(\"service\") {\n\t\treturn lConfig, nil\n\t}\n\n\tserviceName := cmd.Flag(\"service\").Value.String()\n\n\t// Verify that service exists first\n\t_, err := client.GetService(cmd.Context(), serviceName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn append(lConfig, clientservingv1.WithService(serviceName)), nil\n}", "func (c *kubernetesDeploymentManifest) Add(ctx context.Context, profile ServiceRequest, services []ServiceRequest, env map[string]string) error {\n\tspan, _ := apm.StartSpanOptions(ctx, \"Adding services to kubernetes deployment\", \"kubernetes.manifest.add-services\", apm.SpanOptions{\n\t\tParent: apm.SpanFromContext(ctx).TraceContext(),\n\t})\n\tspan.Context.SetLabel(\"profile\", profile)\n\tspan.Context.SetLabel(\"services\", services)\n\tdefer span.End()\n\n\tkubectl = cluster.Kubectl().WithNamespace(ctx, getNamespaceFromProfile(profile))\n\n\tfor _, service := range services {\n\t\t_, err := kubectl.Run(ctx, \"apply\", \"-k\", fmt.Sprintf(\"../../../cli/config/kubernetes/overlays/%s\", service.Name))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (h *handler) DeleteServiceBinding(sb *servicecatalog.Binding) error {\n\t// this logic to set and update the timestamp is TPR specific. 
to be moved to the API server\n\tdts := metav1.Now()\n\tsb.DeletionTimestamp = &dts\n\tif _, err := h.apiClient.Bindings(sb.Namespace).Update(sb); err != nil {\n\t\treturn err\n\t}\n\n\t// uninject\n\tif err := h.injector.Uninject(sb); err != nil {\n\t\t// if 0 conditions, uninject and drop condition for uninject\n\t\t// TODO: add failure condition (https://github.com/kubernetes-incubator/service-catalog/issues/305)\n\t\treturn err\n\t}\n\t// TODO: add success condition (https://github.com/kubernetes-incubator/service-catalog/issues/305)\n\tif _, err := h.apiClient.Bindings(sb.Namespace).Update(sb); err != nil {\n\t\treturn err\n\t}\n\n\t// TODO: unbind && add conditions (https://github.com/kubernetes-incubator/service-catalog/issues/305)\n\tif err := h.unbind(sb); err != nil {\n\t\t// TODO: add failure condition (https://github.com/kubernetes-incubator/service-catalog/issues/305)\n\t\treturn err\n\t}\n\t// TODO: add success condition (https://github.com/kubernetes-incubator/service-catalog/issues/305)\n\n\tif _, err := h.apiClient.Bindings(sb.Namespace).Update(sb); err != nil {\n\t\treturn err\n\t}\n\n\t// This is where the binding is _actually_ deleted after all necessary actions have been taken\n\tif err := h.apiClient.Bindings(sb.Namespace).Delete(sb.Name); err != nil {\n\t\t// TODO: add deletion error condition (https://github.com/kubernetes-incubator/service-catalog/issues/305)\n\t\treturn err\n\t}\n\treturn nil\n}", "func (k *k8sService) StartServices(svcs []string) error {\n\tlog.Infof(\"got call to start services %v\", svcs)\n\tdefer k.Unlock()\n\tk.Lock()\n\tif k.modCh == nil {\n\t\tlog.Error(\"Modules Channel is nil, returning\")\n\t\treturn nil\n\t}\n\tfor _, m := range svcs {\n\t\tmodule, ok := k8sModules[m]\n\t\tif !ok {\n\t\t\tlog.Errorf(\"cant find module for service %s to start\", m)\n\t\t\treturn fmt.Errorf(\"cant find module for service %s to start\", m)\n\t\t}\n\t\tmodule.Spec.Disabled = false\n\t\tk8sModules[m] = module\n\t\tk.modCh <- module\n\t\tlog.Infof(\"sent service %s to start\", m)\n\t}\n\treturn nil\n}", "func (scl *SimpleConfigurationLayer) SetServices(services *map[string]*ent.ServiceConfiguration) {\n\tscl.Services = services\n}", "func serviceMap(deps []corev1.Service) map[string]corev1.Service {\n\tm := map[string]corev1.Service{}\n\tfor _, d := range deps {\n\t\tm[fmt.Sprintf(\"%s.%s\", d.Namespace, d.Name)] = d\n\t}\n\treturn m\n}", "func (status *AppStatus) PropagateServiceInstanceBindingsStatus(bindings []ServiceInstanceBinding) {\n\t// Make sure binding sorting is deterministic.\n\tsort.Slice(bindings, func(i, j int) bool {\n\t\treturn bindings[i].Name < bindings[j].Name\n\t})\n\n\t// Gather binding names\n\tvar bindingNames []string\n\tfor _, binding := range bindings {\n\t\tbindingNames = append(bindingNames, binding.Status.BindingName)\n\t}\n\tstatus.ServiceBindingNames = bindingNames\n\n\t// Gather binding conditions\n\tvar conditionTypes []apis.ConditionType\n\tfor _, binding := range bindings {\n\t\tconditionType := serviceBindingConditionType(binding)\n\t\tconditionTypes = append(conditionTypes, conditionType)\n\t}\n\n\tduckStatus := &duckv1beta1.Status{}\n\tmanager := apis.NewLivingConditionSet(conditionTypes...).Manage(duckStatus)\n\tmanager.InitializeConditions()\n\n\tfor _, binding := range bindings {\n\t\tif binding.Generation != binding.Status.ObservedGeneration {\n\t\t\t// this binding's conditions are out of date.\n\t\t\tcontinue\n\t\t}\n\t\tfor _, cond := range binding.Status.Conditions {\n\t\t\tif cond.Type != 
ServiceInstanceBindingConditionReady {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tconditionType := serviceBindingConditionType(binding)\n\t\t\tswitch v1.ConditionStatus(cond.Status) {\n\t\t\tcase v1.ConditionTrue:\n\t\t\t\tmanager.MarkTrue(conditionType)\n\t\t\tcase v1.ConditionFalse:\n\t\t\t\tmanager.MarkFalse(conditionType, cond.Reason, cond.Message)\n\t\t\tcase v1.ConditionUnknown:\n\t\t\t\tmanager.MarkUnknown(conditionType, cond.Reason, cond.Message)\n\t\t\t}\n\t\t}\n\t}\n\n\t// if there are no bindings, set the happy condition to true\n\tif len(bindings) == 0 {\n\t\tmanager.MarkTrue(apis.ConditionReady)\n\t}\n\n\t// Copy Ready condition\n\tPropagateCondition(status.manage(), AppConditionServiceInstanceBindingsReady, manager.GetCondition(apis.ConditionReady))\n\tstatus.ServiceBindingConditions = duckStatus.Conditions\n}", "func (s *storage) UpdateServices(services []*pb.SyncService) {\n\tfor _, val := range services {\n\t\tdata, err := proto.Marshal(val)\n\t\tif err != nil {\n\t\t\tlog.Errorf(err, \"Proto marshal failed: %s\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tupdateOp := putServiceOp(val.ServiceId, data)\n\t\t_, err = s.engine.Do(context.Background(), updateOp)\n\t\tif err != nil {\n\t\t\tlog.Errorf(err, \"Save service to etcd failed: %s\", err)\n\t\t}\n\t}\n}", "func BindingHandler(c *gin.Context) {\n\tinstanceID := c.Param(\"instanceID\")\n\n\treqBody, err := ioutil.ReadAll(c.Request.Body)\n\tif err != nil {\n\t\tlog.Panic(fmt.Sprintf(\"Unable to read request body %s. Body\", err))\n\t}\n\n\tvar serviceBinding model.ServiceBinding\n\terr = json.Unmarshal(reqBody, &serviceBinding)\n\tif err != nil {\n\t\tlog.Panic(fmt.Sprintf(\"Unable to unmarshal service binding request %s. Request Body: %s\", err, string(reqBody)))\n\t}\n\n\tvar planInfo = model.PlanID{}\n\terr = json.Unmarshal([]byte(serviceBinding.PlanID), &planInfo)\n\tif err != nil {\n\t\tlog.Panic(fmt.Sprintf(\"Unable to unmarshal PlanID: %s\", err))\n\t}\n\tserviceName := planInfo.LibsServiceName\n\n\tvolumeID, err := libstoragewrapper.GetVolumeID(NewLibsClient(), serviceName, instanceID)\n\tif err != nil {\n\t\tlog.Panic(fmt.Sprintf(\"Unable to find volume ID by instance Id: %s\", err))\n\t}\n\n\tvolumeName, err := utils.CreateNameForVolume(instanceID)\n\tif err != nil {\n\t\tlog.Panic(fmt.Sprintf(\"Unable to encode instanceID to volume Name %s\", err))\n\t}\n\n\tserviceBindingResp := model.CreateServiceBindingResponse{\n\t\tCredentials: model.CreateServiceBindingCredentials{\n\t\t\tDatabase: \"dummy\",\n\t\t\tHost: \"dummy\",\n\t\t\tPassword: \"dummy\",\n\t\t\tPort: 3306,\n\t\t\tURI: \"dummy\",\n\t\t\tUsername: \"dummy\",\n\t\t},\n\t\tVolumeMounts: []model.VolumeMount{\n\t\t\tmodel.VolumeMount{\n\t\t\t\t//should we be using volumeID?\n\t\t\t\tContainerPath: fmt.Sprintf(\"/var/vcap/store/%s\", volumeID),\n\t\t\t\tMode: \"rw\",\n\t\t\t\tPrivate: model.VolumeMountPrivateDetails{\n\t\t\t\t\tDriver: driverName,\n\t\t\t\t\tGroupId: volumeName,\n\t\t\t\t\tConfig: \"{\\\"broker\\\":\\\"specific_values\\\"}\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tc.JSON(http.StatusCreated, serviceBindingResp)\n}", "func (r *ResourcesController) syncTags() error {\n\tctx, cancel := context.WithTimeout(context.Background(), syncTagsTimeout)\n\tdefer cancel()\n\n\tsvcs, err := r.svcLister.List(labels.Everything())\n\tif err != err {\n\t\treturn fmt.Errorf(\"failed to list services: %s\", err)\n\t}\n\n\tvar lbSvcs []*corev1.Service\n\tfor _, svc := range svcs {\n\t\tif svc.Spec.Type == corev1.ServiceTypeLoadBalancer {\n\t\t\tlbSvcs = append(lbSvcs, 
svc)\n\t\t}\n\t}\n\n\tif len(lbSvcs) == 0 {\n\t\tklog.V(5).Info(\"No load-balancers to tag because no LoadBalancer-typed services exist\")\n\t\treturn nil\n\t}\n\n\tlbs, err := allLoadBalancerList(ctx, r.resources.gclient)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to list load-balancers: %s\", err)\n\t}\n\n\t// Collect tag resources for known load-balancers (i.e., services with\n\t// type=LoadBalancer that either have our own LB ID annotation set or go by\n\t// a matching name).\n\tvar res []godo.Resource\n\tfor _, svc := range lbSvcs {\n\t\tid := findLoadBalancerID(svc, lbs)\n\n\t\t// Load-balancers that have no LB ID set yet and were renamed directly\n\t\t// (e.g., via the cloud control panel) would still be missed, so check\n\t\t// again if we have found an ID.\n\t\tif id != \"\" {\n\t\t\tres = append(res, godo.Resource{\n\t\t\t\tID: id,\n\t\t\t\tType: godo.ResourceType(godo.LoadBalancerResourceType),\n\t\t\t})\n\t\t}\n\t}\n\n\tif len(res) == 0 {\n\t\treturn nil\n\t}\n\n\ttag := buildK8sTag(r.resources.clusterID)\n\t// Tag collected resources with the cluster ID. If the tag does not exist\n\t// (for reasons outlined below), we will create it and retry tagging again.\n\terr = r.tagResources(res)\n\tif _, ok := err.(tagMissingError); ok {\n\t\t// Cluster ID tag has not been created yet. This should have happen\n\t\t// when we set the tag on LB creation. For LBs that have been created\n\t\t// prior to CCM using cluster IDs, however, we need to create the tag\n\t\t// explicitly.\n\t\t_, _, err = r.resources.gclient.Tags.Create(ctx, &godo.TagCreateRequest{\n\t\t\tName: tag,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to create tag %q: %s\", tag, err)\n\t\t}\n\n\t\t// Try tagging again, which should not fail anymore due to a missing\n\t\t// tag.\n\t\terr = r.tagResources(res)\n\t}\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to tag LB resource(s) %v with tag %q: %s\", res, tag, err)\n\t}\n\n\treturn nil\n}", "func (t *Transformer) CreateServices(o *object.Service) ([]runtime.Object, error) {\n\tresult := []runtime.Object{}\n\n\tService := func() *api_v1.Service {\n\t\tserviceLabels := map[string]string(o.Labels)\n\t\treturn &api_v1.Service{\n\t\t\tObjectMeta: api_v1.ObjectMeta{\n\t\t\t\tName: o.Name,\n\t\t\t\tLabels: *util.MergeMaps(\n\t\t\t\t\t// The map containing `\"service\": o.Name` should always be\n\t\t\t\t\t// passed later to avoid being overridden by util.MergeMaps()\n\t\t\t\t\t&serviceLabels,\n\t\t\t\t\t&map[string]string{\n\t\t\t\t\t\t\"service\": o.Name,\n\t\t\t\t\t},\n\t\t\t\t),\n\t\t\t},\n\t\t\tSpec: api_v1.ServiceSpec{\n\t\t\t\tSelector: map[string]string{\n\t\t\t\t\t\"service\": o.Name,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\n\tis := Service()\n\tis.Spec.Type = api_v1.ServiceTypeClusterIP\n\n\tes := Service()\n\tes.Spec.Type = api_v1.ServiceTypeLoadBalancer\n\n\tfor _, c := range o.Containers {\n\t\t// We don't want to generate service if there are no ports to be mapped\n\t\tif len(c.Ports) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, p := range c.Ports {\n\t\t\tvar s *api_v1.Service\n\t\t\tswitch p.Type {\n\t\t\tcase object.PortType_Internal:\n\t\t\t\ts = is\n\t\t\tcase object.PortType_External:\n\t\t\t\ts = es\n\t\t\tdefault:\n\t\t\t\t// There is a mistake in our code; and in Golang because it doesn't have strongly typed enumerations :)\n\t\t\t\treturn result, fmt.Errorf(\"Internal error: unknown PortType %#v\", p.Type)\n\t\t\t}\n\n\t\t\ts.Spec.Ports = append(s.Spec.Ports, api_v1.ServicePort{\n\t\t\t\tName: fmt.Sprintf(\"port-%d\", 
p.Port.ServicePort),\n\t\t\t\tPort: int32(p.Port.ServicePort),\n\t\t\t\tTargetPort: intstr.FromInt(p.Port.ContainerPort),\n\t\t\t})\n\t\t}\n\t}\n\n\tif len(is.Spec.Ports) > 0 {\n\t\tresult = append(result, is)\n\t}\n\n\tif len(es.Spec.Ports) > 0 {\n\t\tresult = append(result, es)\n\t}\n\n\treturn result, nil\n}", "func (r *reconciler) updateLoadBalancerService(current, desired *corev1.Service, platform *configv1.PlatformStatus, deleteIfScopeChanged bool) (bool, error) {\n\t_, platformHasMutableScope := platformsWithMutableScope[platform.Type]\n\tif !platformHasMutableScope && deleteIfScopeChanged && !scopeEqual(current, desired, platform) {\n\t\tlog.Info(\"deleting and recreating the load balancer because its scope changed\", \"namespace\", desired.Namespace, \"name\", desired.Name)\n\t\tforeground := metav1.DeletePropagationForeground\n\t\tdeleteOptions := crclient.DeleteOptions{PropagationPolicy: &foreground}\n\t\tif err := r.deleteLoadBalancerService(current, &deleteOptions); err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif err := r.createLoadBalancerService(desired); err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\treturn true, nil\n\t}\n\n\tchanged, updated := loadBalancerServiceChanged(current, desired)\n\tif !changed {\n\t\treturn false, nil\n\t}\n\t// Diff before updating because the client may mutate the object.\n\tdiff := cmp.Diff(current, updated, cmpopts.EquateEmpty())\n\tif err := r.client.Update(context.TODO(), updated); err != nil {\n\t\treturn false, err\n\t}\n\tlog.Info(\"updated load balancer service\", \"namespace\", updated.Namespace, \"name\", updated.Name, \"diff\", diff)\n\treturn true, nil\n}", "func (mon *Monitor) refreshCloudServices(ip string, port int) {\n\tlp := dist.NewLookupProxy(ip, port)\n\tservices, err := lp.List()\n\tif err != nil {\n\t\tlib.PrintlnError(\"Error at lookup. Error:\", err)\n\t}\n\n\terr = lp.Close()\n\tif err != nil {\n\t\tlib.PrintlnError(\"Error at closing lookup. 
Error:\", err)\n\t}\n\n\tfor _, cloudService := range mon.cloudServices {\n\t\tcloudService.Removed = true\n\t}\n\n\tfor _, service := range services {\n\t\t// If the service registred in NameServer is a CloudFunctions server\n\t\tif strings.Contains(service.ServiceName, mon.cloudFunctionsPattern) {\n\t\t\tfound := false\n\t\t\tfor _, cloudService := range mon.cloudServices {\n\t\t\t\tif cloudService.Aor.ServiceName == service.ServiceName {\n\t\t\t\t\tfound = true\n\t\t\t\t\tcloudService.Removed = false\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !found {\n\t\t\t\tnewCloudService := CloudService{}\n\t\t\t\tnewCloudService.Aor = service\n\t\t\t\tnewCloudService.Removed = false\n\t\t\t\tmon.cloudServices = append(mon.cloudServices, newCloudService)\n\t\t\t}\n\t\t}\n\t}\n}", "func BindService(w http.ResponseWriter, r *http.Request) {\n\tvar mappingName string\n\tif n, found := mux.Vars(r)[\"broker\"]; found {\n\t\tmappingName = n\n\t}\n\tbrokerMapping, err := store.GetMapping(mappingName)\n\tif err != nil {\n\t\tif err == store.ErrNotFound {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\tw.Write([]byte(fmt.Sprintf(\"Portcullis: Unrecognized Broker Route `%s`\", mappingName)))\n\t\t\treturn\n\t\t}\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(\"Portcullis: Error while contacting backend store\"))\n\t}\n\tproxy, statuscode, err := preparePassthrough(r, brokerMapping)\n\n\tif err != nil {\n\t\tw.WriteHeader(statuscode)\n\t\tw.Write([]byte(err.Error()))\n\t\treturn\n\t}\n\n\tflavor, err := brokerMapping.BindConfig.CreateFlavor()\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(err.Error()))\n\t}\n\t//set transport\n\tproxy.Transport = &BindTransport{\n\t\tFlavors: []bindparser.Flavor{flavor},\n\t}\n\tproxy.ServeHTTP(w, r)\n}", "func (c *Completer) Services() {\n\tfor _, svc := range di.GetConfig().GetServices() {\n\t\tc.complete(svc.Domain())\n\t}\n}", "func TestChangeServiceType(t *testing.T) {\n\tinitDone := make(chan struct{})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tmkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"}),\n\t}, true, true, func() {\n\t\tclose(initDone)\n\t})\n\n\t// This existing ClusterIP service should be ignored\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeClusterIP,\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tt.Error(\"No service updates expected\")\n\n\t\treturn false\n\t}, 100*time.Millisecond)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\t<-initDone\n\n\tawait.Block()\n\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tvar assignedIP string\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif net.ParseIP(svc.Status.LoadBalancer.Ingress[0].IP).To4() == nil 
{\n\t\t\tt.Error(\"Expected service to receive a IPv4 address\")\n\t\t\treturn true\n\t\t}\n\n\t\tassignedIP = svc.Status.LoadBalancer.Ingress[0].IP\n\n\t\tif len(svc.Status.Conditions) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one condition\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Type != ciliumSvcRequestSatisfiedCondition {\n\t\t\tt.Error(\"Expected condition to be svc-satisfied:true\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.Conditions[0].Status != slim_meta_v1.ConditionTrue {\n\t\t\tt.Error(\"Expected condition to be svc-satisfied:true\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tupdatedService := &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"service-a\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceAUID,\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t},\n\t}\n\n\t_, err := fixture.svcClient.Services(\"default\").Update(context.Background(), updatedService, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to have no ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\tif len(svc.Status.Conditions) != 0 {\n\t\t\tt.Error(\"Expected service to have no conditions\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tupdatedService = &slim_core_v1.Service{\n\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\tName: \"service-a\",\n\t\t\tNamespace: \"default\",\n\t\t\tUID: serviceAUID,\n\t\t},\n\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\tType: slim_core_v1.ServiceTypeNodePort,\n\t\t},\n\t}\n\n\t_, err = fixture.svcClient.Services(\"default\").Update(context.Background(), updatedService, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\n\tif fixture.lbIPAM.rangesStore.ranges[0].allocRange.Has(net.ParseIP(assignedIP)) {\n\t\tt.Fatal(\"Expected assigned IP to be released\")\n\t}\n}", "func loadBalancerServiceChanged(current, expected *corev1.Service) (bool, *corev1.Service) {\n\t// Preserve most fields and annotations. If a new release of the\n\t// operator starts managing an annotation or spec field that it\n\t// previously ignored, it could stomp user changes when the user\n\t// upgrades the operator to the new release (see\n\t// <https://bugzilla.redhat.com/show_bug.cgi?id=1905490>). 
In order to\n\t// avoid problems, make sure the previous release blocks upgrades when\n\t// the user has modified an annotation or spec field that the new\n\t// release manages.\n\tchanged, updated := loadBalancerServiceAnnotationsChanged(current, expected, managedLoadBalancerServiceAnnotations)\n\n\t// If spec.loadBalancerSourceRanges is nonempty on the service, that\n\t// means that allowedSourceRanges is nonempty on the ingresscontroller,\n\t// which means we can clear the annotation if it's set and overwrite the\n\t// value in the current service.\n\tif len(expected.Spec.LoadBalancerSourceRanges) != 0 {\n\t\tif _, ok := current.Annotations[corev1.AnnotationLoadBalancerSourceRangesKey]; ok {\n\t\t\tif !changed {\n\t\t\t\tchanged = true\n\t\t\t\tupdated = current.DeepCopy()\n\t\t\t}\n\t\t\tdelete(updated.Annotations, corev1.AnnotationLoadBalancerSourceRangesKey)\n\t\t}\n\t\tif !reflect.DeepEqual(current.Spec.LoadBalancerSourceRanges, expected.Spec.LoadBalancerSourceRanges) {\n\t\t\tif !changed {\n\t\t\t\tchanged = true\n\t\t\t\tupdated = current.DeepCopy()\n\t\t\t}\n\t\t\tupdated.Spec.LoadBalancerSourceRanges = expected.Spec.LoadBalancerSourceRanges\n\t\t}\n\t}\n\n\treturn changed, updated\n}", "func (c *HAProxyController) processEndpointsSrvs(oldEndpoints, newEndpoints *store.Endpoints) {\n\t// Compare new Endpoints with old Endpoints Addresses and sync HAProxySrvs\n\t// Also by the end we will have a temporary array holding available HAProxysrv slots\n\tavailable := []*store.HAProxySrv{}\n\tnewEndpoints.HAProxySrvs = oldEndpoints.HAProxySrvs\n\tfor _, srv := range newEndpoints.HAProxySrvs {\n\t\tif _, ok := newEndpoints.Addresses[srv.IP]; !ok {\n\t\t\tavailable = append(available, srv)\n\t\t\tif !srv.Disabled {\n\t\t\t\tsrv.IP = \"127.0.0.1\"\n\t\t\t\tsrv.Disabled = true\n\t\t\t\tsrv.Modified = true\n\t\t\t}\n\t\t}\n\t}\n\t// Check available HAProxySrvs to add new Addresses\n\tavailableIdx := len(available) - 1\n\tfor newAdr := range newEndpoints.Addresses {\n\t\tif availableIdx < 0 {\n\t\t\tbreak\n\t\t}\n\t\tif _, ok := oldEndpoints.Addresses[newAdr]; !ok {\n\t\t\tsrv := available[availableIdx]\n\t\t\tsrv.IP = newAdr\n\t\t\tsrv.Disabled = false\n\t\t\tsrv.Modified = true\n\t\t\tavailable = available[:availableIdx]\n\t\t\tavailableIdx--\n\t\t}\n\t}\n\t// Dynamically updates HAProxy backend servers with HAProxySrvs content\n\tfor srvName, srv := range newEndpoints.HAProxySrvs {\n\t\tif srv.Modified {\n\t\t\tif newEndpoints.BackendName == \"\" {\n\t\t\t\tlogger.Errorf(\"No backend Name for endpoints of service `%s` \", newEndpoints.Service.Value)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlogger.Error(c.Client.SetServerAddr(newEndpoints.BackendName, srvName, srv.IP, 0))\n\t\t\tstatus := \"ready\"\n\t\t\tif srv.Disabled {\n\t\t\t\tstatus = \"maint\"\n\t\t\t}\n\t\t\tlogger.Debugf(\"server '%s/%s' changed status to %v\", newEndpoints.BackendName, srvName, status)\n\t\t\tlogger.Error(c.Client.SetServerState(newEndpoints.BackendName, srvName, status))\n\t\t}\n\t}\n}", "func UpdateServices(b *RDSBroker) error {\n\tcontext := context.Background()\n\n\tparameters := UpdateParameters{\n\t\tApplyImmediately: true,\n\t}\n\n\tparametersJSON, _ := json.Marshal(parameters)\n\n\tupdate := func(instanceID string, details ServiceDetails) error {\n\n\t\td := brokerapi.UpdateDetails{\n\t\t\tServiceID: details.ServiceID,\n\t\t\tPlanID: details.PlanID,\n\t\t\tRawParameters: parametersJSON,\n\t\t\tPreviousValues: brokerapi.PreviousValues{\n\t\t\t\tPlanID: details.PlanID,\n\t\t\t\tServiceID: 
details.ServiceID,\n\t\t\t\tOrgID: details.OrgID,\n\t\t\t\tSpaceID: details.SpaceID,\n\t\t\t},\n\t\t}\n\t\t_, err := b.Update(context, instanceID, d, true)\n\n\t\treturn err\n\t}\n\n\terr := b.BulkUpdate(context, update)\n\treturn err\n}", "func filterLoadBalancerServices(services []corev1.Service) []corev1.Service {\n\tout := []corev1.Service{}\n\tfor _, service := range services {\n\t\tif service.Spec.Type == corev1.ServiceTypeLoadBalancer {\n\t\t\tout = append(out, service)\n\t\t}\n\t}\n\treturn out\n}", "func (mm *Manager) ListAppServiceFromStatefulSet(ns, svcname string) ([]*svcclient.AppService, error) {\n\tstsSvc, err := mm.svcLister.BcsServices(ns).Get(svcname)\n\tif err != nil {\n\t\tblog.Warnf(\"get bcsService %s/%s failed, err %s\", svcname, ns, err.Error())\n\t\treturn nil, nil\n\t}\n\tif len(stsSvc.Spec.Spec.Selector) == 0 {\n\t\tblog.Warnf(\"selector of bcsService %s/%s is empty, err %s\", stsSvc.GetName(),\n\t\t\tstsSvc.GetNamespace(), err.Error())\n\t\treturn nil, nil\n\t}\n\tselector := labels.Set(stsSvc.Spec.Spec.Selector).AsSelector()\n\ttaskgroups, err := mm.taskgroupLister.TaskGroups(ns).List(selector)\n\tif err != nil {\n\t\tblog.Warnf(\"list taskgroups by selector %s failed, err %s\", selector.String(), err.Error())\n\t\treturn nil, nil\n\t}\n\tsortTaskgroups(taskgroups)\n\tappSvcList, err := convertTaskgroupsToAppServices(taskgroups)\n\tif err != nil {\n\t\tblog.Warnf(\"convert taskgroups to AppService failed, err %s\", err.Error())\n\t\treturn nil, nil\n\t}\n\treturn appSvcList, nil\n}", "func (c *Reconciler) reconcilePlaceholderServices(\n\tctx context.Context, r *v1.Route,\n) error {\n\trecorder := controller.GetEventRecorder(ctx)\n\n\tvar targets []string\n\tfor _, traffic := range r.Status.Traffic {\n\t\ttarget, err := resources.HostnameFromTemplate(ctx, r.Name, traffic.Tag)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttargets = append(targets, target)\n\t}\n\n\tfor _, target := range targets {\n\t\tsvc, err := c.serviceLister.Services(r.Namespace).Get(target)\n\t\tif apierrs.IsNotFound(err) {\n\t\t\tdesired, err := resources.MakePlaceholderService(ctx, r, target)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tsvc, err = c.kubeclient.CoreV1().Services(desired.Namespace).Create(ctx, desired, metav1.CreateOptions{})\n\t\t\tif err != nil {\n\t\t\t\trecorder.Eventf(r, corev1.EventTypeWarning, \"CreationFailed\", \"Failed to create Service: %v\", err)\n\t\t\t\treturn fmt.Errorf(\"failed to create Service: %w\", err)\n\t\t\t}\n\t\t\trecorder.Eventf(r, corev1.EventTypeNormal, \"Created\", \"Created HTTPRoute %q\", svc.GetName())\n\t\t\treturn nil\n\t\t} else if err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\tdesired, err := resources.MakePlaceholderService(ctx, r, target)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif !equality.Semantic.DeepEqual(svc.Spec, desired.Spec) ||\n\t\t\t\t!equality.Semantic.DeepEqual(svc.Annotations, desired.Annotations) ||\n\t\t\t\t!equality.Semantic.DeepEqual(svc.Labels, desired.Labels) {\n\n\t\t\t\t// Don't modify the informers copy.\n\t\t\t\torigin := svc.DeepCopy()\n\t\t\t\torigin.Spec = desired.Spec\n\t\t\t\torigin.Annotations = desired.Annotations\n\t\t\t\torigin.Labels = desired.Labels\n\n\t\t\t\t_, err := c.kubeclient.CoreV1().Services(origin.Namespace).Update(\n\t\t\t\t\tctx, origin, metav1.UpdateOptions{})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"failed to update Service: %w\", err)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (c *controller) 
injectBindingIntoInstance(ID string) error {\n\tfromSI, err := c.storage.GetServiceInstance(defaultNamespace, ID)\n\tif err == nil && fromSI != nil {\n\t\t// Update the Service Instance with the new bindings\n\t\tlog.Printf(\"Found existing FROM Service: %s, should update it\", fromSI.Name)\n\t\terr = c.updateServiceInstance(fromSI)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Failed to update existing FROM service %s : %v\", fromSI.Name, err)\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func ServiceAccountBindComputedVariables() []varcontext.DefaultVariable {\n\treturn []varcontext.DefaultVariable{\n\t\t// XXX names are truncated to 20 characters because of a bug in the IAM service\n\t\t{Name: \"service_account_name\", Default: `${str.truncate(20, \"pcf-binding-${request.binding_id}\")}`, Overwrite: true},\n\t\t{Name: \"service_account_display_name\", Default: \"${service_account_name}\", Overwrite: true},\n\t}\n}", "func (c *HAProxyController) handleBinds() (err error) {\n\tvar errors utils.Errors\n\tfrontends := make(map[string]int64, 2)\n\tprotos := make(map[string]string, 2)\n\tif !c.osArgs.DisableHTTP {\n\t\tfrontends[c.Cfg.FrontHTTP] = c.osArgs.HTTPBindPort\n\t}\n\tif !c.osArgs.DisableHTTPS {\n\t\tfrontends[c.Cfg.FrontHTTPS] = c.osArgs.HTTPSBindPort\n\t}\n\tif !c.osArgs.DisableIPV4 {\n\t\tprotos[\"v4\"] = c.osArgs.IPV4BindAddr\n\t}\n\tif !c.osArgs.DisableIPV6 {\n\t\tprotos[\"v6\"] = c.osArgs.IPV6BindAddr\n\n\t\t// IPv6 not disabled, so add v6 listening to stats frontend\n\t\terrors.Add(c.Client.FrontendBindCreate(\"stats\",\n\t\t\tmodels.Bind{\n\t\t\t\tName: \"v6\",\n\t\t\t\tAddress: \":::1024\",\n\t\t\t\tV4v6: false,\n\t\t\t}))\n\t}\n\tfor ftName, ftPort := range frontends {\n\t\tfor proto, addr := range protos {\n\t\t\tbind := models.Bind{\n\t\t\t\tName: proto,\n\t\t\t\tAddress: addr,\n\t\t\t\tPort: utils.PtrInt64(ftPort),\n\t\t\t}\n\t\t\tif err = c.Client.FrontendBindEdit(ftName, bind); err != nil {\n\t\t\t\terrors.Add(c.Client.FrontendBindCreate(ftName, bind))\n\t\t\t}\n\t\t}\n\t}\n\treturn errors.Result()\n}", "func (h *httpHandler) CreateServiceBinding(w http.ResponseWriter, r *http.Request) {\n\tvar req scmodel.CreateServiceBindingRequest\n\terr := util.BodyToObject(r, &req)\n\tif err != nil {\n\t\tlog.Printf(\"Error unmarshaling: %#v\\n\", err)\n\t\tutil.WriteResponse(w, 400, err)\n\t\treturn\n\t}\n\n\tbinding := scmodel.ServiceBinding{\n\t\tName: req.Name,\n\t\tFrom: req.From,\n\t\tTo: req.To,\n\t\tParameters: req.Parameters,\n\t}\n\n\terr = h.k8sStorage.AddServiceBinding(&binding, nil)\n\tif err != nil {\n\t\tlog.Printf(\"Error creating a service binding %s: %v\\n\", req.Name, err)\n\t\tutil.WriteResponse(w, 400, err)\n\t\treturn\n\t}\n\tutil.WriteResponse(w, 200, binding)\n}", "func (n *ns1) setServices(services map[string]service) {\n\tn.lock.Lock()\n\tn.services = services\n\tn.lock.Unlock()\n}", "func ForService(existing, desired []corev1.Service) k8s.Objects {\n\tvar update []client.Object\n\tmdelete := serviceMap(existing)\n\tmcreate := serviceMap(desired)\n\n\tfor k, v := range mcreate {\n\t\tif t, ok := mdelete[k]; ok {\n\t\t\tdiff := cmp.Diff(t, v, ignore(ignoredServiceFields...))\n\t\t\tif diff != \"\" {\n\t\t\t\ttp := t.DeepCopy()\n\n\t\t\t\tif v.Spec.ClusterIP == \"\" && len(tp.Spec.ClusterIP) > 0 {\n\t\t\t\t\tv.Spec.ClusterIP = tp.Spec.ClusterIP\n\t\t\t\t}\n\n\t\t\t\ttp.Spec = v.Spec\n\t\t\t\ttp.ObjectMeta.OwnerReferences = v.ObjectMeta.OwnerReferences\n\t\t\t\tfor k, v := range v.ObjectMeta.Annotations {\n\t\t\t\t\ttp.ObjectMeta.Annotations[k] = 
v\n\t\t\t\t}\n\n\t\t\t\tfor k, v := range v.ObjectMeta.Labels {\n\t\t\t\t\ttp.ObjectMeta.Labels[k] = v\n\t\t\t\t}\n\n\t\t\t\tupdate = append(update, tp)\n\t\t\t}\n\t\t\tdelete(mcreate, k)\n\t\t\tdelete(mdelete, k)\n\t\t}\n\t}\n\n\treturn &Object{\n\t\tCreate: serviceList(mcreate),\n\t\tUpdate: update,\n\t\tDelete: serviceList(mdelete),\n\t}\n}", "func LoadServicesFromLocal(serviceListFile string) []*Service {\n\tvar serviceList []*Service\n\tok, err := util.IsDir(serviceListFile)\n\tif err != nil {\n\t\tlogrus.Errorf(\"read service config file error,%s\", err.Error())\n\t\treturn nil\n\t}\n\tif !ok {\n\t\tservices, err := loadServicesFromFile(serviceListFile)\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"read service config file %s error,%s\", serviceListFile, err.Error())\n\t\t\treturn nil\n\t\t}\n\t\treturn services.Services\n\t}\n\tfilepath.Walk(serviceListFile, func(path string, info os.FileInfo, err error) error {\n\t\tif strings.HasSuffix(path, \"yaml\") && !info.IsDir() {\n\t\t\tservices, err := loadServicesFromFile(path)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Errorf(\"read service config file %s error,%s\", path, err.Error())\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tserviceList = append(serviceList, services.Services...)\n\t\t}\n\t\treturn nil\n\t})\n\tresult := removeRepByLoop(serviceList)\n\tlogrus.Infof(\"load service config file success. load %d service\", len(result))\n\treturn result\n}", "func newServiceForCR(cr *interviewv1alpha1.Minecraft) *corev1.Service {\n\tlabels := map[string]string{\n\t\t\"app\": cr.Name,\n\t}\n\n\treturn &corev1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: cr.Name + \"-lb-service\",\n\t\t\tNamespace: cr.Namespace,\n\t\t\tLabels: labels,\n\t\t},\n\t\tSpec: corev1.ServiceSpec{\n\t\t\tPorts: []corev1.ServicePort{\n\t\t\t\t{\n\t\t\t\t\tPort: 25565,\n\t\t\t\t},\n\t\t\t},\n\t\t\tType: corev1.ServiceTypeLoadBalancer,\n\t\t\tSelector: labels,\n\t\t},\n\t}\n}", "func bindBindings(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(BindingsABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func boundIPs(c *caddy.Controller) (ips []net.IP) {\n\tconf := dnsserver.GetConfig(c)\n\thosts := conf.ListenHosts\n\tif hosts == nil || hosts[0] == \"\" {\n\t\thosts = nil\n\t\taddrs, err := net.InterfaceAddrs()\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\tfor _, addr := range addrs {\n\t\t\thosts = append(hosts, addr.String())\n\t\t}\n\t}\n\tfor _, host := range hosts {\n\t\tip, _, _ := net.ParseCIDR(host)\n\t\tip4 := ip.To4()\n\t\tif ip4 != nil && !ip4.IsLoopback() {\n\t\t\tips = append(ips, ip4)\n\t\t\tcontinue\n\t\t}\n\t\tip6 := ip.To16()\n\t\tif ip6 != nil && !ip6.IsLoopback() {\n\t\t\tips = append(ips, ip6)\n\t\t}\n\t}\n\treturn ips\n}", "func AddService(existingServices map[string]model.Service, flagAdvanced bool, flagForce bool, modeGenerate bool) (service model.Service, serviceName string, existingServiceNames []string) {\n\t// Get names of existing services\n\tfor name := range existingServices {\n\t\texistingServiceNames = append(existingServiceNames, name)\n\t}\n\n\t// Ask if the image should be built from source\n\tbuild, buildPath, registry := askBuildFromSource()\n\n\t// Ask for image\n\timageName := askForImage(build)\n\n\t// Search for remote image and check manifest\n\tif !build && !flagForce 
{\n\t\tsearchRemoteImage(registry, imageName)\n\t}\n\n\t// Ask for service name\n\tserviceName = askForServiceName(existingServices, imageName)\n\n\t// Ask for container name\n\tcontainerName := serviceName\n\tif flagAdvanced {\n\t\tcontainerName = askForContainerName(serviceName)\n\t}\n\n\t// Ask for volumes\n\tvolumes := askForVolumes(flagAdvanced)\n\n\t// Ask for networks\n\tnetworks := askForNetworks()\n\n\t// Ask for ports\n\tports := askForPorts()\n\n\t// Ask for env files\n\tenvFiles := askForEnvFiles()\n\n\t// Ask for env variables\n\tenvVariables := []string{}\n\tif len(envFiles) == 0 {\n\t\tenvVariables = askForEnvVariables()\n\t}\n\n\t// Ask for services, the new one should depend on\n\tvar dependsServices []string\n\tif !modeGenerate {\n\t\tdependsServices = askForDependsOn(util.RemoveStringFromSlice(existingServiceNames, serviceName))\n\t}\n\n\t// Ask for restart mode\n\trestartValue := askForRestart(flagAdvanced)\n\n\t// Build service object\n\tservice = model.Service{\n\t\tBuild: buildPath,\n\t\tImage: registry + imageName,\n\t\tContainerName: containerName,\n\t\tVolumes: volumes,\n\t\tNetworks: networks,\n\t\tPorts: ports,\n\t\tRestart: restartValue,\n\t\tDependsOn: dependsServices,\n\t\tEnvFile: envFiles,\n\t\tEnvironment: envVariables,\n\t}\n\treturn\n}", "func (p *BlackduckPatcher) patchExposeService() error {\n\t// TODO use contansts\n\tid := fmt.Sprintf(\"Service.%s-blackduck-webserver-exposed\", p.blackDuckCr.Name)\n\truntimeObject, ok := p.mapOfUniqueIdToBaseRuntimeObject[id]\n\tif !ok {\n\t\treturn nil\n\t}\n\n\tswitch strings.ToUpper(p.blackDuckCr.Spec.ExposeService) {\n\tcase \"LOADBALANCER\":\n\t\truntimeObject.(*corev1.Service).Spec.Type = corev1.ServiceTypeLoadBalancer\n\tcase \"NODEPORT\":\n\t\truntimeObject.(*corev1.Service).Spec.Type = corev1.ServiceTypeNodePort\n\tdefault:\n\t\tdelete(p.mapOfUniqueIdToBaseRuntimeObject, id)\n\t}\n\n\t// TODO add openhift route\n\n\treturn nil\n}", "func (ipvsc *ipvsControllerController) getServices(cfgMap *apiv1.ConfigMap) []vip {\n\tsvcs := []vip{}\n\n\t// k -> IP to use\n\t// v -> <namespace>/<service name>:<lvs method>\n\tfor externalIP, nsSvcLvs := range cfgMap.Data {\n\t\tif nsSvcLvs == \"\" {\n\t\t\t// if target is empty string we will not forward to any service but\n\t\t\t// instead just configure the IP on the machine and let it up to\n\t\t\t// another Pod or daemon to bind to the IP address\n\t\t\tsvcs = append(svcs, vip{\n\t\t\t\tName: \"\",\n\t\t\t\tIP: externalIP,\n\t\t\t\tPort: 0,\n\t\t\t\tLVSMethod: \"VIP\",\n\t\t\t\tBackends: nil,\n\t\t\t\tProtocol: \"TCP\",\n\t\t\t})\n\t\t\tglog.V(2).Infof(\"Adding VIP only service: %v\", externalIP)\n\t\t\tcontinue\n\t\t}\n\n\t\tns, svc, lvsm, err := parseNsSvcLVS(nsSvcLvs)\n\t\tif err != nil {\n\t\t\tglog.Warningf(\"%v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tnsSvc := fmt.Sprintf(\"%v/%v\", ns, svc)\n\t\tsvcObj, svcExists, err := ipvsc.svcLister.Store.GetByKey(nsSvc)\n\t\tif err != nil {\n\t\t\tglog.Warningf(\"error getting service %v: %v\", nsSvc, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif !svcExists {\n\t\t\tglog.Warningf(\"service %v not found\", nsSvc)\n\t\t\tcontinue\n\t\t}\n\n\t\ts := svcObj.(*apiv1.Service)\n\t\tfor _, servicePort := range s.Spec.Ports {\n\t\t\tep := ipvsc.getEndpoints(s, &servicePort)\n\t\t\tif len(ep) == 0 {\n\t\t\t\tglog.Warningf(\"no endpoints found for service %v, port %+v\", s.Name, servicePort)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tsort.Sort(serviceByIPPort(ep))\n\n\t\t\tsvcs = append(svcs, vip{\n\t\t\t\tName: fmt.Sprintf(\"%v-%v\", s.Namespace, 
s.Name),\n\t\t\t\tIP: externalIP,\n\t\t\t\tPort: int(servicePort.Port),\n\t\t\t\tLVSMethod: lvsm,\n\t\t\t\tBackends: ep,\n\t\t\t\tProtocol: fmt.Sprintf(\"%v\", servicePort.Protocol),\n\t\t\t})\n\t\t\tglog.V(2).Infof(\"found service: %v:%v\", s.Name, servicePort.Port)\n\t\t}\n\t}\n\n\tsort.Sort(vipByNameIPPort(svcs))\n\n\treturn svcs\n}", "func (r *reconciler) ensureLoadBalancerService(ci *operatorv1.IngressController, deploymentRef metav1.OwnerReference, platformStatus *configv1.PlatformStatus) (bool, *corev1.Service, error) {\n\twantLBS, desiredLBService, err := desiredLoadBalancerService(ci, deploymentRef, platformStatus)\n\tif err != nil {\n\t\treturn false, nil, err\n\t}\n\n\thaveLBS, currentLBService, err := r.currentLoadBalancerService(ci)\n\tif err != nil {\n\t\treturn false, nil, err\n\t}\n\n\t// BZ2054200: Don't modify/delete services that are not directly owned by this controller.\n\townLBS := isServiceOwnedByIngressController(currentLBService, ci)\n\n\tswitch {\n\tcase !wantLBS && !haveLBS:\n\t\treturn false, nil, nil\n\tcase !wantLBS && haveLBS:\n\t\tif !ownLBS {\n\t\t\treturn false, nil, fmt.Errorf(\"a conflicting load balancer service exists that is not owned by the ingress controller: %s\", controller.LoadBalancerServiceName(ci))\n\t\t}\n\t\tif err := r.deleteLoadBalancerService(currentLBService, &crclient.DeleteOptions{}); err != nil {\n\t\t\treturn true, currentLBService, err\n\t\t}\n\t\treturn false, nil, nil\n\tcase wantLBS && !haveLBS:\n\t\tif err := r.createLoadBalancerService(desiredLBService); err != nil {\n\t\t\treturn false, nil, err\n\t\t}\n\t\treturn r.currentLoadBalancerService(ci)\n\tcase wantLBS && haveLBS:\n\t\tif !ownLBS {\n\t\t\treturn false, nil, fmt.Errorf(\"a conflicting load balancer service exists that is not owned by the ingress controller: %s\", controller.LoadBalancerServiceName(ci))\n\t\t}\n\t\tif updated, err := r.normalizeLoadBalancerServiceAnnotations(currentLBService); err != nil {\n\t\t\treturn true, currentLBService, fmt.Errorf(\"failed to normalize annotations for load balancer service: %w\", err)\n\t\t} else if updated {\n\t\t\thaveLBS, currentLBService, err = r.currentLoadBalancerService(ci)\n\t\t\tif err != nil {\n\t\t\t\treturn haveLBS, currentLBService, err\n\t\t\t}\n\t\t}\n\t\tdeleteIfScopeChanged := false\n\t\tif _, ok := ci.Annotations[autoDeleteLoadBalancerAnnotation]; ok {\n\t\t\tdeleteIfScopeChanged = true\n\t\t}\n\t\tif updated, err := r.updateLoadBalancerService(currentLBService, desiredLBService, platformStatus, deleteIfScopeChanged); err != nil {\n\t\t\treturn true, currentLBService, fmt.Errorf(\"failed to update load balancer service: %v\", err)\n\t\t} else if updated {\n\t\t\treturn r.currentLoadBalancerService(ci)\n\t\t}\n\t}\n\treturn true, currentLBService, nil\n}", "func (r *NuxeoReconciler) configureBackingServices(instance *v1alpha1.Nuxeo, dep *appsv1.Deployment) (string, error) {\n\tnuxeoConf := \"\"\n\tfor idx, backingService := range instance.Spec.BackingServices {\n\t\tvar err error\n\t\tif !backingSvcIsValid(backingService) {\n\t\t\treturn \"\", fmt.Errorf(\"invalid backing service definition at ordinal position: %v\", idx)\n\t\t}\n\t\t// if configurer provided a preconfigured backing service use that as if it were actually in the CR\n\t\tif backingService.Preconfigured.Type != \"\" {\n\t\t\tif backingService, err = xlatBacking(backingService.Preconfigured); err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t}\n\t\tif err = r.configureBackingService(instance, backingService, dep); err != nil {\n\t\t\treturn 
\"\", err\n\t\t}\n\t\tif err = r.annotateDep(backingService, dep); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\t// accumulate each backing service's nuxeo.conf settings\n\t\tnuxeoConf = joinCompact(\"\\n\", nuxeoConf, backingService.NuxeoConf)\n\n\t\tif backingService.Template != \"\" {\n\t\t\t// backing service requires a template so add it to NUXEO_TEMPLATES env var\n\t\t\tif nuxeoContainer, err := GetNuxeoContainer(dep); err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t} else {\n\t\t\t\tenv := corev1.EnvVar{\n\t\t\t\t\tName: \"NUXEO_TEMPLATES\",\n\t\t\t\t\tValue: backingService.Template,\n\t\t\t\t}\n\t\t\t\tif err := util.MergeOrAddEnvVar(nuxeoContainer, env, \",\"); err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nuxeoConf, nil\n}", "func (im *IpManager) RefreshSvcList(svcList map[string]bool) {\n\tim.mutex.Lock()\n\tdefer im.mutex.Unlock()\n\n\t// Remove stale entries\n\tfor svcName := range im.svcIpMap {\n\t\tif _, found := svcList[svcName]; !found {\n\t\t\tdelete(im.svcIpMap, svcName)\n\t\t}\n\t}\n\n\t// Add missing entries\n\tfor svcName := range svcList {\n\t\tif _, found := im.svcIpMap[svcName]; !found {\n\t\t\tim.svcIpMap[svcName] = IP_ADDR_NONE\n\t\t}\n\t}\n}", "func ServiceUpdated(old, updated *apiv1.Service) error {\n\treturn nil\n}", "func SetServices(l service.List) {\n\tservices = l\n\tconn = db.GetConnection(services)\n}", "func TestAddRange(t *testing.T) {\n\tpoolA := mkPool(poolAUID, \"pool-a\", []string{\"10.0.10.0/24\"})\n\tfixture := mkTestFixture([]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool{\n\t\tpoolA,\n\t}, true, true, nil)\n\n\tfixture.coreCS.Tracker().Add(\n\t\t&slim_core_v1.Service{\n\t\t\tObjectMeta: slim_meta_v1.ObjectMeta{\n\t\t\t\tName: \"service-a\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tUID: serviceAUID,\n\t\t\t},\n\t\t\tSpec: slim_core_v1.ServiceSpec{\n\t\t\t\tType: slim_core_v1.ServiceTypeLoadBalancer,\n\t\t\t\tLoadBalancerIP: \"10.0.20.10\",\n\t\t\t},\n\t\t},\n\t)\n\n\tawait := fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 0 {\n\t\t\tt.Error(\"Expected service to receive no ingress IPs\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\tgo fixture.hive.Start(context.Background())\n\tdefer fixture.hive.Stop(context.Background())\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status update\")\n\t}\n\t// If t.Error was called within the await\n\tif t.Failed() {\n\t\treturn\n\t}\n\n\tpoolA.Spec.Cidrs = append(poolA.Spec.Cidrs, cilium_api_v2alpha1.CiliumLoadBalancerIPPoolCIDRBlock{\n\t\tCidr: \"10.0.20.0/24\",\n\t})\n\n\tawait = fixture.AwaitService(func(action k8s_testing.Action) bool {\n\t\tif action.GetResource() != servicesResource || action.GetVerb() != \"patch\" {\n\t\t\treturn false\n\t\t}\n\n\t\tsvc := fixture.PatchedSvc(action)\n\n\t\tif len(svc.Status.LoadBalancer.Ingress) != 1 {\n\t\t\tt.Error(\"Expected service to receive exactly one ingress IP\")\n\t\t\treturn true\n\t\t}\n\n\t\tif svc.Status.LoadBalancer.Ingress[0].IP != \"10.0.20.10\" {\n\t\t\tt.Error(\"Expected service to receive IP '10.0.20.10'\")\n\t\t\treturn true\n\t\t}\n\n\t\treturn true\n\t}, time.Second)\n\n\t_, err := fixture.poolClient.Update(context.Background(), poolA, meta_v1.UpdateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif await.Block() {\n\t\tt.Fatal(\"Expected service status 
update\")\n\t}\n}", "func (e *EndpointController) SyncServiceEndpoints() error {\n\tservices, err := e.client.Services(api.NamespaceAll).List(labels.Everything())\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to list services: %v\", err)\n\t\treturn err\n\t}\n\tvar resultErr error\n\tfor _, service := range services.Items {\n\t\tif service.Spec.Selector == nil {\n\t\t\t// services without a selector receive no endpoints from this controller;\n\t\t\t// these services will receive the endpoints that are created out-of-band via the REST API.\n\t\t\tcontinue\n\t\t}\n\n\t\tglog.V(5).Infof(\"About to update endpoints for service %s/%s\", service.Namespace, service.Name)\n\t\tpods, err := e.client.Pods(service.Namespace).List(labels.Set(service.Spec.Selector).AsSelector())\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Error syncing service: %s/%s, skipping\", service.Namespace, service.Name)\n\t\t\tresultErr = err\n\t\t\tcontinue\n\t\t}\n\t\tendpoints := []api.Endpoint{}\n\n\t\tfor _, pod := range pods.Items {\n\t\t\tport, err := findPort(&pod, service.Spec.ContainerPort)\n\t\t\tif err != nil {\n\t\t\t\tglog.Errorf(\"Failed to find port for service %s/%s: %v\", service.Namespace, service.Name, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif len(pod.Status.PodIP) == 0 {\n\t\t\t\tglog.Errorf(\"Failed to find an IP for pod %s/%s\", pod.Namespace, pod.Name)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tinService := false\n\t\t\tfor _, c := range pod.Status.Conditions {\n\t\t\t\tif c.Type == api.PodReady && c.Status == api.ConditionFull {\n\t\t\t\t\tinService = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !inService {\n\t\t\t\tglog.V(5).Infof(\"Pod is out of service: %v/%v\", pod.Namespace, pod.Name)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tendpoints = append(endpoints, api.Endpoint{IP: pod.Status.PodIP, Port: port})\n\t\t}\n\t\tcurrentEndpoints, err := e.client.Endpoints(service.Namespace).Get(service.Name)\n\t\tif err != nil {\n\t\t\tif errors.IsNotFound(err) {\n\t\t\t\tcurrentEndpoints = &api.Endpoints{\n\t\t\t\t\tObjectMeta: api.ObjectMeta{\n\t\t\t\t\t\tName: service.Name,\n\t\t\t\t\t},\n\t\t\t\t\tProtocol: service.Spec.Protocol,\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tglog.Errorf(\"Error getting endpoints: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tnewEndpoints := &api.Endpoints{}\n\t\t*newEndpoints = *currentEndpoints\n\t\tnewEndpoints.Endpoints = endpoints\n\n\t\tif len(currentEndpoints.ResourceVersion) == 0 {\n\t\t\t// No previous endpoints, create them\n\t\t\t_, err = e.client.Endpoints(service.Namespace).Create(newEndpoints)\n\t\t} else {\n\t\t\t// Pre-existing\n\t\t\tif currentEndpoints.Protocol == service.Spec.Protocol && endpointsEqual(currentEndpoints, endpoints) {\n\t\t\t\tglog.V(5).Infof(\"protocol and endpoints are equal for %s/%s, skipping update\", service.Namespace, service.Name)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t_, err = e.client.Endpoints(service.Namespace).Update(newEndpoints)\n\t\t}\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Error updating endpoints: %v\", err)\n\t\t\tcontinue\n\t\t}\n\t}\n\treturn resultErr\n}", "func (c *backingservices) Update(p *backingserviceapi.BackingService) (result *backingserviceapi.BackingService, err error) {\n\tresult = &backingserviceapi.BackingService{}\n\terr = c.r.Put().Namespace(c.ns).Resource(\"backingservices\").Name(p.Name).Body(p).Do().Into(result)\n\treturn\n}", "func (sp *ServiceProcessor) renderService(svc *Service, oldContivSvc *renderer.ContivService,\n\toldBackends []podmodel.ID) error {\n\n\tvar err error\n\tnewContivSvc := 
svc.GetContivService()\n\tnewBackends := svc.GetLocalBackends()\n\totherContiveServices := sp.otherContivServices(svc)\n\n\t// Render service.\n\tif newContivSvc != nil {\n\t\tif oldContivSvc == nil {\n\t\t\tfor _, renderer := range sp.renderers {\n\t\t\t\tif err = renderer.AddService(newContivSvc); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tfor _, renderer := range sp.renderers {\n\t\t\t\tif err = renderer.UpdateService(oldContivSvc, newContivSvc, otherContiveServices); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif oldContivSvc != nil {\n\t\t\tfor _, renderer := range sp.renderers {\n\t\t\t\tif err = renderer.DeleteService(oldContivSvc, otherContiveServices); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Render local Backends.\n\tnewBackendIfs := sp.backendIfs.Copy()\n\tupdateBackends := false\n\t// -> handle new backend interfaces\n\tfor _, newBackend := range newBackends {\n\t\tnew := true\n\t\tfor _, oldBackend := range oldBackends {\n\t\t\tif newBackend == oldBackend {\n\t\t\t\tnew = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif new {\n\t\t\tlocalEp := sp.getLocalEndpoint(newBackend)\n\t\t\tlocalEp.svcCount++\n\t\t\tif localEp.ifName != \"\" && localEp.svcCount == 1 {\n\t\t\t\tnewBackendIfs.Add(localEp.ifName)\n\t\t\t\tupdateBackends = true\n\t\t\t}\n\t\t}\n\t}\n\t// -> handle removed backend interfaces\n\tfor _, oldBackend := range oldBackends {\n\t\tremoved := true\n\t\tfor _, newBackend := range newBackends {\n\t\t\tif newBackend == oldBackend {\n\t\t\t\tremoved = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif removed {\n\t\t\tlocalEp := sp.getLocalEndpoint(oldBackend)\n\t\t\tlocalEp.svcCount--\n\t\t\tif localEp.ifName != \"\" && localEp.svcCount == 0 {\n\t\t\t\tnewBackendIfs.Del(localEp.ifName)\n\t\t\t\tupdateBackends = true\n\t\t\t}\n\t\t}\n\t}\n\t// -> update local backends\n\tif updateBackends {\n\t\tfor _, renderer := range sp.renderers {\n\t\t\terr = renderer.UpdateLocalBackendIfs(sp.backendIfs, newBackendIfs)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tsp.backendIfs = newBackendIfs\n\t}\n\n\treturn err\n}", "func (l *SharedLoadBalancer) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error {\n\tklog.Infof(\"UpdateLoadBalancer: called with service %s/%s, node: %d\", service.Namespace, service.Name, len(nodes))\n\t// get exits or create a new ELB instance\n\tloadbalancer, err := l.getLoadBalancerInstance(ctx, clusterName, service)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// query ELB listeners list\n\tlisteners, err := l.sharedELBClient.ListListeners(&elbmodel.ListListenersRequest{LoadbalancerId: &loadbalancer.Id})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, port := range service.Spec.Ports {\n\t\tlistener := l.filterListenerByPort(listeners, service, port)\n\t\tif listener == nil {\n\t\t\treturn status.Errorf(codes.Unavailable, \"error, can not find a listener matching %s:%v\",\n\t\t\t\tport.Protocol, port.Port)\n\t\t}\n\n\t\t// query pool or create pool\n\t\tpool, err := l.getPool(loadbalancer.Id, listener.Id)\n\t\tif err != nil && common.IsNotFound(err) {\n\t\t\tpool, err = l.createPool(listener, service)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// add new members and remove the obsolete members.\n\t\tif err = l.addOrRemoveMembers(loadbalancer, service, pool, port, nodes); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// add or remove health monitor\n\t\tif err = 
l.ensureHealthCheck(loadbalancer.Id, pool, port, service, nodes[0]); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func loadBalancerServiceAnnotationsChanged(current, expected *corev1.Service, annotations sets.String) (bool, *corev1.Service) {\n\tchanged := false\n\tfor annotation := range annotations {\n\t\tcurrentVal, have := current.Annotations[annotation]\n\t\texpectedVal, want := expected.Annotations[annotation]\n\t\tif (want && (!have || currentVal != expectedVal)) || (have && !want) {\n\t\t\tchanged = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !changed {\n\t\treturn false, nil\n\t}\n\n\tupdated := current.DeepCopy()\n\n\tif updated.Annotations == nil {\n\t\tupdated.Annotations = map[string]string{}\n\t}\n\n\tfor annotation := range annotations {\n\t\tcurrentVal, have := current.Annotations[annotation]\n\t\texpectedVal, want := expected.Annotations[annotation]\n\t\tif want && (!have || currentVal != expectedVal) {\n\t\t\tupdated.Annotations[annotation] = expected.Annotations[annotation]\n\t\t} else if have && !want {\n\t\t\tdelete(updated.Annotations, annotation)\n\t\t}\n\t}\n\n\treturn true, updated\n}", "func (mm *Manager) OnBcsServiceAdd(obj interface{}) {\n\tsvc, ok := obj.(*v2.BcsService)\n\tif !ok {\n\t\tmesosCritical.WithLabelValues(typeBcsService, eventAdd).Inc()\n\t\tblog.Errorf(\"[Critical]BcsService event handler get unknown type obj %v OnAdd\", obj)\n\t\treturn\n\t}\n\tblog.V(5).Infof(\"BcsService %s/%s add, event +1\", svc.GetNamespace(), svc.GetName())\n\tmesosEvent.WithLabelValues(typeBcsService, eventAdd, statusSuccess).Inc()\n\t// BcsEndpoint event will come with all IP address information later\n\t// we don't need to handle service add event\n\t// mm.updateAppService\n}", "func (ug *Upgrader) addServiceToWorkloadLabelMapping(namespace, serviceName string) error {\n\tif _, found := ug.NamespaceToServiceToWorkloadLabels[namespace][serviceName]; found {\n\t\treturn nil\n\t}\n\n\tvar service *v1.Service\n\tif len(ug.ServiceFiles) != 0 {\n\t\tsvc := v1.Service{}\n\t\tfor _, filename := range ug.ServiceFiles {\n\t\t\tfileBuf, err := ioutil.ReadFile(filename)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treader := bytes.NewReader(fileBuf)\n\t\t\tyamlDecoder := kubeyaml.NewYAMLOrJSONDecoder(reader, 512*1024)\n\t\t\tfor {\n\t\t\t\terr = yamlDecoder.Decode(&svc)\n\t\t\t\tif err == io.EOF {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"failed to parse k8s Service file: %s\", err)\n\t\t\t\t}\n\t\t\t\tif svc.Name == serviceName && svc.Namespace == namespace {\n\t\t\t\t\tservice = &svc\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\tvar err error\n\t\tservice, err = ug.K8sClient.CoreV1().Services(namespace).Get(serviceName, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif service == nil {\n\t\treturn fmt.Errorf(\"no service found in namespace %s\", namespace)\n\t}\n\tif service.Spec.Selector == nil {\n\t\treturn fmt.Errorf(\"failed because service %q does not have selector\", serviceName)\n\t}\n\t// Maps need to be initialized (from lowest level outwards) before we can write to them.\n\tif _, found := ug.NamespaceToServiceToWorkloadLabels[namespace][serviceName]; !found {\n\t\tif _, found := ug.NamespaceToServiceToWorkloadLabels[namespace]; !found {\n\t\t\tug.NamespaceToServiceToWorkloadLabels[namespace] = make(ServiceToWorkloadLabels)\n\t\t}\n\t\tug.NamespaceToServiceToWorkloadLabels[namespace][serviceName] = 
make(WorkloadLabels)\n\t}\n\tug.NamespaceToServiceToWorkloadLabels[namespace][serviceName] = service.Spec.Selector\n\treturn nil\n}", "func (s *server) ListInputBindings(ctx context.Context, in *empty.Empty) (*pb.ListInputBindingsResponse, error) {\n\treturn &pb.ListInputBindingsResponse{\n\t\tBindings: []string{\"storage\"},\n\t}, nil\n}", "func newBackingServices(c *Client, namespace string) *backingservices {\n\treturn &backingservices{\n\t\tr: c,\n\t\tns: namespace,\n\t}\n}", "func parseBind(flags *pflag.FlagSet, spec *api.ServiceSpec) error {\n\tif flags.Changed(\"bind\") {\n\t\tbinds, err := flags.GetStringSlice(\"bind\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tcontainer := spec.Task.GetContainer()\n\n\t\tfor _, bind := range binds {\n\t\t\tparts := strings.SplitN(bind, \":\", 2)\n\t\t\tif len(parts) != 2 {\n\t\t\t\treturn fmt.Errorf(\"bind format %q not supported\", bind)\n\t\t\t}\n\t\t\tcontainer.Mounts = append(container.Mounts, api.Mount{\n\t\t\t\tType: api.MountTypeBind,\n\t\t\t\tSource: parts[0],\n\t\t\t\tTarget: parts[1],\n\t\t\t})\n\t\t}\n\t}\n\n\treturn nil\n}", "func (k *k8sService) Start(client, strClient k8sclient.Interface, isLeader bool) {\n\t// prevent other Start/Stop operations until we are done\n\tk.startStopMutex.Lock()\n\tdefer k.startStopMutex.Unlock()\n\n\t// Protect state access from other go-routines\n\tk.Lock()\n\tdefer k.Unlock()\n\n\tif k.running {\n\t\treturn\n\t}\n\n\tk.modCh = make(chan protos.Module, maxModules)\n\tk.running = true\n\tlog.Infof(\"Starting k8s service\")\n\tlog.Infof(\"Config for starting services is [%+v]\", k.config)\n\tk.client = client\n\tk.strClient = strClient\n\tk.isLeader = isLeader\n\tk.ctx, k.cancel = context.WithCancel(context.Background())\n\n\t// Take override config now\n\tfor k, v := range k.config.OverriddenModules {\n\t\tk8sModules[k] = v\n\t}\n\tfor _, k := range k.config.DisabledModules {\n\t\tdelete(k8sModules, k)\n\t}\n\tfor _, k := range k.config.ConditionalModules {\n\t\tif v, ok := k8sModules[k]; ok {\n\t\t\tv.Spec.Disabled = true\n\t\t\tk8sModules[k] = v\n\t\t}\n\t}\n\n\t// TODO: When CMD gets upgraded, the following API will return latest info. We should use the latest info after service upgrade.\n\t// Till then We should use the old version (we may need to save the old version in kvstore and restore here)\n\n\t// Image name is always taken from containerMap, if present\n\t// Hence no need to specify the image name in override-config above\n\tcontainerInfoMap := k.getContainerInfo()\n\tfor name, module := range k8sModules {\n\t\tfor index, sm := range module.Spec.Submodules {\n\t\t\tinfo, ok := containerInfoMap[sm.Name]\n\t\t\tif ok {\n\t\t\t\tk8sModules[name].Spec.Submodules[index].Image = info.ImageName\n\t\t\t} else {\n\t\t\t\tk8sModules[name].Spec.Submodules[index].Image = sm.Name\n\t\t\t}\n\t\t}\n\t}\n\n\tbytes, _ := json.Marshal(k8sModules)\n\tlog.Infof(\"k8sModules are %s\", string(bytes))\n\n\tif k.isLeader {\n\t\tfor _, mod := range k8sModules {\n\t\t\tif mod.Spec.Disabled {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tk.modCh <- mod\n\t\t}\n\t}\n\tk.Add(1)\n\tgo k.waitForAPIServerOrCancel()\n}", "func StartBPFFSMigration(bpffsPath string, coll *ebpf.CollectionSpec) error {\n\tif coll == nil {\n\t\treturn errors.New(\"can't migrate a nil CollectionSpec\")\n\t}\n\n\tfor name, spec := range coll.Maps {\n\t\t// Skip map specs without the pinning flag. 
Also takes care of skipping .data,\n\t\t// .rodata and .bss.\n\t\tif spec.Pinning == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Re-pin the map with ':pending' suffix if incoming spec differs from\n\t\t// the currently-pinned map.\n\t\tif err := repinMap(bpffsPath, name, spec); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func patchService(svc *runapi.Service, envs map[string]string, image string, options options) *runapi.Service {\n\t// merge env vars\n\tsvc.Spec.Template.Spec.Containers[0].Env = mergeEnvs(svc.Spec.Template.Spec.Containers[0].Env, envs)\n\n\t// update container image\n\tsvc.Spec.Template.Spec.Containers[0].Image = image\n\n\t// update container port\n\tsvc.Spec.Template.Spec.Containers[0].Ports[0] = optionsToContainerSpec(options)\n\n\t// apply metadata annotations\n\tapplyMeta(svc.Metadata, image)\n\tapplyMeta(svc.Spec.Template.Metadata, image)\n\n\t// apply scale metadata annotations\n\tapplyScaleMeta(svc.Spec.Template.Metadata, \"maxScale\", options.MaxInstances)\n\n\t// update revision name\n\tsvc.Spec.Template.Metadata.Name = generateRevisionName(svc.Metadata.Name, svc.Metadata.Generation)\n\n\treturn svc\n}", "func (p *servicePlugin) GenerateImports(file *generator.FileDescriptor) {\n\tif !p.getGenericServicesOptions(file) {\n\t\treturn\n\t}\n\tif len(file.Service) > 0 {\n\t\tp.P(`import \"io\"`)\n\t\tp.P(`import \"log\"`)\n\t\tp.P(`import \"net\"`)\n\t\tp.P(`import \"net/rpc\"`)\n\t\tp.P(`import \"time\"`)\n\t\tp.P(`import protorpc \"wpbrpc\"`)\n\t}\n}", "func (srv *Server) setServicesHealth() {\n\n\tfor service := range srv.gRPC.GetServiceInfo() {\n\t\tsrv.health.SetServingStatus(service, healthpb.HealthCheckResponse_SERVING)\n\t\t// TODO: use debug log\n\t\t//log.Printf(\"Service health info %s is serving\\n\", service)\n\t}\n\n\tsrv.startHealthMonitor()\n\tlog.Printf(\"%s server health monitor started\", srv.name)\n}", "func (rt Kubernetes) UpdateInternalService(space string, app string, ports []int) (e error) {\n\tif space == \"\" {\n\t\treturn errors.New(\"Unable to update service, space is blank.\")\n\t}\n\tif app == \"\" {\n\t\treturn errors.New(\"Unable to update create service, the app is blank.\")\n\t}\n\n\texistingservice, e := rt.GetService(space, app+\"-cp\")\n\tif e != nil {\n\t\treturn e\n\t}\n\n\t// Replace all ports\n\ttype Ports []struct {\n\t\tName string \"json:\\\"name,omitempty\\\"\"\n\t\tProtocol string \"json:\\\"protocol\\\"\"\n\t\tPort int \"json:\\\"port\\\"\"\n\t\tTargetPort int \"json:\\\"targetPort\\\"\"\n\t\tNodePort int \"json:\\\"nodePort\\\"\"\n\t}\n\tportlist := Ports{}\n\tfor _, p := range ports {\n\t\tvar portitem PortItem\n\t\tportitem.Protocol = \"TCP\"\n\t\tportitem.Port = p\n\t\tportitem.TargetPort = p\n\t\tportitem.Name = \"cp-\" + strconv.Itoa(p)\n\t\tportlist = append(portlist, portitem)\n\t}\n\texistingservice.Spec.Ports = portlist\n\n\tresp, e := rt.k8sRequest(\"put\", \"/api/\"+rt.defaultApiServerVersion+\"/namespaces/\"+space+\"/services/\"+app+\"-cp\", existingservice)\n\tif e != nil {\n\t\treturn e\n\t}\n\tvar response Createspec\n\te = json.Unmarshal(resp.Body, &response)\n\tif e != nil {\n\t\treturn e\n\t}\n\treturn nil\n}", "func (client *serviceManagerClient) Bind(binding *types.ServiceBinding, q *Parameters) (*types.ServiceBinding, string, error) {\n\tvar newBinding *types.ServiceBinding\n\tlocation, err := client.register(binding, web.ServiceBindingsURL, q, &newBinding)\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\treturn newBinding, location, nil\n}", "func (r 
*ProjectsBrokersV2ServiceInstancesServiceBindingsService) Create(parent string, instanceId string, bindingId string, googlecloudservicebrokerv1alpha1__binding *GoogleCloudServicebrokerV1alpha1__Binding) *ProjectsBrokersV2ServiceInstancesServiceBindingsCreateCall {\n\tc := &ProjectsBrokersV2ServiceInstancesServiceBindingsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.parent = parent\n\tc.instanceId = instanceId\n\tc.bindingId = bindingId\n\tc.googlecloudservicebrokerv1alpha1__binding = googlecloudservicebrokerv1alpha1__binding\n\treturn c\n}", "func (capabilities CapNetBindService) AddCap() error {\n\t// Craft a duplicated capabilities\n\tdupCapabilities, err := cap.GetProc().Dup()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif on, err := dupCapabilities.GetFlag(cap.Permitted, cap.NET_BIND_SERVICE); !on {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\treturn errors.New(fmt.Sprintf(\n\t\t\t\t\"insufficient privilege to bind to low ports - want %q, have %q\",\n\t\t\t\tcap.NET_BIND_SERVICE, dupCapabilities))\n\t\t}\n\t}\n\tif err := dupCapabilities.SetFlag(cap.Effective, true, cap.NET_BIND_SERVICE); err != nil {\n\t\treturn errors.New(fmt.Sprintf(\n\t\t\t\"unable to set capability: %q\", err))\n\t}\n\tif err := dupCapabilities.SetProc(); err != nil {\n\t\treturn errors.New(fmt.Sprintf(\n\t\t\t\"unable to raise capabilities: %q\", err))\n\t}\n\tcapabilities.set = dupCapabilities\n\treturn nil\n}", "func (h *sentryWrapperHook) putServiceToTags(entry *logrus.Entry) *logrus.Entry {\n\tserviceField, ok := entry.Data[\"service\"]\n\tif !ok {\n\t\t// No 'service' field\n\t\treturn entry\n\t}\n\n\tserviceName, ok := serviceField.(string)\n\tif !ok {\n\t\t// Service field is not a string\n\t\treturn entry\n\t}\n\n\tserviceTag := raven.Tag{\n\t\tKey: \"service\",\n\t\tValue: serviceName,\n\t}\n\n\ttagsField, ok := entry.Data[\"tags\"]\n\tif ok {\n\t\t// Try to put service into tags.\n\t\ttags, ok := tagsField.(raven.Tags)\n\t\tif !ok {\n\t\t\t// Tags field is not a raven.Tags instance. 
That's quite strange though.\n\t\t\treturn entry\n\t\t}\n\n\t\tentry.Data[\"tags\"] = append(tags, serviceTag)\n\t} else {\n\t\t// No tags field.\n\t\tentry = entry.WithField(\"tags\", raven.Tags{\n\t\t\tserviceTag,\n\t\t})\n\t}\n\n\treturn entry\n}", "func (g *Generator) GenerateServices() error {\n\tif g.ServiceGen == nil || len(g.depfile.ProtoFile.Services) == 0 {\n\t\treturn nil\n\t}\n\n\tfor _, svc := range g.depfile.ProtoFile.CollectServices() {\n\t\terr := g.ServiceGen.GenerateService(g, svc.(*fproto.ServiceElement))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (c *ServiceImportController) updateServiceStatus(svcImport *mcsv1alpha1.ServiceImport, derivedService *corev1.Service) error {\n\tingress := make([]corev1.LoadBalancerIngress, 0)\n\tfor _, ip := range svcImport.Spec.IPs {\n\t\tingress = append(ingress, corev1.LoadBalancerIngress{\n\t\t\tIP: ip,\n\t\t})\n\t}\n\n\terr := retry.RetryOnConflict(retry.DefaultRetry, func() (err error) {\n\t\tderivedService.Status = corev1.ServiceStatus{\n\t\t\tLoadBalancer: corev1.LoadBalancerStatus{\n\t\t\t\tIngress: ingress,\n\t\t\t},\n\t\t}\n\t\tupdateErr := c.Status().Update(context.TODO(), derivedService)\n\t\tif updateErr == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tupdated := &corev1.Service{}\n\t\tif err = c.Get(context.TODO(), client.ObjectKey{Namespace: derivedService.Namespace, Name: derivedService.Name}, updated); err == nil {\n\t\t\tderivedService = updated\n\t\t} else {\n\t\t\tklog.Errorf(\"Failed to get updated service %s/%s: %v\", derivedService.Namespace, derivedService.Name, err)\n\t\t}\n\n\t\treturn updateErr\n\t})\n\n\tif err != nil {\n\t\tklog.Errorf(\"Update derived service(%s/%s) status failed, Error: %v\", derivedService.Namespace, derivedService.Name, err)\n\t\treturn err\n\t}\n\treturn nil\n}", "func ProvideServices(\n\tcollector *collection.Collector,\n\tgrpcsvr *apigrpc.Server,\n\thttpsvr *apihttp.Server,\n) []run.Service {\n\treturn []run.Service{\n\t\tcollector,\n\t\tgrpcsvr,\n\t\thttpsvr,\n\t}\n}", "func (r *ReferenceAdapter) AddOrUpdateBindings(serviceAccountEmail string) (AddorUpdateBindingResponse, error) {\n\tpolicy, err := r.gcpClient.GetIamPolicy(r.projectReference.Spec.GCPProjectID)\n\tif err != nil {\n\t\treturn AddorUpdateBindingResponse{}, err\n\t}\n\n\t//Checking if policy is modified\n\tnewBindings, modified := util.AddOrUpdateBinding(policy.Bindings, OSDRequiredRoles, serviceAccountEmail)\n\n\t// add new bindings to policy\n\tpolicy.Bindings = newBindings\n\treturn AddorUpdateBindingResponse{\n\t\tmodified: modified,\n\t\tpolicy: policy,\n\t}, nil\n}", "func Add(c *cli.Context) {\n\tif len(c.Args()) != 5 {\n\t\tMaybeError(c, \"expected <interface> <service> <app> <role> <subnet>\")\n\t}\n\n\tvslfile := MaybeLoadVslfile(c)\n\tservice, err := NewVslConfig(c.Args()[0], c.Args()[1], \n\t\t\tc.Args()[2],\n\t\t\tc.Args()[3],\n\t\t\tc.Args()[4])\n\tif err != nil {\n\t\tMaybeError(c, fmt.Sprintf(\"Failed to parse vsl entry: %s\", err))\n\t}\n\n\ti, replace := vslfile.Services.Contains_i(service)\n\n\t// Note that Add() may return an error, but they are informational only. We\n\t// don't actually care what the error is -- we just want to add the\n\t// hostname and save the file. 
This way the behavior is idempotent.\n\tif (replace) {\n\t\tvslfile.Services.ReplaceIndex(service, i)\n\t} else {\n\t\tvslfile.Services.Add(service)\n\t}\n\n\t// If the user passes -n then we'll Add and show the new hosts file, but\n\t// not save it.\n\tif c.Bool(\"n\") || AnyBool(c, \"n\") {\n\t\tfmt.Printf(\"%s\", vslfile.Format())\n\t} else {\n\t\tMaybeSaveVslfile(c, vslfile)\n\t\t// We'll give a little bit of information about whether we added or\n\t\t// updated, but if the user wants to know they can use has or ls to\n\t\t// show the file before they run the operation. Maybe later we can add\n\t\t// a verbose flag to show more information.\n\t\tif replace {\n\t\t\tMaybePrintln(c, fmt.Sprintf(\"Updated %s\", service.FormatHuman()))\n\t\t} else {\n\t\t\tMaybePrintln(c, fmt.Sprintf(\"Added %s\", service.FormatHuman()))\n\t\t}\n\t}\n}", "func (s *FlagsSource) Load(ctx context.Context, services []string) (err error) {\n\tconst (\n\t\tdelimiter = \"-\"\n\t\tassignment = \"=\"\n\t)\n\n\tfor _, svc := range uniqueStrings(services) {\n\t\ts.data[svc] = conf.NewMapConfig(sieveServiceConfig(svc, s.prefix, delimiter, assignment, s.args))\n\t}\n\n\treturn err\n}", "func listKnownServices(deployableUnitSet goldpushk.DeployableUnitSet) error {\n\tmode := \"production\"\n\tif flagTesting {\n\t\tmode = \"testing\"\n\t}\n\tfmt.Printf(\"Known Gold instances and services (%s):\\n\", mode)\n\n\t// Print out table header.\n\tw := tabwriter.NewWriter(os.Stdout, 10, 0, 2, ' ', 0)\n\tif _, err := fmt.Fprintln(w, \"\\nINSTANCE\\tSERVICE\\tCANONICAL NAME\"); err != nil {\n\t\treturn skerr.Wrap(err)\n\t}\n\n\t// Print out table body.\n\tfor _, instance := range deployableUnitSet.KnownInstances() {\n\t\tfor _, service := range deployableUnitSet.KnownServices() {\n\t\t\tunit, ok := deployableUnitSet.Get(goldpushk.DeployableUnitID{Instance: instance, Service: service})\n\t\t\tif ok {\n\t\t\t\tif _, err := fmt.Fprintf(w, \"%s\\t%s\\t%s\\n\", instance, service, unit.CanonicalName()); err != nil {\n\t\t\t\t\treturn skerr.Wrap(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Flush output and return.\n\tif err := w.Flush(); err != nil {\n\t\treturn skerr.Wrap(err)\n\t}\n\treturn nil\n}", "func BindingsFrom(api truce.API) (Bindings, error) {\n\tb := Bindings{\n\t\tFunctions: map[string]*Function{},\n\t}\n\n\tconfig := &truce.HTTP{Versions: []string{\"1.0\", \"1.1\", \"2.0\"}}\n\tif api.Transports.HTTP != nil {\n\t\tconfig = api.Transports.HTTP\n\t}\n\n\ttmpl, err := template.New(\"prefix\").Parse(config.Prefix)\n\tif err != nil {\n\t\treturn b, err\n\t}\n\n\tbuf := &bytes.Buffer{}\n\tif err := tmpl.Execute(buf, api); err != nil {\n\t\treturn b, err\n\t}\n\n\tconfig.Prefix = buf.String()\n\n\tfor _, f := range api.Functions {\n\t\tfn, err := NewFunction(config, f)\n\t\tif err != nil {\n\t\t\treturn b, err\n\t\t}\n\n\t\tif fn != nil {\n\t\t\tb.Functions[f.Name] = fn\n\t\t}\n\t}\n\n\tfor _, err := range config.Errors {\n\t\tt, ok := api.Types[err.Type]\n\t\tif !ok {\n\t\t\treturn b, errors.New(\"cannot locate type definition for transport error\")\n\t\t}\n\n\t\tif t.Type != \"error\" {\n\t\t\treturn b, errors.New(\"transport error type definition is not error\")\n\t\t}\n\n\t\tcode, err := strconv.ParseInt(err.StatusCode, 10, 64)\n\t\tif err != nil {\n\t\t\treturn b, fmt.Errorf(\"parsing status code: %w\", err)\n\t\t}\n\n\t\tb.Errors = append(b.Errors, Error{\n\t\t\tDefinition: t,\n\t\t\tStatusCode: int(code),\n\t\t})\n\t}\n\n\t// Sort the errors by status code so we have a deterministic order going\n\t// into any template 
phases.\n\tsort.Slice(b.Errors, func(i, j int) bool {\n\t\treturn b.Errors[i].StatusCode < b.Errors[j].StatusCode\n\t})\n\n\treturn b, nil\n}", "func (s *predefinedServiceLister) PredefinedServices(namespace string) PredefinedServiceNamespaceLister {\n\treturn predefinedServiceNamespaceLister{indexer: s.indexer, namespace: namespace}\n}", "func (c *AviController) SetupServiceImportEventHandlers(numWorkers uint32) {\n\tutils.AviLog.Infof(\"Setting up ServiceImport CRD Event handlers\")\n\n\tserviceImportEventHandler := cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) {\n\t\t\tif c.DisableSync {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsi := obj.(*akov1alpha1.ServiceImport)\n\t\t\tnamespace, _, _ := cache.SplitMetaNamespaceKey(utils.ObjKey(si))\n\t\t\tkey := lib.ServiceImport + \"/\" + utils.ObjKey(si)\n\t\t\tif lib.IsNamespaceBlocked(namespace) || !utils.CheckIfNamespaceAccepted(namespace) {\n\t\t\t\tutils.AviLog.Debugf(\"key: %s, msg: Service Import add event: Namespace: %s didn't qualify filter. Not adding Service Import\", key, namespace)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err := c.GetValidator().ValidateServiceImportObj(key, si); err != nil {\n\t\t\t\tutils.AviLog.Warnf(\"key: %s, msg: Validation of ServiceImport failed: %v\", key, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tutils.AviLog.Debugf(\"key: %s, msg: ADD\", key)\n\t\t\tbkt := utils.Bkt(namespace, numWorkers)\n\t\t\tc.workqueue[bkt].AddRateLimited(key)\n\t\t},\n\t\tUpdateFunc: func(old, new interface{}) {\n\t\t\tif c.DisableSync {\n\t\t\t\treturn\n\t\t\t}\n\t\t\toldObj := old.(*akov1alpha1.ServiceImport)\n\t\t\tsi := new.(*akov1alpha1.ServiceImport)\n\t\t\tif !reflect.DeepEqual(oldObj.Spec, si.Spec) {\n\t\t\t\tnamespace, _, _ := cache.SplitMetaNamespaceKey(utils.ObjKey(si))\n\t\t\t\tkey := lib.ServiceImport + \"/\" + utils.ObjKey(si)\n\t\t\t\tif lib.IsNamespaceBlocked(namespace) || !utils.CheckIfNamespaceAccepted(namespace) {\n\t\t\t\t\tutils.AviLog.Debugf(\"key: %s, msg: Service Import update event: Namespace: %s didn't qualify filter. Not updating Service Import\", key, namespace)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif err := c.GetValidator().ValidateServiceImportObj(key, si); err != nil {\n\t\t\t\t\tutils.AviLog.Warnf(\"key: %s, msg: Validation of ServiceImport failed: %v\", key, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tutils.AviLog.Debugf(\"key: %s, msg: UPDATE\", key)\n\t\t\t\tbkt := utils.Bkt(namespace, numWorkers)\n\t\t\t\tc.workqueue[bkt].AddRateLimited(key)\n\t\t\t}\n\t\t},\n\t\tDeleteFunc: func(obj interface{}) {\n\t\t\tif c.DisableSync {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsi, ok := obj.(*akov1alpha1.ServiceImport)\n\t\t\tif !ok {\n\t\t\t\ttombstone, ok := obj.(cache.DeletedFinalStateUnknown)\n\t\t\t\tif !ok {\n\t\t\t\t\tutils.AviLog.Errorf(\"couldn't get object from tombstone %#v\", obj)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tsi, ok = tombstone.Obj.(*akov1alpha1.ServiceImport)\n\t\t\t\tif !ok {\n\t\t\t\t\tutils.AviLog.Errorf(\"Tombstone contained object that is not a ServiceImport: %#v\", obj)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tnamespace, _, _ := cache.SplitMetaNamespaceKey(utils.ObjKey(si))\n\t\t\tkey := lib.ServiceImport + \"/\" + utils.ObjKey(si)\n\t\t\tif lib.IsNamespaceBlocked(namespace) || !utils.CheckIfNamespaceAccepted(namespace) {\n\t\t\t\tutils.AviLog.Debugf(\"key: %s, msg: Service Import delete event: Namespace: %s didn't qualify filter. 
Not deleting Service Import\", key, namespace)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tutils.AviLog.Debugf(\"key: %s, msg: DELETE\", key)\n\t\t\tbkt := utils.Bkt(namespace, numWorkers)\n\t\t\tobjects.SharedResourceVerInstanceLister().Delete(key)\n\t\t\tc.workqueue[bkt].AddRateLimited(key)\n\t\t},\n\t}\n\tc.informers.ServiceImportInformer.Informer().AddEventHandler(serviceImportEventHandler)\n}", "func (agent *MonitorAgent) addDeploymentsServices(deps map[string]appsv1.Deployment, services map[string]*pb.ServiceHealthReport) {\n\tfor depName, dep := range deps {\n\t\tserviceMsg, err := agent.buildDeploymentMessage(dep)\n\t\tif err != nil {\n\t\t\tlogger.Error(err, \"error building Deployment message\")\n\t\t\tcontinue\n\t\t}\n\t\tif _, ok := services[depName]; ok {\n\t\t\tlogger.Info(\"duplicate service name\", \"service\", depName)\n\t\t}\n\t\tservices[depName] = serviceMsg\n\t}\n}", "func serviceMutateFn(ctx context.Context, newService *corev1.Service, client client.Client) func() error {\n\treturn func() error {\n\t\t// TODO: probably nothing has to be done, check v1 implementation of CreateOrUpdate\n\t\t//existingService := existing.(*corev1.Service)\n\t\t//if !reflect.DeepEqual(newService.Spec, existingService.Spec) {\n\t\t//\treturn client.Update(ctx, existing)\n\t\t//}\n\t\treturn nil\n\t}\n}", "func (metaBundle *MetadataMapperBundle) mapServices(nodeName string, pods v1.PodList, endpointList v1.EndpointsList) error {\n\tmetaBundle.m.Lock()\n\tdefer metaBundle.m.Unlock()\n\tipToEndpoints := make(map[string][]string) // maps the IP address from an endpoint (pod) to associated services ex: \"10.10.1.1\" : [\"service1\",\"service2\"]\n\tpodToIp := make(map[string]string) // maps the pods of the currently evaluated node to their IP.\n\n\tif pods.Items == nil {\n\t\treturn fmt.Errorf(\"empty podlist received for nodeName %q\", nodeName)\n\t}\n\tif nodeName == \"\" {\n\t\tlog.Debugf(\"Service mapper was given an empty node name. 
Mapping might be incorrect.\")\n\t}\n\n\tfor _, pod := range pods.Items {\n\t\tif pod.Status.PodIP != \"\" {\n\t\t\tpodToIp[pod.Name] = pod.Status.PodIP\n\t\t}\n\t}\n\tfor _, svc := range endpointList.Items {\n\t\tfor _, endpointsSubsets := range svc.Subsets {\n\t\t\tif endpointsSubsets.Addresses == nil {\n\t\t\t\tlog.Tracef(\"A subset of endpoints from %s could not be evaluated\", svc.Name)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, edpt := range endpointsSubsets.Addresses {\n\t\t\t\tif edpt.NodeName != nil && *edpt.NodeName == nodeName {\n\t\t\t\t\tipToEndpoints[edpt.IP] = append(ipToEndpoints[edpt.IP], svc.Name)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tfor name, ip := range podToIp {\n\t\tif svc, found := ipToEndpoints[ip]; found {\n\t\t\tmetaBundle.PodNameToService[name] = svc\n\t\t}\n\t}\n\tlog.Tracef(\"The services matched %q\", fmt.Sprintf(\"%s\", metaBundle.PodNameToService))\n\treturn nil\n}", "func (c *Config) AddVolumesToIntendedSTS(sts *appsv1.StatefulSet, volumeConfigMapMap map[string]string) {\n\tAddVolumesToIntendedSTS(sts, volumeConfigMapMap)\n}", "func StartServices(\n\tservices []SolskinService,\n\tclient kubernetes.Interface,\n\tcfg config.Config,\n) (chan struct{}, error) {\n\t// Initialize services here.\n\tfor _, service := range services {\n\t\tservice.Init()\n\t}\n\n\t// Determine our resync period, defaulting to five minutes.\n\tresyncValue := cfg.Get(\"informers\", \"resync\").String(\"5m\")\n\tresync, err := time.ParseDuration(resyncValue)\n\tif err != nil {\n\t\tlog.Printf(\"could not parse resync duration, value given: [%s]\", resyncValue)\n\t\tlog.Println(\"defaulting to 5 minute resync period\")\n\t\tresync = time.Duration(5 * time.Minute)\n\t}\n\n\t// Create our informers.\n\tfactory := informers.NewSharedInformerFactory(client, resync)\n\tinformers := []cache.SharedIndexInformer{\n\t\tfactory.Apps().V1().DaemonSets().Informer(),\n\t\tfactory.Apps().V1().Deployments().Informer(),\n\t\tfactory.Apps().V1().StatefulSets().Informer(),\n\t\tfactory.Batch().V1().Jobs().Informer(),\n\t\tfactory.Core().V1().Pods().Informer(),\n\t}\n\n\thandlers := make([]cache.ResourceEventHandlerFuncs, 0)\n\tfor _, service := range services {\n\t\thandlers = append(handlers, service.GenerateEventHandlers()...)\n\t}\n\n\tfor _, informer := range informers {\n\t\tfor _, handler := range handlers {\n\t\t\tinformer.AddEventHandler(handler)\n\t\t}\n\t}\n\n\t// Spool up services here.\n\tfor _, service := range services {\n\t\tservice.Start()\n\t}\n\n\t// Start our informers.\n\ts := make(chan struct{})\n\tfor _, informer := range informers {\n\t\tgo informer.Run(s)\n\t}\n\n\treturn s, nil\n}", "func (c *controller) getBindingsFrom(sName string, fromBindings map[string]*scmodel.Credential) error {\n\tbindings, err := storage.GetBindingsForService(c.storage, sName, storage.From)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to fetch bindings for %s : %v\", sName, err)\n\t\treturn err\n\t}\n\tfor _, b := range bindings {\n\t\tlog.Printf(\"Found binding %s for service %s\", b.Name, sName)\n\t\tfromBindings[b.Name] = &b.Credentials\n\t}\n\treturn nil\n}", "func (r *Reconciler) Reconcile(request reconcile.Request) (reconcile.Result, error) {\n\tlogger := reconcilerLog.WithValues(\n\t\t\"Request.Namespace\", request.Namespace,\n\t\t\"Request.Name\", request.Name,\n\t)\n\n\tlogger.Info(\"Reconciling ServiceBindingRequest...\")\n\n\t// fetch and validate namespaced ServiceBindingRequest instance\n\tsbr, err := r.getServiceBindingRequest(request.NamespacedName)\n\tif err != nil {\n\t\tlogger.Error(err, 
\"On retrieving service-binding-request instance.\")\n\t\treturn DoneOnNotFound(err)\n\t}\n\n\t// validate namespaced ServiceBindingRequest instance (this check has been disabled until test data has been\n\t// adjusted to reflect the validation)\n\t//\n\t//if err = r.validateServiceBindingRequest(sbr); err != nil {\n\t//\tlogger.Error(err, \"On validating service-binding-request instance.\")\n\t//\treturn Done()\n\t//}\n\n\tlogger = logger.WithValues(\"ServiceBindingRequest.Name\", sbr.Name)\n\tlogger.Debug(\"Found service binding request to inspect\")\n\n\t// splitting instance from it's status\n\tsbrStatus := &sbr.Status\n\n\toptions := &ServiceBinderOptions{\n\t\tClient: r.client,\n\t\tDynClient: r.dynClient,\n\t\tDetectBindingResources: sbr.Spec.DetectBindingResources,\n\t\tEnvVarPrefix: sbr.Spec.EnvVarPrefix,\n\t\tSBR: sbr,\n\t\tLogger: logger,\n\t}\n\n\tbm, err := BuildServiceBinder(options)\n\tif err != nil {\n\t\tlogger.Error(err, \"Creating binding context\")\n\t\tif err == EmptyBackingServiceSelectorsErr || err == EmptyApplicationSelectorErr {\n\t\t\t// TODO: find or create an error type containing suitable information to be propagated\n\t\t\tvar reason string\n\t\t\tif err == EmptyBackingServiceSelectorsErr {\n\t\t\t\treason = \"EmptyBackingServiceSelectors\"\n\t\t\t} else {\n\t\t\t\treason = \"EmptyApplicationSelector\"\n\t\t\t}\n\n\t\t\tv1.SetStatusCondition(&sbr.Status.Conditions, v1.Condition{\n\t\t\t\tType: conditions.BindingReady,\n\t\t\t\tStatus: corev1.ConditionFalse,\n\t\t\t\tReason: reason,\n\t\t\t\tMessage: err.Error(),\n\t\t\t})\n\t\t\t_, updateErr := updateServiceBindingRequestStatus(r.dynClient, sbr)\n\t\t\tif updateErr == nil {\n\t\t\t\treturn Done()\n\t\t\t}\n\t\t}\n\t\treturn RequeueError(err)\n\t}\n\n\tif sbr.GetDeletionTimestamp() != nil {\n\t\tlogger.Info(\"Resource is marked for deletion...\")\n\t\treturn r.unbind(logger, bm)\n\t}\n\n\tlogger.Info(\"Starting the bind of application(s) with backing service...\")\n\treturn r.bind(logger, bm, sbrStatus)\n}", "func addServiceLabels(objectMeta *metav1.ObjectMeta, kogitoApp *v1alpha1.KogitoApp) {\n\tif objectMeta != nil {\n\t\tif objectMeta.Labels == nil {\n\t\t\tobjectMeta.Labels = map[string]string{}\n\t\t}\n\n\t\tif kogitoApp.Spec.Service.Labels == nil {\n\t\t\tobjectMeta.Labels[LabelKeyServiceName] = kogitoApp.Spec.Name\n\t\t} else {\n\t\t\tfor key, value := range kogitoApp.Spec.Service.Labels {\n\t\t\t\tobjectMeta.Labels[key] = value\n\t\t\t}\n\t\t}\n\n\t}\n}" ]
[ "0.56772745", "0.54476625", "0.5211605", "0.51789063", "0.51135707", "0.50000817", "0.4976379", "0.49644193", "0.49369168", "0.48852873", "0.48671865", "0.4861238", "0.48560068", "0.48487443", "0.482289", "0.48134246", "0.48115814", "0.47962728", "0.47867313", "0.47812405", "0.47598204", "0.47534397", "0.46983182", "0.4687515", "0.46868378", "0.4685911", "0.46846843", "0.4683413", "0.4678944", "0.46736607", "0.46715742", "0.46415138", "0.46241793", "0.462247", "0.4610893", "0.4603685", "0.4602587", "0.4600944", "0.45901445", "0.45892528", "0.457606", "0.4564881", "0.45637533", "0.45576712", "0.4556969", "0.45509592", "0.45492157", "0.45377427", "0.45292157", "0.45252168", "0.4509289", "0.44823718", "0.44731206", "0.4472578", "0.44720003", "0.44608572", "0.44597602", "0.44564408", "0.44459492", "0.44395837", "0.44392273", "0.44343787", "0.44299158", "0.44230318", "0.44223964", "0.44152802", "0.4414817", "0.4386789", "0.43864787", "0.43857932", "0.43805006", "0.43795365", "0.4369036", "0.43644777", "0.4360541", "0.4357003", "0.4356546", "0.43557373", "0.43518314", "0.4343209", "0.43375516", "0.43370777", "0.4334353", "0.43310374", "0.43300682", "0.4324861", "0.43221396", "0.43210635", "0.43171948", "0.4315966", "0.4305753", "0.4303686", "0.43032977", "0.42990804", "0.42988777", "0.42983598", "0.42983556", "0.4295565", "0.42868134", "0.42848945" ]
0.7591342
0
EnvironmentChange imports the current environment into the deployment. It requires only the names of the currently existing environment variables, not their values, because the import is performed internally as pod env specifications that use secret key references.
func (a *Workload) EnvironmentChange(ctx context.Context, varNames []string) error { return retry.RetryOnConflict(retry.DefaultRetry, func() error { // Retrieve the latest version of Deployment before attempting update // RetryOnConflict uses exponential backoff to avoid exhausting the apiserver deployment, err := a.Deployment(ctx) if err != nil { return err } evSecretName := a.app.MakeEnvSecretName() // 1. Remove all the old EVs referencing the app's EV secret. // 2. Add entries for the new set of EV's (S.a varNames). // 3. Replace container spec // // Note: While 1+2 could be optimized to only remove entries of // EVs not in varNames, and add only entries for varNames // not in Env, this is way more complex for what is likely // just 10 entries. I expect any gain in perf to be // negligible, and completely offset by the complexity of // understanding and maintaining it later. Full removal // and re-adding is much simpler to understand, and should // be fast enough. newEnvironment := []corev1.EnvVar{} for _, ev := range deployment.Spec.Template.Spec.Containers[0].Env { // Drop EV if pulled from EV secret of the app if ev.ValueFrom != nil && ev.ValueFrom.SecretKeyRef != nil && ev.ValueFrom.SecretKeyRef.Name == evSecretName { continue } // Keep everything else. newEnvironment = append(newEnvironment, ev) } for _, varName := range varNames { newEnvironment = append(newEnvironment, corev1.EnvVar{ Name: varName, ValueFrom: &corev1.EnvVarSource{ SecretKeyRef: &corev1.SecretKeySelector{ LocalObjectReference: corev1.LocalObjectReference{ Name: evSecretName, }, Key: varName, }, }, }) } deployment.Spec.Template.Spec.Containers[0].Env = newEnvironment _, err = a.cluster.Kubectl.AppsV1().Deployments(a.app.Org).Update( ctx, deployment, metav1.UpdateOptions{}) return err }) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func env() error {\n\t// regexp for TF_VAR_ terraform vars\n\ttfVar := regexp.MustCompile(`^TF_VAR_.*$`)\n\n\t// match terraform vars in environment\n\tfor _, e := range os.Environ() {\n\t\t// split on value\n\t\tpair := strings.SplitN(e, \"=\", 2)\n\n\t\t// match on TF_VAR_*\n\t\tif tfVar.MatchString(pair[0]) {\n\t\t\t// pull out the name\n\t\t\tname := strings.Split(pair[0], \"TF_VAR_\")\n\n\t\t\t// lower case the terraform variable\n\t\t\t// to accommodate cicd injection capitalization\n\t\t\terr := os.Setenv(fmt.Sprintf(\"TF_VAR_%s\",\n\t\t\t\tstrings.ToLower(name[1])), pair[1])\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func RenameEnvironment(host string, verifyTLS bool, apiKey string, project string, environment string, name string, slug string) (models.EnvironmentInfo, Error) {\n\tpostBody := map[string]string{\"project\": project, \"environment\": environment}\n\tif name != \"\" {\n\t\tpostBody[\"name\"] = name\n\t}\n\tif slug != \"\" {\n\t\tpostBody[\"slug\"] = slug\n\t}\n\tbody, err := json.Marshal(postBody)\n\tif err != nil {\n\t\treturn models.EnvironmentInfo{}, Error{Err: err, Message: \"Invalid environment info\"}\n\t}\n\n\turl, err := generateURL(host, \"/v3/environments/environment\", nil)\n\tif err != nil {\n\t\treturn models.EnvironmentInfo{}, Error{Err: err, Message: \"Unable to generate url\"}\n\t}\n\n\tstatusCode, _, response, err := PutRequest(url, verifyTLS, apiKeyHeader(apiKey), body)\n\tif err != nil {\n\t\treturn models.EnvironmentInfo{}, Error{Err: err, Message: \"Unable to rename environment\", Code: statusCode}\n\t}\n\n\tvar result map[string]interface{}\n\terr = json.Unmarshal(response, &result)\n\tif err != nil {\n\t\treturn models.EnvironmentInfo{}, Error{Err: err, Message: \"Unable to parse API response\", Code: statusCode}\n\t}\n\n\tenvironmentInfo, ok := result[\"environment\"].(map[string]interface{})\n\tif !ok {\n\t\treturn models.EnvironmentInfo{}, Error{Err: fmt.Errorf(\"Unexpected type parsing environment, expected map[string]interface{}, got %T\", result[\"environment\"]), Message: \"Unable to parse API response\", Code: statusCode}\n\t}\n\n\tinfo := models.ParseEnvironmentInfo(environmentInfo)\n\treturn info, Error{}\n}", "func (cluster *Cluster) LoadEnvironment(namespace string) (*bitesize.Environment, error) {\n\tserviceMap := make(ServiceMap)\n\n\tclient := &k8s.Client{\n\t\tNamespace: namespace,\n\t\tInterface: cluster.Interface,\n\t\tTPRClient: cluster.TPRClient,\n\t}\n\n\tns, err := client.Ns().Get()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Namespace %s not found\", namespace)\n\t}\n\tenvironmentName := ns.ObjectMeta.Labels[\"environment\"]\n\n\tservices, err := client.Service().List()\n\tif err != nil {\n\t\tlog.Errorf(\"Error loading kubernetes services: %s\", err.Error())\n\t}\n\tfor _, service := range services {\n\t\tserviceMap.AddService(service)\n\t}\n\n\tdeployments, err := client.Deployment().List()\n\tif err != nil {\n\t\tlog.Errorf(\"Error loading kubernetes deployments: %s\", err.Error())\n\t}\n\tfor _, deployment := range deployments {\n\t\tserviceMap.AddDeployment(deployment)\n\t}\n\n\thpas, err := client.HorizontalPodAutoscaler().List()\n\tif err != nil {\n\t\tlog.Errorf(\"Error loading kubernetes hpas: %s\", err.Error())\n\t}\n\tfor _, hpa := range hpas {\n\t\tserviceMap.AddHPA(hpa)\n\t}\n\n\tingresses, err := client.Ingress().List()\n\tif err != nil {\n\t\tlog.Errorf(\"Error loading kubernetes ingresses: %s\", err.Error())\n\t}\n\n\tfor _, ingress := range ingresses 
{\n\t\tserviceMap.AddIngress(ingress)\n\t}\n\n\tstatefulsets, err := client.StatefulSet().List()\n\tif err != nil {\n\t\tlog.Errorf(\"Error loading kubernetes statefulsets : %s\", err.Error())\n\t}\n\n\tfor _, statefulset := range statefulsets {\n\t\tserviceMap.AddMongoStatefulSet(statefulset)\n\t}\n\n\t// we'll need the same for tprs\n\tclaims, _ := client.PVC().List()\n\tfor _, claim := range claims {\n\t\tserviceMap.AddVolumeClaim(claim)\n\t}\n\n\tfor _, supported := range k8_extensions.SupportedThirdPartyResources {\n\t\ttprs, _ := client.ThirdPartyResource(supported).List()\n\t\tfor _, tpr := range tprs {\n\t\t\tserviceMap.AddThirdPartyResource(tpr)\n\t\t}\n\t}\n\n\tbitesizeConfig := bitesize.Environment{\n\t\tName: environmentName,\n\t\tNamespace: namespace,\n\t\tServices: serviceMap.Services(),\n\t}\n\n\treturn &bitesizeConfig, nil\n}", "func (hc ApplicationsController) EnvSet(w http.ResponseWriter, r *http.Request) APIErrors {\n\tctx := r.Context()\n\tlog := tracelog.Logger(ctx)\n\n\tparams := httprouter.ParamsFromContext(ctx)\n\torgName := params.ByName(\"org\")\n\tappName := params.ByName(\"app\")\n\n\tlog.Info(\"processing environment variable assignment\",\n\t\t\"org\", orgName, \"app\", appName)\n\n\tcluster, err := kubernetes.GetCluster(ctx)\n\tif err != nil {\n\t\treturn InternalError(err)\n\t}\n\n\texists, err := organizations.Exists(ctx, cluster, orgName)\n\tif err != nil {\n\t\treturn InternalError(err)\n\t}\n\n\tif !exists {\n\t\treturn OrgIsNotKnown(orgName)\n\t}\n\n\tapp := models.NewAppRef(appName, orgName)\n\n\texists, err = application.Exists(ctx, cluster, app)\n\tif err != nil {\n\t\treturn InternalError(err)\n\t}\n\n\tif !exists {\n\t\treturn AppIsNotKnown(appName)\n\t}\n\n\tdefer r.Body.Close()\n\tbodyBytes, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\treturn InternalError(err)\n\t}\n\n\tvar setRequest models.EnvVariableList\n\terr = json.Unmarshal(bodyBytes, &setRequest)\n\tif err != nil {\n\t\treturn BadRequest(err)\n\t}\n\n\terr = application.EnvironmentSet(ctx, cluster, app, setRequest)\n\tif err != nil {\n\t\treturn InternalError(err)\n\t}\n\n\treturn nil\n}", "func (input *CreateTaskDefinitionInput) Environment() []*awsecs.KeyValuePair {\n\treturn convertEnvVars(input.EnvVars)\n}", "func GetENV(experimentDetails *experimentTypes.ExperimentDetails) {\n\texperimentDetails.ExperimentName = Getenv(\"EXPERIMENT_NAME\", \"\")\n\texperimentDetails.AppNS = Getenv(\"APP_NS\", \"\")\n\texperimentDetails.TargetContainer = Getenv(\"APP_CONTAINER\", \"\")\n\texperimentDetails.TargetPods = Getenv(\"APP_POD\", \"\")\n\texperimentDetails.AppLabel = Getenv(\"APP_LABEL\", \"\")\n\texperimentDetails.ChaosDuration, _ = strconv.Atoi(Getenv(\"TOTAL_CHAOS_DURATION\", \"30\"))\n\texperimentDetails.ChaosNamespace = Getenv(\"CHAOS_NAMESPACE\", \"litmus\")\n\texperimentDetails.EngineName = Getenv(\"CHAOS_ENGINE\", \"\")\n\texperimentDetails.ChaosUID = clientTypes.UID(Getenv(\"CHAOS_UID\", \"\"))\n\texperimentDetails.ChaosPodName = Getenv(\"POD_NAME\", \"\")\n\texperimentDetails.ContainerRuntime = Getenv(\"CONTAINER_RUNTIME\", \"\")\n\texperimentDetails.NetworkInterface = Getenv(\"NETWORK_INTERFACE\", \"eth0\")\n\texperimentDetails.TargetIPs = Getenv(\"TARGET_IPs\", \"\")\n}", "func (c *Client) UpdateEnv(name, value string, encrypt, remove bool) (*http.Response, error) {\n\treturn c.post(\"/env\", common.EnvRequest{\n\t\tName: name, Value: value, Encrypt: encrypt, Remove: remove,\n\t})\n}", "func UpdateEnvVars(template *servingv1alpha1.RevisionTemplateSpec, toUpdate 
map[string]string, toRemove []string) error {\n\tcontainer, err := ContainerOfRevisionTemplate(template)\n\tif err != nil {\n\t\treturn err\n\t}\n\tupdated := updateEnvVarsFromMap(container.Env, toUpdate)\n\tupdated = removeEnvVars(updated, toRemove)\n\t// Sort by env key name\n\tsort.SliceStable(updated, func(i, j int) bool {\n\t\treturn updated[i].Name < updated[j].Name\n\t})\n\tcontainer.Env = updated\n\n\treturn nil\n}", "func InjectEnvIntoDeployment(podSpec *corev1.PodSpec, envVars []corev1.EnvVar) error {\n\tif podSpec == nil {\n\t\treturn errors.New(\"no pod spec provided\")\n\t}\n\n\tfor i := range podSpec.Containers {\n\t\tcontainer := &podSpec.Containers[i]\n\t\tcontainer.Env = merge(container.Env, envVars)\n\t}\n\n\treturn nil\n}", "func (cluster *Cluster) ApplyEnvironment(currentEnvironment, newEnvironment *bitesize.Environment) error {\n\tvar err error\n\n\tfor _, service := range newEnvironment.Services {\n\n\t\tmapper := &translator.KubeMapper{\n\t\t\tBiteService: &service,\n\t\t\tNamespace: newEnvironment.Namespace,\n\t\t}\n\n\t\tclient := &k8s.Client{\n\t\t\tInterface: cluster.Interface,\n\t\t\tNamespace: newEnvironment.Namespace,\n\t\t\tTPRClient: cluster.TPRClient,\n\t\t}\n\n\t\tif service.Type == \"\" {\n\n\t\t\tif !shouldDeploy(currentEnvironment, newEnvironment, service.Name) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif service.DatabaseType == \"mongo\" {\n\t\t\t\tlog.Debugf(\"Applying Stateful set for Mongo DB Service: %s \", service.Name)\n\n\t\t\t\tsecret, _ := mapper.MongoInternalSecret()\n\n\t\t\t\t//Only apply the secret if it doesn't exist. Changing this secret would prevent a deployed mongo\n\t\t\t\t//cluster from being able to communicate between replicas. Need a way to update this secret\n\t\t\t\t// and redeploy the mongo statefulset. For now, just protect against changing the secret\n\t\t\t\t// via environment operator\n\t\t\t\tif !client.Secret().Exists(secret.Name) {\n\t\t\t\t\tif err = client.Secret().Apply(secret); err != nil {\n\t\t\t\t\t\tlog.Error(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tstatefulset, _ := mapper.MongoStatefulSet()\n\t\t\t\tif err = client.StatefulSet().Apply(statefulset); err != nil {\n\t\t\t\t\tlog.Error(err)\n\t\t\t\t}\n\n\t\t\t\tsvc, _ := mapper.HeadlessService()\n\t\t\t\tif err = client.Service().Apply(svc); err != nil {\n\t\t\t\t\tlog.Error(err)\n\t\t\t\t}\n\n\t\t\t} else { //Only apply a Deployment and PVCs if this is not a DB service. 
The DB Statefulset creates its own PVCs\n\t\t\t\tlog.Debugf(\"Applying Deployment for Service %s \", service.Name)\n\t\t\t\tdeployment, err := mapper.Deployment()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err = client.Deployment().Apply(deployment); err != nil {\n\t\t\t\t\tlog.Error(err)\n\t\t\t\t}\n\n\t\t\t\tpvc, _ := mapper.PersistentVolumeClaims()\n\t\t\t\tfor _, claim := range pvc {\n\t\t\t\t\tif err = client.PVC().Apply(&claim); err != nil {\n\t\t\t\t\t\tlog.Error(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tsvc, _ := mapper.Service()\n\t\t\t\tif err = client.Service().Apply(svc); err != nil {\n\t\t\t\t\tlog.Error(err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\thpa, _ := mapper.HPA()\n\t\t\tif err = client.HorizontalPodAutoscaler().Apply(&hpa); err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t}\n\n\t\t\tif service.ExternalURL != \"\" {\n\t\t\t\tingress, _ := mapper.Ingress()\n\t\t\t\tif err = client.Ingress().Apply(ingress); err != nil {\n\t\t\t\t\tlog.Error(err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t} else {\n\t\t\ttpr, _ := mapper.ThirdPartyResource()\n\t\t\tif err = client.ThirdPartyResource(tpr.Kind).Apply(tpr); err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t}\n\t\t}\n\t}\n\treturn err\n}", "func (c *Context) SetEnvironment(e string) { c.envName = e }", "func (gf *genericFramework) Env(key, value string) error {\n\tif gf.adam.Variables == nil {\n\t\tgf.adam.Variables = jsonutil.NewVariableMap(\"\", nil)\n\t}\n\tif _, ok := gf.adam.Variables.Get(key); ok {\n\t\treturn fmt.Errorf(\"%v has been defined\", key)\n\t}\n\tgf.adam.Variables.Set(key, jsonutil.NewStringVariable(key, value))\n\treturn nil\n}", "func (b *taskBuilder) env(key, value string) {\n\tif b.Spec.Environment == nil {\n\t\tb.Spec.Environment = map[string]string{}\n\t}\n\tb.Spec.Environment[key] = value\n}", "func importEnv() map[string]string {\r\n\tvar myEnv map[string]string\r\n\tmyEnv, err := godotenv.Read()\r\n\tif err != nil {\r\n\t\tlog.Fatal(\"Error loading .env file\")\r\n\t}\r\n\r\n\treturn myEnv\r\n}", "func (a *Client) ModifyRuntimeEnv(params *ModifyRuntimeEnvParams) (*ModifyRuntimeEnvOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewModifyRuntimeEnvParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"ModifyRuntimeEnv\",\n\t\tMethod: \"PATCH\",\n\t\tPathPattern: \"/v1/runtime_envs\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &ModifyRuntimeEnvReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*ModifyRuntimeEnvOK), nil\n\n}", "func TestInjectEnvironment(t *testing.T) {\n\tspec := func(containers ...corev1.Container) appsv1.DeploymentSpec {\n\t\treturn appsv1.DeploymentSpec{\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: containers,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\n\ttests := []struct {\n\t\tname string\n\t\tin *appsv1.Deployment\n\t\tdeployment string\n\t\tcontainer string\n\t\tenvs []corev1.EnvVar\n\t\tcommonEnvs []corev1.EnvVar\n\t\twant *appsv1.Deployment\n\t}{{\n\t\tname: \"ignore\",\n\t\tdeployment: \"foo\",\n\t\tcontainer: \"container1\",\n\t\tenvs: []corev1.EnvVar{envVar(\"foo\", \"bar\")},\n\t\tcommonEnvs: []corev1.EnvVar{envVar(\"KUBERNETES_MIN_VERSION\", \"v1.0.0\")},\n\t\tin: &appsv1.Deployment{\n\t\t\tObjectMeta: 
metav1.ObjectMeta{\n\t\t\t\tName: \"test\",\n\t\t\t},\n\t\t\tSpec: spec(corev1.Container{\n\t\t\t\tName: \"container1\",\n\t\t\t\tEnv: []corev1.EnvVar{envVar(\"1\", \"1\")},\n\t\t\t}),\n\t\t},\n\t\twant: &appsv1.Deployment{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"test\",\n\t\t\t},\n\t\t\tSpec: spec(corev1.Container{\n\t\t\t\tName: \"container1\",\n\t\t\t\tEnv: []corev1.EnvVar{envVar(\"1\", \"1\"), envVar(\"KUBERNETES_MIN_VERSION\", \"v1.0.0\")},\n\t\t\t}),\n\t\t},\n\t}, {\n\t\tname: \"append\",\n\t\tdeployment: \"test\",\n\t\tcontainer: \"container1\",\n\t\tenvs: []corev1.EnvVar{envVar(\"foo\", \"bar\")},\n\t\tcommonEnvs: []corev1.EnvVar{envVar(\"KUBERNETES_MIN_VERSION\", \"v1.0.0\")},\n\t\tin: &appsv1.Deployment{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"test\",\n\t\t\t},\n\t\t\tSpec: spec(corev1.Container{\n\t\t\t\tName: \"container1\",\n\t\t\t\tEnv: []corev1.EnvVar{envVar(\"1\", \"1\")},\n\t\t\t}, corev1.Container{\n\t\t\t\tName: \"container2\",\n\t\t\t\tEnv: []corev1.EnvVar{envVar(\"2\", \"2\")},\n\t\t\t}),\n\t\t},\n\t\twant: &appsv1.Deployment{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"test\",\n\t\t\t},\n\t\t\tSpec: spec(corev1.Container{\n\t\t\t\tName: \"container1\",\n\t\t\t\tEnv: []corev1.EnvVar{envVar(\"1\", \"1\"), envVar(\"foo\", \"bar\"), envVar(\"KUBERNETES_MIN_VERSION\", \"v1.0.0\")},\n\t\t\t}, corev1.Container{\n\t\t\t\tName: \"container2\",\n\t\t\t\tEnv: []corev1.EnvVar{envVar(\"2\", \"2\"), envVar(\"KUBERNETES_MIN_VERSION\", \"v1.0.0\")},\n\t\t\t}),\n\t\t},\n\t}, {\n\t\tname: \"update\",\n\t\tdeployment: \"test\",\n\t\tcontainer: \"container2\",\n\t\tenvs: []corev1.EnvVar{envVar(\"2\", \"bar\")},\n\t\tcommonEnvs: []corev1.EnvVar{envVar(\"KUBERNETES_MIN_VERSION\", \"v1.0.0\")},\n\t\tin: &appsv1.Deployment{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"test\",\n\t\t\t},\n\t\t\tSpec: spec(corev1.Container{\n\t\t\t\tName: \"container1\",\n\t\t\t\tEnv: []corev1.EnvVar{envVar(\"1\", \"1\")},\n\t\t\t}, corev1.Container{\n\t\t\t\tName: \"container2\",\n\t\t\t\tEnv: []corev1.EnvVar{envVar(\"2\", \"2\")},\n\t\t\t}),\n\t\t},\n\t\twant: &appsv1.Deployment{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"test\",\n\t\t\t},\n\t\t\tSpec: spec(corev1.Container{\n\t\t\t\tName: \"container1\",\n\t\t\t\tEnv: []corev1.EnvVar{envVar(\"1\", \"1\"), envVar(\"KUBERNETES_MIN_VERSION\", \"v1.0.0\")},\n\t\t\t}, corev1.Container{\n\t\t\t\tName: \"container2\",\n\t\t\t\tEnv: []corev1.EnvVar{envVar(\"2\", \"bar\"), envVar(\"KUBERNETES_MIN_VERSION\", \"v1.0.0\")},\n\t\t\t}),\n\t\t},\n\t}}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tu := &unstructured.Unstructured{}\n\t\t\tif err := scheme.Scheme.Convert(test.in, u, nil); err != nil {\n\t\t\t\tt.Fatal(\"Failed to convert deployment to unstructured\", err)\n\t\t\t}\n\n\t\t\tif err := InjectEnvironmentIntoDeployment(test.deployment, test.container, test.envs...)(u); err != nil {\n\t\t\t\tt.Fatal(\"Unexpected error from transformer\", err)\n\t\t\t}\n\n\t\t\tif err := InjectCommonEnvironment(test.commonEnvs...)(u); err != nil {\n\t\t\t\tt.Fatal(\"Unexpected error from transformer\", err)\n\t\t\t}\n\n\t\t\tgot := &appsv1.Deployment{}\n\t\t\tif err := scheme.Scheme.Convert(u, got, nil); err != nil {\n\t\t\t\tt.Fatal(\"Failed to convert unstructured to deployment\", err)\n\t\t\t}\n\n\t\t\tif !cmp.Equal(got, test.want) {\n\t\t\t\tt.Errorf(\"Got = %v, want: %v, diff:\\n%s\", got, test.want, cmp.Diff(got, test.want))\n\t\t\t}\n\t\t})\n\t}\n}", "func storeEnvs(origEnvs 
map[string]string) error {\n\tfor key, val := range origEnvs {\n\t\tkeyParts := strings.Split(key, \"_\")\n\n\t\t// skip error format\n\t\t// env key format must be MODNAME_XXX_YYYY...\n\t\tif len(keyParts) < 2 {\n\t\t\tcontinue\n\t\t}\n\n\t\tenvType := strings.ToLower(keyParts[0])\n\t\tnewKey := strings.Join(keyParts[1:], \"_\")\n\t\tif _, ok := envs[envType]; !ok {\n\t\t\tenvs[envType] = make(map[string]string)\n\t\t}\n\n\t\tmoduleEnv := envs[envType]\n\t\tsetValue := strings.Trim(val, \"\\\"\")\n\t\tmoduleEnv[newKey] = setValue\n\t}\n\n\tenvsStr, _ := json.MarshalIndent(envs, \"\", \" \")\n\tlogger.Info.Printf(\"Load config: %s\\n\", envsStr)\n\treturn nil\n}", "func EnvAppUpdate(ctx *Context, id int64, value string) error {\n\th := authHeader(ctx.Config.AuthToken)\n\ts := fmt.Sprintf(\"environment_variable(%d)\", id)\n\turi := ctx.Config.APIEndpoint(s)\n\tdata := make(map[string]interface{})\n\tdata[\"value\"] = value\n\tbody, err := marhsalReader(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tb, err := doJSON(ctx, \"PATCH\", uri, h, nil, body)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif string(b) != \"OK\" {\n\t\treturn errors.New(\"bad response\")\n\t}\n\treturn nil\n}", "func (e *echo) Env(val string) *echo {\n\tvars := e.declareVars(val)\n\tfor k, v := range vars {\n\t\tif err := os.Setenv(k, v); err != nil {\n\t\t\te.shouldPanic(err.Error())\n\t\t} else {\n\t\t\tdelete(e.vars, k) // overwrite local var\n\t\t}\n\t}\n\treturn e\n}", "func (c *Client) EnvUpdate(ctx context.Context, req *EnvUpdateRequest) (*EnvUpdateResponse, error) {\n\tvar resp EnvUpdateResponse\n\tif err := c.client.Do(ctx, \"PATCH\", envURL, req, &resp); err != nil {\n\t\treturn nil, err\n\t}\n\tresp.Normalize()\n\treturn &resp, nil\n}", "func (ident *Identity) ApplyEnvVars() error {\n\tjID, err := ident.toIdentityJSON()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = envconfig.Process(ident.ConfigKey(), jID)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ident.applyIdentityJSON(jID)\n}", "func EnvOverride(dst, src []corev1.EnvVar) []corev1.EnvVar {\n\tfor _, cre := range src {\n\t\tpos := GetEnvVar(cre.Name, dst)\n\t\tif pos != -1 {\n\t\t\tdst[pos] = cre\n\t\t} else {\n\t\t\tdst = append(dst, cre)\n\t\t}\n\t}\n\treturn dst\n}", "func Deploy(args []string) error {\n\t// TODO: check if the main/env.go file has changed based on env.txt:\n\t// If it has changed and the \"change-env\" argument is not set, print\n\t// an error message and return. 
If it has changed and \"change-env\" is\n\t// set, replace main/env.go.\n\t// If it has not changed, don't do anything to the existing file.\n\treturn nil\n}", "func DeploymentConfigEnvVarReconciler(desired, existing *appsv1.DeploymentConfig, envVar string) bool {\n\tupdate := false\n\n\texistingContainer := &existing.Spec.Template.Spec.Containers[0]\n\tdesiredContainer := desired.Spec.Template.Spec.Containers[0]\n\n\tdesiredIdx := helper.FindEnvVar(desiredContainer.Env, envVar)\n\texistingIdx := helper.FindEnvVar(existingContainer.Env, envVar)\n\n\tif desiredIdx < 0 && existingIdx >= 0 {\n\t\t// env var exists in existing and does not exist in desired => Remove from the list\n\t\t// shift all of the elements at the right of the deleting index by one to the left\n\t\texistingContainer.Env = append(existingContainer.Env[:existingIdx], existingContainer.Env[existingIdx+1:]...)\n\t\tupdate = true\n\t} else if desiredIdx < 0 && existingIdx < 0 {\n\t\t// env var does not exist in existing and does not exist in desired => NOOP\n\t} else if desiredIdx >= 0 && existingIdx < 0 {\n\t\t// env var does not exist in existing and exists in desired => ADD it\n\t\texistingContainer.Env = append(existingContainer.Env, desiredContainer.Env[desiredIdx])\n\t\tupdate = true\n\t} else {\n\t\t// env var exists in existing and exists in desired\n\t\tif !reflect.DeepEqual(existingContainer.Env[existingIdx], desiredContainer.Env[desiredIdx]) {\n\t\t\texistingContainer.Env[existingIdx] = desiredContainer.Env[desiredIdx]\n\t\t\tupdate = true\n\t\t}\n\t}\n\treturn update\n}", "func Setenv(key, value string) error", "func logEnvironmentOverride(lc logger.LoggingClient, name string, key string, value string) {\n\tlc.Info(fmt.Sprintf(\"Variables override of '%s' by environment variable: %s=%s\", name, key, value))\n}", "func (in *Input) LoadFromEnv() {\n\tnum := reflect.ValueOf(in).Elem().NumField()\n\tfor i := 0; i < num; i++ {\n\t\ttField := reflect.TypeOf(in).Elem().Field(i)\n\t\tvField := reflect.ValueOf(in).Elem().Field(i)\n\t\tvalue, ok := os.LookupEnv(envPrefix + tField.Tag.Get(\"env\"))\n\t\tif ok {\n\t\t\tvField.Set(reflect.ValueOf(value))\n\t\t}\n\t}\n}", "func assertEnv(ctx context.Context, client client.Client, spec corev1.PodSpec, component, container, key, expectedValue string) error {\n\tvalue, err := getEnv(ctx, client, spec, component, container, key)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif value != nil && strings.ToLower(*value) != expectedValue {\n\t\treturn ErrIncompatibleCluster{\n\t\t\terr: fmt.Sprintf(\"%s=%s is not supported\", key, *value),\n\t\t\tcomponent: component,\n\t\t\tfix: fmt.Sprintf(\"remove the %s env var or set it to '%s'\", key, expectedValue),\n\t\t}\n\t}\n\n\treturn nil\n}", "func LoadFromEnv(v interface{}, prefix string) (result []MarshalledEnvironmentVar) {\n\tpointerValue := reflect.ValueOf(v)\n\tstructValue := pointerValue.Elem()\n\tstructType := structValue.Type()\n\n\tfor i := 0; i < structValue.NumField(); i++ {\n\t\tstructField := structType.Field(i)\n\t\tfieldValue := structValue.Field(i)\n\n\t\tif fieldValue.CanSet() {\n\t\t\tenvKey := strings.ToUpper(prefix) + gocase.ToUpperSnake(structField.Name)\n\t\t\tenvVal := os.Getenv(envKey)\n\n\t\t\tif envVal != \"\" {\n\t\t\t\t// create a json blob with the env data\n\t\t\t\tjsonStr := \"\"\n\t\t\t\tif fieldValue.Kind() == reflect.String {\n\t\t\t\t\tjsonStr = fmt.Sprintf(`{\"%s\": \"%s\"}`, structField.Name, envVal)\n\t\t\t\t} else {\n\t\t\t\t\tjsonStr = fmt.Sprintf(`{\"%s\": %s}`, structField.Name, 
envVal)\n\t\t\t\t}\n\n\t\t\t\terr := json.Unmarshal([]byte(jsonStr), v)\n\t\t\t\tresult = append(result, MarshalledEnvironmentVar{envKey, envVal, structField.Name, err})\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}", "func envOverride(config *Config) error {\n\tconst defaultPort = \":3000\"\n\terr := envconfig.Process(\"athens\", config)\n\tif err != nil {\n\t\treturn err\n\t}\n\tportEnv := os.Getenv(\"PORT\")\n\t// ATHENS_PORT takes precedence over PORT\n\tif portEnv != \"\" && os.Getenv(\"ATHENS_PORT\") == \"\" {\n\t\tconfig.Port = portEnv\n\t}\n\tif config.Port == \"\" {\n\t\tconfig.Port = defaultPort\n\t}\n\tconfig.Port = ensurePortFormat(config.Port)\n\treturn nil\n}", "func LoadEnvironment(object interface{}, metaDataKey string) error {\n\tvar values = func(key string) (string, bool) {\n\t\treturn os.LookupEnv(key)\n\t}\n\treturn commonLoad(values, object, metaDataKey)\n}", "func ApplyEnvironment(prefix string, cfg interface{}) error {\n\tc := cfg.(*Config)\n\tfor _, val := range []struct {\n\t\ts *string\n\t\tenv string\n\t}{\n\t\t// v2/v3 specific\n\t\t{&c.UserName, prefix + \"OS_USERNAME\"},\n\t\t{&c.APIKey, prefix + \"OS_PASSWORD\"},\n\t\t{&c.Region, prefix + \"OS_REGION_NAME\"},\n\t\t{&c.AuthURL, prefix + \"OS_AUTH_URL\"},\n\n\t\t// v3 specific\n\t\t{&c.Domain, prefix + \"OS_USER_DOMAIN_NAME\"},\n\t\t{&c.Tenant, prefix + \"OS_PROJECT_NAME\"},\n\t\t{&c.TenantDomain, prefix + \"OS_PROJECT_DOMAIN_NAME\"},\n\n\t\t// v2 specific\n\t\t{&c.TenantID, prefix + \"OS_TENANT_ID\"},\n\t\t{&c.Tenant, prefix + \"OS_TENANT_NAME\"},\n\n\t\t// v1 specific\n\t\t{&c.AuthURL, prefix + \"ST_AUTH\"},\n\t\t{&c.UserName, prefix + \"ST_USER\"},\n\t\t{&c.APIKey, prefix + \"ST_KEY\"},\n\n\t\t// Application Credential auth\n\t\t{&c.ApplicationCredentialID, prefix + \"OS_APPLICATION_CREDENTIAL_ID\"},\n\t\t{&c.ApplicationCredentialName, prefix + \"OS_APPLICATION_CREDENTIAL_NAME\"},\n\t\t{&c.ApplicationCredentialSecret, prefix + \"OS_APPLICATION_CREDENTIAL_SECRET\"},\n\n\t\t// Manual authentication\n\t\t{&c.StorageURL, prefix + \"OS_STORAGE_URL\"},\n\t\t{&c.AuthToken, prefix + \"OS_AUTH_TOKEN\"},\n\n\t\t{&c.DefaultContainerPolicy, prefix + \"SWIFT_DEFAULT_CONTAINER_POLICY\"},\n\t} {\n\t\tif *val.s == \"\" {\n\t\t\t*val.s = os.Getenv(val.env)\n\t\t}\n\t}\n\treturn nil\n}", "func (swagger *MgwSwagger) SetEnvVariables(apiHashValue string) {\n\tproductionEndpoints, sandboxEndpoints := retrieveEndpointsFromEnv(apiHashValue)\n\tif len(productionEndpoints) > 0 {\n\t\tlogger.LoggerOasparser.Infof(\"Applying production endpoints provided in env variables for API %v : %v\", swagger.title, swagger.version)\n\t\tswagger.productionEndpoints.EndpointPrefix = constants.ProdClustersConfigNamePrefix\n\t\tswagger.productionEndpoints.Endpoints = productionEndpoints\n\t\tswagger.productionEndpoints.EndpointType = constants.LoadBalance\n\n\t}\n\tif len(sandboxEndpoints) > 0 {\n\t\tlogger.LoggerOasparser.Infof(\"Applying sandbox endpoints provided in env variables for API %v : %v\", swagger.title, swagger.version)\n\t\tswagger.sandboxEndpoints.EndpointPrefix = constants.SandClustersConfigNamePrefix\n\t\tswagger.sandboxEndpoints.Endpoints = sandboxEndpoints\n\t\tswagger.sandboxEndpoints.EndpointType = constants.LoadBalance\n\t}\n\n\t// retrieving security credentials from environment variables\n\tif swagger.productionEndpoints != nil && swagger.productionEndpoints.SecurityConfig.Enabled {\n\t\tswagger.productionEndpoints.SecurityConfig = RetrieveEndpointBasicAuthCredentialsFromEnv(apiHashValue,\n\t\t\t\"prod\", 
swagger.productionEndpoints.SecurityConfig)\n\t}\n\tif swagger.sandboxEndpoints != nil && swagger.sandboxEndpoints.SecurityConfig.Enabled {\n\t\tswagger.sandboxEndpoints.SecurityConfig = RetrieveEndpointBasicAuthCredentialsFromEnv(apiHashValue, \"sand\",\n\t\t\tswagger.sandboxEndpoints.SecurityConfig)\n\t}\n}", "func ReadEnv(c interface{}) {\r\n\tfor _, value := range os.Environ() {\r\n\t\tif strings.HasPrefix(value, \"ENV_\") {\r\n\t\t\tkv := strings.SplitN(value,\"=\",2)\r\n\t\t\tkv[0] = strings.ToLower(strings.Replace(kv[0],\"_\",\".\",-1))[4:]\r\n\t\t\tSetData(c,strings.Join(kv,\"=\"))\r\n\t\t}\r\n\t}\r\n}", "func (c *environmentClient) Set(appName string, values map[string]string, opts ...SetEnvOption) error {\n\tif appName == \"\" {\n\t\treturn errors.New(\"invalid app name\")\n\t}\n\tcfg := SetEnvOptionDefaults().Extend(opts).toConfig()\n\n\ts, err := c.fetchService(cfg.Namespace, appName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnewValues := c.dedupeEnvs(\n\t\tvalues,\n\t\ts.Spec.RunLatest.Configuration.RevisionTemplate.Spec.Container.Env,\n\t)\n\n\ts.Spec.RunLatest.Configuration.RevisionTemplate.Spec.Container.Env = c.mapToEnvs(newValues)\n\tif _, err := c.c.Services(cfg.Namespace).Update(s); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func SyncEnvVar(env interface{}) {\n\tbs, err := json.Marshal(&env)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tenvVar := map[string]string{}\n\terr = json.Unmarshal(bs, &envVar)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tviper.AutomaticEnv()\n\tfor k := range envVar {\n\t\tval := viper.GetString(k)\n\t\tif val != \"\" {\n\t\t\tenvVar[k] = val\n\t\t}\n\t}\n\n\tbs, err = json.Marshal(envVar)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\terr = json.Unmarshal(bs, &env)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func (c *KubeTestPlatform) SetAppEnv(name, key, value string) error {\n\tapp := c.AppResources.FindActiveResource(name)\n\tappManager := app.(*kube.AppManager)\n\n\tif err := appManager.SetAppEnv(key, value); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := appManager.WaitUntilDeploymentState(appManager.IsDeploymentDone); err != nil {\n\t\treturn err\n\t}\n\n\tappManager.StreamContainerLogs()\n\n\treturn nil\n}", "func EnvAppCreate(ctx *Context, id int64, key, value string) (*AppEnv, error) {\n\th := authHeader(ctx.Config.AuthToken)\n\turi := ctx.Config.APIEndpoint(\"environment_variable\")\n\tdata := make(map[string]interface{})\n\tdata[\"application\"] = id\n\tdata[\"name\"] = key\n\tdata[\"value\"] = value\n\tbody, err := marhsalReader(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb, err := doJSON(ctx, \"POST\", uri, h, nil, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\te := &AppEnv{}\n\terr = json.Unmarshal(b, e)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn e, nil\n}", "func SetEnvironment(envar string) {\n\tc.setEnvironment(envar)\n}", "func updateEnvFile(key string, value string) {\n\tenvFile := \"./.env\"\n\tif fileExists(envFile) {\n\t\tenvMap, err := godotenv.Read(envFile)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tenvMap[key] = value\n\n\t\terr = godotenv.Write(envMap, envFile)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n}", "func EnvDevCreate(ctx *Context, id int64, key, value string) (*Env, error) {\n\th := authHeader(ctx.Config.AuthToken)\n\turi := ctx.Config.APIEndpoint(\"device_environment_variable\")\n\tdata := make(map[string]interface{})\n\tdata[\"device\"] = id\n\tdata[\"env_var_name\"] = key\n\tdata[\"value\"] = value\n\tbody, err := 
marhsalReader(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb, err := doJSON(ctx, \"POST\", uri, h, nil, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\te := &Env{}\n\terr = json.Unmarshal(b, e)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn e, nil\n}", "func (e *ChefEnvironment) UpdateFromJSON(jsonEnv map[string]interface{}) util.Gerror {\n\tif e.Name != jsonEnv[\"name\"].(string) {\n\t\terr := util.Errorf(\"Environment name %s and %s from JSON do not match\", e.Name, jsonEnv[\"name\"].(string))\n\t\treturn err\n\t} else if e.Name == \"_default\" {\n\t\terr := util.Errorf(\"The '_default' environment cannot be modified.\")\n\t\terr.SetStatus(http.StatusMethodNotAllowed)\n\t\treturn err\n\t}\n\n\t/* Validations */\n\tvalidElements := []string{\"name\", \"chef_type\", \"json_class\", \"description\", \"default_attributes\", \"override_attributes\", \"cookbook_versions\"}\nValidElem:\n\tfor k := range jsonEnv {\n\t\tfor _, i := range validElements {\n\t\t\tif k == i {\n\t\t\t\tcontinue ValidElem\n\t\t\t}\n\t\t}\n\t\terr := util.Errorf(\"Invalid key %s in request body\", k)\n\t\treturn err\n\t}\n\n\tvar verr util.Gerror\n\n\tattrs := []string{\"default_attributes\", \"override_attributes\"}\n\tfor _, a := range attrs {\n\t\tjsonEnv[a], verr = util.ValidateAttributes(a, jsonEnv[a])\n\t\tif verr != nil {\n\t\t\treturn verr\n\t\t}\n\t}\n\n\tjsonEnv[\"json_class\"], verr = util.ValidateAsFieldString(jsonEnv[\"json_class\"])\n\tif verr != nil {\n\t\tif verr.Error() == \"Field 'name' nil\" {\n\t\t\tjsonEnv[\"json_class\"] = e.JSONClass\n\t\t} else {\n\t\t\treturn verr\n\t\t}\n\t} else {\n\t\tif jsonEnv[\"json_class\"].(string) != \"Chef::Environment\" {\n\t\t\tverr = util.Errorf(\"Field 'json_class' invalid\")\n\t\t\treturn verr\n\t\t}\n\t}\n\n\tjsonEnv[\"chef_type\"], verr = util.ValidateAsFieldString(jsonEnv[\"chef_type\"])\n\tif verr != nil {\n\t\tif verr.Error() == \"Field 'name' nil\" {\n\t\t\tjsonEnv[\"chef_type\"] = e.ChefType\n\t\t} else {\n\t\t\treturn verr\n\t\t}\n\t} else {\n\t\tif jsonEnv[\"chef_type\"].(string) != \"environment\" {\n\t\t\tverr = util.Errorf(\"Field 'chef_type' invalid\")\n\t\t\treturn verr\n\t\t}\n\t}\n\n\tjsonEnv[\"cookbook_versions\"], verr = util.ValidateAttributes(\"cookbook_versions\", jsonEnv[\"cookbook_versions\"])\n\tif verr != nil {\n\t\treturn verr\n\t}\n\tfor k, v := range jsonEnv[\"cookbook_versions\"].(map[string]interface{}) {\n\t\tif !util.ValidateEnvName(k) || k == \"\" {\n\t\t\tmerr := util.Errorf(\"Cookbook name %s invalid\", k)\n\t\t\tmerr.SetStatus(http.StatusBadRequest)\n\t\t\treturn merr\n\t\t}\n\n\t\tif v == nil {\n\t\t\tverr = util.Errorf(\"Invalid version number\")\n\t\t\treturn verr\n\t\t}\n\t\t_, verr = util.ValidateAsConstraint(v)\n\t\tif verr != nil {\n\t\t\t/* try validating as a version */\n\t\t\tv, verr = util.ValidateAsVersion(v)\n\t\t\tif verr != nil {\n\t\t\t\treturn verr\n\t\t\t}\n\t\t}\n\t}\n\n\tjsonEnv[\"description\"], verr = util.ValidateAsString(jsonEnv[\"description\"])\n\tif verr != nil {\n\t\tif verr.Error() == \"Field 'name' missing\" {\n\t\t\tjsonEnv[\"description\"] = \"\"\n\t\t} else {\n\t\t\treturn verr\n\t\t}\n\t}\n\n\te.ChefType = jsonEnv[\"chef_type\"].(string)\n\te.JSONClass = jsonEnv[\"json_class\"].(string)\n\te.Description = jsonEnv[\"description\"].(string)\n\te.Default = jsonEnv[\"default_attributes\"].(map[string]interface{})\n\te.Override = jsonEnv[\"override_attributes\"].(map[string]interface{})\n\t/* clear out, then loop over the cookbook versions */\n\te.CookbookVersions = 
make(map[string]string, len(jsonEnv[\"cookbook_versions\"].(map[string]interface{})))\n\tfor c, v := range jsonEnv[\"cookbook_versions\"].(map[string]interface{}) {\n\t\te.CookbookVersions[c] = v.(string)\n\t}\n\n\treturn nil\n}", "func (e Environment) RestoreOriginalVars() {\n\tfor key, value := range e.backup {\n\t\terr := os.Setenv(key, value)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"Failed to restore env %s\", key))\n\t\t}\n\t}\n}", "func replaceEnvironmentVariables(env map[string]string) map[string]string {\n\treplacer := createReplacer()\n\n\treturnMap := make(map[string]string)\n\tfor key, value := range env {\n\t\treturnMap[key] = replacer.Replace(value)\n\t}\n\n\treturn returnMap\n}", "func EnvMerge(existing []corev1.EnvVar, merge []corev1.EnvVar, override bool) []corev1.EnvVar {\n\tret := []corev1.EnvVar{}\n\tfinal := map[string]corev1.EnvVar{}\n\tfor _, e := range existing {\n\t\tfinal[e.Name] = e\n\t}\n\tfor _, m := range merge {\n\t\tif _, ok := final[m.Name]; ok {\n\t\t\tif override {\n\t\t\t\tfinal[m.Name] = m\n\t\t\t}\n\t\t} else {\n\t\t\tfinal[m.Name] = m\n\t\t}\n\t}\n\n\tfor _, v := range final {\n\t\tret = append(ret, v)\n\t}\n\n\treturn ret\n}", "func (f *authFile) updateEnv(e *azure.Environment) {\n\te.ActiveDirectoryEndpoint = normEndpoint(f.ActiveDirectoryEndpointURL)\n\te.ResourceManagerEndpoint = normEndpoint(f.ResourceManagerEndpointURL)\n\te.GraphEndpoint = normEndpoint(f.ActiveDirectoryGraphResourceID)\n\te.GalleryEndpoint = normEndpoint(f.GalleryEndpointURL)\n\te.ServiceManagementEndpoint = normEndpoint(f.ManagementEndpointURL)\n}", "func (ccc *CustomCloudConfig) SetEnvironment() error {\n\tvar cmd *exec.Cmd\n\tvar err error\n\n\t// Add to python cert store the self-signed root CA generated by Azure Stack's CI\n\t// as azure-cli complains otherwise\n\tazsSelfSignedCaPath := \"/aks-engine/Certificates.pem\"\n\tif _, err = os.Stat(azsSelfSignedCaPath); err == nil {\n\t\t// latest dev_image has an azure-cli version that requires python3\n\t\tcert_command := fmt.Sprintf(`VER=$(python3 -V | grep -o [0-9].[0-9]*. 
| grep -o [0-9].[0-9]*);\n\t\tCA=/usr/local/lib/python${VER}/dist-packages/certifi/cacert.pem;\n\t\tif [ -f ${CA} ]; then cat %s >> ${CA}; fi;`, azsSelfSignedCaPath)\n\t\t// include cacert.pem from python2.7 path for upgrade scenario\n\t\tif _, err := os.Stat(\"/usr/local/lib/python2.7/dist-packages/certifi/cacert.pem\"); err == nil {\n\t\t\tcert_command = fmt.Sprintf(`CA=/usr/local/lib/python2.7/dist-packages/certifi/cacert.pem;\n\t\t\tif [ -f ${CA} ]; then cat %s >> ${CA}; fi;`, azsSelfSignedCaPath)\n\t\t}\n\n\t\tcmd := exec.Command(\"/bin/bash\", \"-c\", cert_command)\n\n\t\tif out, err := cmd.CombinedOutput(); err != nil {\n\t\t\tlog.Printf(\"output:%s\\n\", out)\n\t\t\treturn err\n\t\t}\n\t}\n\n\tenvironmentName := fmt.Sprintf(\"AzureStack%v\", time.Now().Unix())\n\tif ccc.TimeoutCommands {\n\t\tcmd = exec.Command(\"timeout\", \"60\", \"az\", \"cloud\", \"register\",\n\t\t\t\"-n\", environmentName,\n\t\t\t\"--endpoint-resource-manager\", ccc.ResourceManagerEndpoint,\n\t\t\t\"--suffix-storage-endpoint\", ccc.StorageEndpointSuffix,\n\t\t\t\"--suffix-keyvault-dns\", ccc.KeyVaultDNSSuffix,\n\t\t\t\"--endpoint-active-directory-resource-id\", ccc.ServiceManagementEndpoint,\n\t\t\t\"--endpoint-active-directory\", ccc.ActiveDirectoryEndpoint,\n\t\t\t\"--endpoint-active-directory-graph-resource-id\", ccc.GraphEndpoint)\n\t} else {\n\t\tcmd = exec.Command(\"az\", \"cloud\", \"register\",\n\t\t\t\"-n\", environmentName,\n\t\t\t\"--endpoint-resource-manager\", ccc.ResourceManagerEndpoint,\n\t\t\t\"--suffix-storage-endpoint\", ccc.StorageEndpointSuffix,\n\t\t\t\"--suffix-keyvault-dns\", ccc.KeyVaultDNSSuffix,\n\t\t\t\"--endpoint-active-directory-resource-id\", ccc.ServiceManagementEndpoint,\n\t\t\t\"--endpoint-active-directory\", ccc.ActiveDirectoryEndpoint,\n\t\t\t\"--endpoint-active-directory-graph-resource-id\", ccc.GraphEndpoint)\n\t}\n\tif out, err := cmd.CombinedOutput(); err != nil {\n\t\tlog.Printf(\"output:%s\\n\", out)\n\t\treturn err\n\t}\n\n\tif ccc.TimeoutCommands {\n\t\tcmd = exec.Command(\"timeout\", \"60\", \"az\", \"cloud\", \"set\", \"-n\", environmentName)\n\t} else {\n\t\tcmd = exec.Command(\"az\", \"cloud\", \"set\", \"-n\", environmentName)\n\t}\n\tif out, err := cmd.CombinedOutput(); err != nil {\n\t\tlog.Printf(\"output:%s\\n\", out)\n\t\treturn err\n\t}\n\n\tif ccc.TimeoutCommands {\n\t\tcmd = exec.Command(\"timeout\", \"60\", \"az\", \"cloud\", \"update\", \"--profile\", ccc.APIProfile)\n\t} else {\n\t\tcmd = exec.Command(\"az\", \"cloud\", \"update\", \"--profile\", ccc.APIProfile)\n\t}\n\tif out, err := cmd.CombinedOutput(); err != nil {\n\t\tlog.Printf(\"output:%s\\n\", out)\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func GetAllEnvironmentVariablesSet() {\n\tvar err error\n\t//get all the env variables set\n\ttypes.Namespace = os.Getenv(\"K8S_NAMESPACE\")\n\ttypes.PodID = os.Getenv(\"K8S_POD_ID\")\n\ttypes.MsConfigVersion = os.Getenv(\"MS_CONFIG_REVISION\")\n\ttypes.NfConfigVersion = os.Getenv(\"NF_CONFIG_REVISION\")\n\n\t// read cim REST port value\n\ttypes.CIMRestPort = 6060\n\tif os.Getenv(\"CIM_REST_PORT\") != \"\" {\n\t\ttypes.CIMRestPort, err = strconv.Atoi(os.Getenv(\"CIM_REST_PORT\"))\n\t\tif err != nil {\n\t\t\tlog.Println(\"Improper value provided for env variable CIM_REST_PORT. 
Continuing with default CIM REST port 6060.\")\n\t\t\ttypes.CIMRestPort = 6060\n\t\t}\n\t}\n\n\t// read cim NATS port value\n\ttypes.CIMNatsPort = 4222\n\tif os.Getenv(\"CIM_NATS_PORT\") != \"\" {\n\t\ttypes.CIMNatsPort, err = strconv.Atoi(os.Getenv(\"CIM_NATS_PORT\"))\n\t\tif err != nil {\n\t\t\tlog.Println(\"Improper value provided for env variable CIM_NATS_PORT. Continuing with default CIM NATS port 4222.\")\n\t\t\ttypes.CIMNatsPort = 4222\n\t\t}\n\t}\n}", "func createOrUpdateEnvVariables(ctx context.Context, client *tfe.Client, workspaceId string, variables map[string]string) error {\n\tvar err error\n\tvar allV *tfe.VariableList\n\tisSensitive := false\n\n\t// Read all variables and search\n\t// TODO: is there a better way? API doesnt expose a variable by name lookup\n\tallV, err = client.Variables.List(ctx, workspaceId, tfe.VariableListOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// loop over each env variable\n\tfor key, val := range variables {\n\t\t// determine if variable already exists\n\t\tvar found *tfe.Variable\n\t\tfor i := range allV.Items {\n\t\t\tif allV.Items[i].Key == key {\n\t\t\t\tfound = allV.Items[i]\n\t\t\t}\n\t\t}\n\n\t\ttimestamp := time.Now()\n\t\tif found == nil {\n\t\t\tfmt.Print(\"Creating new Variable: \", color.GreenString(key), \" ...\")\n\t\t\t_, err = client.Variables.Create(ctx, workspaceId, tfe.VariableCreateOptions{\n\t\t\t\tKey: &key,\n\t\t\t\tValue: &val,\n\t\t\t\tDescription: tfe.String(fmt.Sprintf(\"Written by TFx at %s\", timestamp)),\n\t\t\t\tCategory: tfe.Category(\"env\"),\n\t\t\t\tSensitive: tfe.Bool(isSensitive),\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Print(\"Updating existing Variable: \", color.GreenString(key), \" ...\")\n\t\t\t_, err = client.Variables.Update(ctx, workspaceId, found.ID, tfe.VariableUpdateOptions{\n\t\t\t\tKey: &key,\n\t\t\t\tValue: &val,\n\t\t\t\tDescription: tfe.String(fmt.Sprintf(\"Written by TFx at %s\", timestamp)),\n\t\t\t\t// Category: tfe.Category(\"env\"),\n\t\t\t\tSensitive: tfe.Bool(isSensitive),\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tfmt.Println(\" Done\")\n\t}\n\n\treturn nil\n}", "func (s *Action) Env(c *cli.Context) error {\n\tctx := ctxutil.WithGlobalFlags(c)\n\tname := c.Args().First()\n\targs := c.Args().Tail()\n\tkeepCase := c.Bool(\"keep-case\")\n\n\tif len(args) == 0 {\n\t\treturn exit.Error(exit.Usage, nil, \"Missing subcommand to execute\")\n\t}\n\n\tif !s.Store.Exists(ctx, name) && !s.Store.IsDir(ctx, name) {\n\t\treturn exit.Error(exit.NotFound, nil, \"Secret %s not found\", name)\n\t}\n\n\tkeys := make([]string, 0, 1)\n\tif s.Store.IsDir(ctx, name) {\n\t\tdebug.Log(\"%q is a dir, adding it's entries\", name)\n\n\t\tl, err := s.Store.Tree(ctx)\n\t\tif err != nil {\n\t\t\treturn exit.Error(exit.List, err, \"failed to list store: %s\", err)\n\t\t}\n\n\t\tsubtree, err := l.FindFolder(name)\n\t\tif err != nil {\n\t\t\treturn exit.Error(exit.NotFound, nil, \"Entry %q not found\", name)\n\t\t}\n\n\t\tfor _, e := range subtree.List(tree.INF) {\n\t\t\tdebug.Log(\"found key: %s\", e)\n\t\t\tkeys = append(keys, e)\n\t\t}\n\t} else {\n\t\tkeys = append(keys, name)\n\t}\n\n\tenv := make([]string, 0, 1)\n\tfor _, key := range keys {\n\t\tdebug.Log(\"exporting to environment key: %s\", key)\n\t\tsec, err := s.Store.Get(ctx, key)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to get entry for env prefix %q: %w\", name, err)\n\t\t}\n\t\tenvKey := path.Base(key)\n\t\tif !keepCase {\n\t\t\tenvKey = 
strings.ToUpper(envKey)\n\t\t}\n\t\tenv = append(env, fmt.Sprintf(\"%s=%s\", envKey, sec.Password()))\n\t}\n\n\tcmd := exec.CommandContext(ctx, args[0], args[1:]...)\n\tcmd.Env = append(os.Environ(), env...)\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = stdout\n\tcmd.Stderr = os.Stderr\n\n\treturn cmd.Run()\n}", "func LoadEnv() {\n\tenv := os.Getenv(\"TEMPLATE_ENV\")\n\tif \"\" == env {\n\t\tenv = \"development\"\n\t}\n\n\tgodotenv.Load(\".env.\" + env + \".local\")\n\tif \"test\" != env {\n\t\tgodotenv.Load(\".env.local\")\n\t}\n\tgodotenv.Load(\".env.\" + env)\n\tgodotenv.Load() // The Original .env\n\tif vars := checkVars(); len(vars) != 0 {\n\t\tlog.Printf(\"ERROR: Variables de entorno necesarias no definidas: %v\", vars)\n\t\tpanic(fmt.Sprintf(\"ERROR: Variables de entorno necesarias no definidas: %v\", vars))\n\t}\n}", "func (evs *EnvVarService) CreateOrUpdate(ev *travis.EnvironmentVariable) error {\n\texistingVars := evs.byName(*ev.Name)\n\tif len(existingVars) == 0 {\n\t\treturn evs.create(ev)\n\t}\n\n\t// Travis doesn't enforce uniqueness on variable names: pop latest and\n\t// prepare list of dups for deletion.\n\texistingVar, dups := existingVars[len(existingVars)-1], existingVars[:len(existingVars)-1]\n\n\t// Update internal var\n\texistingVar.Value = ev.Value\n\texistingVar.Public = ev.Public\n\n\t// Update remote var\n\tif _, err := evs.client.UpdateEnvironmentVariable(evs.repoID, *existingVar.ID, existingVar); err != nil {\n\t\treturn err\n\t}\n\n\t// TODO: We know IDs in advance and can run requests in parallel. We should.\n\treturn evs.removeAll(dups)\n}", "func envOverride(config *DefaultConfig) (*DefaultConfig, error) {\n\t// override UpdateTime\n\tupdateTime := os.Getenv(\"XIGNITE_FEEDER_UPDATE_TIME\")\n\tif updateTime != \"\" {\n\t\tt, err := time.Parse(ctLayout, updateTime)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tconfig.UpdateTime = t\n\t}\n\n\t// override APIToken\n\tapiToken := os.Getenv(\"XIGNITE_FEEDER_API_TOKEN\")\n\tif apiToken != \"\" {\n\t\tconfig.APIToken = apiToken\n\t}\n\n\t// override NotQuoteSymbolList\n\tnotQuoteStockList := os.Getenv(\"XIGNITE_FEEDER_NOT_QUOTE_STOCK_LIST\")\n\tif notQuoteStockList != \"\" {\n\t\tconfig.NotQuoteStockList = strings.Split(notQuoteStockList, \",\")\n\t}\n\n\treturn config, nil\n}", "func (e *OverlayEnv) Setenv(key, value string) error {\n\t// do we have a stack?\n\tif e == nil {\n\t\treturn ErrNilPointer{\"OverlayEnv.Setenv\"}\n\t}\n\n\t// do we have any environments in the stack?\n\tif len(e.envs) == 0 {\n\t\treturn ErrEmptyOverlayEnv{\"OverlayEnv.Setenv\"}\n\t}\n\n\t// are we updating an existing variable?\n\tfor _, env := range e.envs {\n\t\t_, ok := env.LookupEnv(key)\n\t\tif ok {\n\t\t\treturn env.Setenv(key, value)\n\t\t}\n\t}\n\n\t// nope, it's a brand new variable\n\treturn e.envs[0].Setenv(key, value)\n}", "func populateProcessEnvironment(env []string) error {\n\tfor _, pair := range env {\n\t\tp := strings.SplitN(pair, \"=\", 2)\n\t\tif len(p) < 2 {\n\t\t\treturn fmt.Errorf(\"invalid environment '%v'\", pair)\n\t\t}\n\t\tif err := os.Setenv(p[0], p[1]); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func InjectEnv() env.Env {\n\twire.Build(\n\t\twire.Bind(new(env.Env), new(env.GoDotEnv)),\n\t\tenv.NewGoDotEnv,\n\t)\n\treturn env.GoDotEnv{}\n}", "func (env Environment) Add(otherenv Environment) {\n\tfor k, v := range otherenv {\n\t\tenv[k] = v\n\t}\n}", "func CreateEnvironment(host string, verifyTLS bool, apiKey string, project string, name string, slug string) (models.EnvironmentInfo, 
Error) {\n\tpostBody := map[string]string{\"project\": project, \"name\": name, \"slug\": slug}\n\tbody, err := json.Marshal(postBody)\n\tif err != nil {\n\t\treturn models.EnvironmentInfo{}, Error{Err: err, Message: \"Invalid environment info\"}\n\t}\n\n\turl, err := generateURL(host, \"/v3/environments\", nil)\n\tif err != nil {\n\t\treturn models.EnvironmentInfo{}, Error{Err: err, Message: \"Unable to generate url\"}\n\t}\n\n\tstatusCode, _, response, err := PostRequest(url, verifyTLS, apiKeyHeader(apiKey), body)\n\tif err != nil {\n\t\treturn models.EnvironmentInfo{}, Error{Err: err, Message: \"Unable to create environment\", Code: statusCode}\n\t}\n\n\tvar result map[string]interface{}\n\terr = json.Unmarshal(response, &result)\n\tif err != nil {\n\t\treturn models.EnvironmentInfo{}, Error{Err: err, Message: \"Unable to parse API response\", Code: statusCode}\n\t}\n\n\tenvironmentInfo, ok := result[\"environment\"].(map[string]interface{})\n\tif !ok {\n\t\treturn models.EnvironmentInfo{}, Error{Err: fmt.Errorf(\"Unexpected type parsing environment, expected map[string]interface{}, got %T\", result[\"environment\"]), Message: \"Unable to parse API response\", Code: statusCode}\n\t}\n\n\tinfo := models.ParseEnvironmentInfo(environmentInfo)\n\n\treturn info, Error{}\n}", "func (v *VirtualEnvironment) GetEnv(inherit bool, projectDir string) (map[string]string, error) {\n\tenv := make(map[string]string)\n\tif v.getEnv == nil {\n\t\t// if runtime is not explicitly disabled, this is an error\n\t\tif os.Getenv(constants.DisableRuntime) != \"true\" {\n\t\t\treturn nil, locale.NewError(\n\t\t\t\t\"err_get_env_unactivated\", \"Trying to set up an environment in an un-activated environment. This should not happen. Please report this issue in our forum: %s\",\n\t\t\t\tconstants.ForumsURL,\n\t\t\t)\n\t\t}\n\t\tenv[\"PATH\"] = os.Getenv(\"PATH\")\n\t} else {\n\t\tvar err error\n\t\tenv, err = v.getEnv(inherit, projectDir)\n\t\tif err != nil {\n\t\t\treturn env, err\n\t\t}\n\t}\n\n\tif projectDir != \"\" {\n\t\tenv[constants.ActivatedStateEnvVarName] = projectDir\n\t\tenv[constants.ActivatedStateIDEnvVarName] = v.activationID\n\n\t\t// Get project from explicitly defined configuration file\n\t\tpj, fail := project.Parse(filepath.Join(projectDir, constants.ConfigFileName))\n\t\tif fail != nil {\n\t\t\treturn env, fail.ToError()\n\t\t}\n\t\tfor _, constant := range pj.Constants() {\n\t\t\tenv[constant.Name()] = constant.Value()\n\t\t}\n\t}\n\n\tif inherit {\n\t\treturn inheritEnv(env), nil\n\t}\n\n\treturn env, nil\n}", "func (suite *Suite[Env]) UpdateEnv(stackDef *StackDefinition[Env]) {\n\tif stackDef != suite.currentStackDef {\n\t\tif (suite.firstFailTest != \"\" || suite.T().Failed()) && suite.params.SkipDeleteOnFailure {\n\t\t\t// In case of failure, do not override the environment\n\t\t\tsuite.T().SkipNow()\n\t\t}\n\t\tenv, upResult, err := createEnv(suite, stackDef)\n\t\tsuite.Require().NoError(err)\n\t\terr = client.CallStackInitializers(suite.T(), env, upResult)\n\t\tsuite.Require().NoError(err)\n\t\tsuite.env = env\n\t\tsuite.currentStackDef = stackDef\n\t}\n\tsuite.isUpdateEnvCalledInThisTest = true\n}", "func SetEnvs(iface interface{}) error {\n\tv := reflect.Indirect(reflect.ValueOf(iface))\n\tif v.Kind() != reflect.Struct {\n\t\treturn errors.Errorf(\"expected struct, received %v\", v.Type())\n\t}\n\tfor i := 0; i < v.Type().NumField(); i++ {\n\t\tfv := v.Field(i)\n\t\ttv, ok := v.Type().Field(i).Tag.Lookup(\"env\")\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tif v, ok := 
os.LookupEnv(strings.ToUpper(tv)); ok {\n\t\t\tif err := setValue(fv, v); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func SetupEnvironment(trelloKey, trelloToken, trelloNextActionsListID, trelloProjectsListID string) {\n\tos.Setenv(\"TRELLO_KEY\", trelloKey)\n\tos.Setenv(\"TRELLO_TOKEN\", trelloToken)\n\tos.Setenv(\"TRELLO_NEXT_ACTIONS_LIST_ID\", trelloNextActionsListID)\n\tos.Setenv(\"TRELLO_PROJECTS_LIST_ID\", trelloProjectsListID)\n}", "func setEngineVar(chaosEngine *v1alpha1.ChaosEngine, testsDetails *types.TestDetails) {\n\n\t// contains all the envs\n\tenvDetails := ENVDetails{\n\t\tENV: map[string]string{},\n\t}\n\n\t// Add JobCleanUpPolicy of chaos-runner to retain\n\tchaosEngine.Spec.JobCleanUpPolicy = v1alpha1.CleanUpPolicy(testsDetails.JobCleanUpPolicy)\n\n\t// Add ImagePullPolicy of chaos-runner to Always\n\tchaosEngine.Spec.Components.Runner.ImagePullPolicy = corev1.PullPolicy(testsDetails.ImagePullPolicy)\n\n\t// Modify the spec of engine file\n\tchaosEngine.ObjectMeta.Name = testsDetails.EngineName\n\tchaosEngine.ObjectMeta.Namespace = testsDetails.ChaosNamespace\n\n\t// If ChaosEngine contain App Info then update it\n\tif chaosEngine.Spec.Appinfo.Appns != \"\" && chaosEngine.Spec.Appinfo.Applabel != \"\" {\n\t\tchaosEngine.Spec.Appinfo.Appns = testsDetails.AppNS\n\t\tchaosEngine.Spec.Appinfo.Applabel = testsDetails.AppLabel\n\t}\n\tif testsDetails.ChaosServiceAccount != \"\" {\n\t\tchaosEngine.Spec.ChaosServiceAccount = testsDetails.ChaosServiceAccount\n\t} else {\n\t\tchaosEngine.Spec.ChaosServiceAccount = testsDetails.ExperimentName + \"-sa\"\n\t}\n\tchaosEngine.Spec.Experiments[0].Name = testsDetails.NewExperimentName\n\tchaosEngine.Spec.AnnotationCheck = testsDetails.AnnotationCheck\n\n\tswitch testsDetails.ExperimentName {\n\tcase \"ec2-terminate-by-id\":\n\t\tenvDetails.SetEnv(\"EC2_INSTANCE_ID\", testsDetails.InstanceID).\n\t\t\tSetEnv(\"REGION\", testsDetails.Region)\n\tcase \"ec2-terminate-by-tag\":\n\t\tenvDetails.SetEnv(\"INSTANCE_TAG\", testsDetails.InstanceTag).\n\t\t\tSetEnv(\"REGION\", testsDetails.Region)\n\tcase \"ebs-loss-by-id\":\n\t\tenvDetails.SetEnv(\"EBS_VOLUME_ID\", testsDetails.EBSVolumeID).\n\t\t\tSetEnv(\"REGION\", testsDetails.Region)\n\tcase \"ebs-loss-by-tag\":\n\t\tenvDetails.SetEnv(\"EBS_VOLUME_TAG\", testsDetails.EBSVolumeTag).\n\t\t\tSetEnv(\"REGION\", testsDetails.Region)\n\tcase \"disk-fill\":\n\t\tif testsDetails.FillPercentage != 80 {\n\t\t\tenvDetails.SetEnv(\"FILL_PERCENTAGE\", strconv.Itoa(testsDetails.FillPercentage))\n\t\t}\n\t\t// Here not using SetEnv function as SetEnv will not add new variables\n\t\tchaosEngine.Spec.Experiments[0].Spec.Components.ENV = append(chaosEngine.Spec.Experiments[0].Spec.Components.ENV, corev1.EnvVar{\n\t\t\tName: \"EPHEMERAL_STORAGE_MEBIBYTES\",\n\t\t\tValue: \"200\",\n\t\t})\n\tcase \"pod-cpu-hog-exec\":\n\t\tchaosEngine.Spec.Experiments[0].Spec.Components.ENV = append(chaosEngine.Spec.Experiments[0].Spec.Components.ENV, corev1.EnvVar{\n\t\t\tName: \"CHAOS_KILL_COMMAND\",\n\t\t\tValue: testsDetails.CPUKillCommand,\n\t\t})\n\tcase \"pod-memory-hog-exec\":\n\t\tchaosEngine.Spec.Experiments[0].Spec.Components.ENV = append(chaosEngine.Spec.Experiments[0].Spec.Components.ENV, corev1.EnvVar{\n\t\t\tName: \"CHAOS_KILL_COMMAND\",\n\t\t\tValue: testsDetails.MemoryKillCommand,\n\t\t})\n\tcase \"azure-instance-stop\":\n\t\tenvDetails.SetEnv(\"RESOURCE_GROUP\", testsDetails.AzureResourceGroup).\n\t\t\tSetEnv(\"AZURE_INSTANCE_NAME\", 
testsDetails.AzureInstanceName).\n\t\t\tSetEnv(\"AZURE_SCALE_SET\", testsDetails.AzureScaleSet)\n\tcase \"azure-disk-loss\":\n\t\tenvDetails.SetEnv(\"RESOURCE_GROUP\", testsDetails.AzureResourceGroup).\n\t\t\tSetEnv(\"AZURE_SCALE_SET\", testsDetails.AzureScaleSet).\n\t\t\tSetEnv(\"VIRTUAL_DISK_NAMES\", testsDetails.AzureDiskName)\n\tcase \"gcp-vm-instance-stop\":\n\t\tenvDetails.SetEnv(\"GCP_PROJECT_ID\", testsDetails.GCPProjectID).\n\t\t\tSetEnv(\"VM_INSTANCE_NAMES\", testsDetails.VMInstanceNames).\n\t\t\tSetEnv(\"INSTANCE_ZONES\", testsDetails.InstanceZones)\n\tcase \"gcp-vm-disk-loss\":\n\t\tenvDetails.SetEnv(\"GCP_PROJECT_ID\", testsDetails.GCPProjectID).\n\t\t\tSetEnv(\"DISK_VOLUME_NAMES\", testsDetails.DiskVolumeNames).\n\t\t\tSetEnv(\"DISK_ZONES\", testsDetails.DiskZones).\n\t\t\tSetEnv(\"DEVICE_NAMES\", testsDetails.DeviceNames)\n\tcase \"vm-poweroff\":\n\t\tenvDetails.SetEnv(\"APP_VM_MOIDS\", testsDetails.VMIds)\n\tcase \"process-kill\":\n\t\tenvDetails.SetEnv(\"PROCESS_IDS\", testsDetails.ProcessIds)\n\t}\n\n\t// for experiments like pod network latency\n\tenvDetails.SetEnv(\"NETWORK_LATENCY\", testsDetails.NetworkLatency)\n\n\t// update App Node Details\n\tif testsDetails.ApplicationNodeName != \"\" {\n\t\tenvDetails.SetEnv(\"TARGET_NODE\", testsDetails.ApplicationNodeName)\n\t\tif chaosEngine.Spec.Experiments[0].Spec.Components.NodeSelector == nil {\n\t\t\tchaosEngine.Spec.Experiments[0].Spec.Components.NodeSelector = map[string]string{}\n\t\t}\n\t\tchaosEngine.Spec.Experiments[0].Spec.Components.NodeSelector[\"kubernetes.io/hostname\"] = testsDetails.NodeSelectorName\n\t}\n\n\t// update Target Node Details\n\tif testsDetails.TargetNodes != \"\" {\n\t\tlog.Infof(\"[Info] Target Nodes: %v\", testsDetails.TargetNodes)\n\t\tenvDetails.SetEnv(\"TARGET_NODES\", testsDetails.TargetNodes)\n\t}\n\n\t// NODE_LABEL for Node-memory-hog and node-cpu-hog\n\tif testsDetails.NodeLabel != \"\" {\n\t\tchaosEngine.Spec.Experiments[0].Spec.Components.ENV = append(chaosEngine.Spec.Experiments[0].Spec.Components.ENV, corev1.EnvVar{\n\t\t\tName: \"NODE_LABEL\",\n\t\t\tValue: testsDetails.NodeLabel,\n\t\t})\n\t}\n\n\t// update all the value corresponding to keys from the ENV's in Engine\n\tfor key, value := range chaosEngine.Spec.Experiments[0].Spec.Components.ENV {\n\t\t_, ok := envDetails.ENV[value.Name]\n\t\tif ok {\n\t\t\tchaosEngine.Spec.Experiments[0].Spec.Components.ENV[key].Value = envDetails.ENV[value.Name]\n\t\t}\n\t}\n}", "func (m *etcdMinion) setEnvironment(name string) error {\n\tlog.Printf(\"Setting environment to %s\\n\", name)\n\n\tif _, err := m.gitRepo.CheckoutDetached(name); err != nil {\n\t\treturn err\n\t}\n\n\thead, err := m.gitRepo.Head()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"Environment set to %s@%s\\n\", name, head)\n\n\treturn nil\n}", "func (b *Executable) Env(arg, value string) *Executable {\n\tif b.Environment == nil {\n\t\tb.Environment = make(map[string]string)\n\t}\n\tb.Environment[arg] = value\n\treturn b\n}", "func SetEnvironment(e *Environment) {\n\tenv = e\n}", "func GenerateEnvVars(application *model.Application, resourceDir string, appValues *templates.Application) error {\n\tappEnvVars := make(map[string]string, 0)\n\n\tfor _, appRes := range application.Resources {\n\t\t//elasticsearch-user:sit\n\t\tresDetails := strings.Split(appRes, sep)\n\t\tif len(resDetails) < 2 {\n\t\t\teMsg := fmt.Sprintf(\"application resource %s has missing template type, eg: cassandra/test1\", resDetails)\n\t\t\treturn errors.New(eMsg)\n\t\t}\n\t\tname := 
resDetails[0]\n\t\tenvType := resDetails[1]\n\t\tresource := &model.Resource{}\n\t\terr := GetResource(name, &resource, resourceDir)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmatchEnvType := false\n\t\tfor _, resTemplate := range resource.Spec.ResourceTemplate {\n\t\t\t//Only using the context\n\t\t\tif resTemplate.Name == envType {\n\t\t\t\taddToEnvVars(name, appEnvVars, resTemplate.Element)\n\n\t\t\t\tif len(resTemplate.Infra) > 0 {\n\t\t\t\t\tinfra := strings.Split(resTemplate.Infra, sep)\n\t\t\t\t\tif len(infra) < 2 {\n\t\t\t\t\t\teMsg := fmt.Sprintf(\"resource infrastructure %s has missing template type, eg: cassandra-a/test\", infra)\n\t\t\t\t\t\treturn errors.New(eMsg)\n\t\t\t\t\t}\n\t\t\t\t\tinfraName := infra[0]\n\t\t\t\t\tinfraEnv := infra[1]\n\t\t\t\t\tinfrastructure := &model.Infrastructure{}\n\t\t\t\t\tGetInfrastructure(infraName, &infrastructure, resourceDir)\n\t\t\t\t\tmatchInfra := false\n\t\t\t\t\tfor _, infraTemplate := range infrastructure.Spec.Template {\n\t\t\t\t\t\tif infraEnv == infraTemplate.Name {\n\t\t\t\t\t\t\taddToEnvVars(name, appEnvVars, infraTemplate.Attributes)\n\t\t\t\t\t\t\tmatchInfra = true\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif matchInfra == false {\n\t\t\t\t\t\tlog.Print(fmt.Sprintf(\"[WARN] could not find matching infra for env type %s of %s\", infraEnv, resTemplate.Infra))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tmatchEnvType = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif matchEnvType == false {\n\t\t\tlog.Print(fmt.Sprintf(\"[WARN] could not find matching env type %s of app %s\", envType, application.Name))\n\t\t}\n\t}\n\tappValues.EnvVars = appEnvVars\n\treturn nil\n}", "func (s *Store) CreateEnvironment(environment *archer.Environment) error {\n\tif _, err := s.GetProject(environment.Project); err != nil {\n\t\treturn err\n\t}\n\n\tenvironmentPath := fmt.Sprintf(fmtEnvParamPath, environment.Project, environment.Name)\n\tdata, err := marshal(environment)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"serializing environment %s: %w\", environment.Name, err)\n\t}\n\n\t_, err = s.ssmClient.PutParameter(&ssm.PutParameterInput{\n\t\tName: aws.String(environmentPath),\n\t\tDescription: aws.String(fmt.Sprintf(\"The %s deployment stage\", environment.Name)),\n\t\tType: aws.String(ssm.ParameterTypeString),\n\t\tValue: aws.String(data),\n\t})\n\tif err != nil {\n\t\tif aerr, ok := err.(awserr.Error); ok {\n\t\t\tswitch aerr.Code() {\n\t\t\tcase ssm.ErrCodeParameterAlreadyExists:\n\t\t\t\treturn &ErrEnvironmentAlreadyExists{\n\t\t\t\t\tEnvironmentName: environment.Name,\n\t\t\t\t\tProjectName: environment.Project}\n\t\t\t}\n\t\t}\n\t\treturn fmt.Errorf(\"create environment %s in project %s: %w\", environment.Name, environment.Project, err)\n\t}\n\treturn nil\n}", "func (es *EnvStorage) Set(name, secret string) error {\n\treturn os.Setenv(es.makeEnvName(name), secret)\n}", "func (r *Reconciler) channelDeploymentEnvVars(channel *knativekafkav1alpha1.KafkaChannel) ([]corev1.EnvVar, error) {\n\n\t// Get The TopicName For Specified Channel\n\ttopicName := util.TopicName(channel)\n\n\t// Create The Channel Deployment EnvVars\n\tenvVars := []corev1.EnvVar{\n\t\t{\n\t\t\tName: env.MetricsPortEnvVarKey,\n\t\t\tValue: strconv.Itoa(r.environment.MetricsPort),\n\t\t},\n\t\t{\n\t\t\tName: env.HealthPortEnvVarKey,\n\t\t\tValue: strconv.Itoa(constants.HealthPort),\n\t\t},\n\t}\n\n\t// Get The Kafka Secret From The Kafka Admin Client\n\tkafkaSecret := r.adminClient.GetKafkaSecretName(topicName)\n\n\t// If The Kafka Secret Env Var Is Specified Then Append 
Relevant Env Vars\n\tif len(kafkaSecret) <= 0 {\n\n\t\t// Received Invalid Kafka Secret - Cannot Proceed\n\t\treturn nil, fmt.Errorf(\"invalid kafkaSecret for topic '%s'\", topicName)\n\n\t} else {\n\n\t\t// Append The Kafka Brokers As Env Var\n\t\tenvVars = append(envVars, corev1.EnvVar{\n\t\t\tName: env.KafkaBrokerEnvVarKey,\n\t\t\tValueFrom: &corev1.EnvVarSource{\n\t\t\t\tSecretKeyRef: &corev1.SecretKeySelector{\n\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{Name: kafkaSecret},\n\t\t\t\t\tKey: constants.KafkaSecretDataKeyBrokers,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\n\t\t// Append The Kafka Username As Env Var\n\t\tenvVars = append(envVars, corev1.EnvVar{\n\t\t\tName: env.KafkaUsernameEnvVarKey,\n\t\t\tValueFrom: &corev1.EnvVarSource{\n\t\t\t\tSecretKeyRef: &corev1.SecretKeySelector{\n\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{Name: kafkaSecret},\n\t\t\t\t\tKey: constants.KafkaSecretDataKeyUsername,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\n\t\t// Append The Kafka Password As Env Var\n\t\tenvVars = append(envVars, corev1.EnvVar{\n\t\t\tName: env.KafkaPasswordEnvVarKey,\n\t\t\tValueFrom: &corev1.EnvVarSource{\n\t\t\t\tSecretKeyRef: &corev1.SecretKeySelector{\n\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{Name: kafkaSecret},\n\t\t\t\t\tKey: constants.KafkaSecretDataKeyPassword,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t}\n\n\t// Return The Channel Deployment EnvVars Array\n\treturn envVars, nil\n}", "func overrideEnv(env []string, vars ...string) []string {\n\t// Setting the slice's capacity to length ensures that a new backing array\n\t// is allocated if len(vars) > 0.\n\treturn append(env[:len(env):len(env)], vars...)\n}", "func Set() error {\n\tvar err error\n\tmyEnv, err = godotenv.Read()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func Env(def, key string, fallbacks ...string) string {\n\tif val, found := LookupEnv(key, fallbacks...); found {\n\t\treturn val\n\t}\n\treturn def\n}", "func (d DependencyCacheLayer) OverrideEnv(name string, format string, args ...interface{}) error {\n\td.Logger.SubsequentLine(\"Writing %s\", name)\n\treturn d.CacheLayer.OverrideEnv(name, format, args...)\n}", "func (r *Reconciler) channelDeploymentEnvVars(secret *corev1.Secret) ([]corev1.EnvVar, error) {\n\n\t// Create The Channel Deployment EnvVars\n\tenvVars := []corev1.EnvVar{\n\t\t{\n\t\t\tName: system.NamespaceEnvKey,\n\t\t\tValue: commonconstants.KnativeEventingNamespace,\n\t\t},\n\t\t{\n\t\t\tName: commonenv.KnativeLoggingConfigMapNameEnvVarKey,\n\t\t\tValue: logging.ConfigMapName(),\n\t\t},\n\t\t{\n\t\t\tName: commonenv.ServiceNameEnvVarKey,\n\t\t\tValue: util.ChannelDnsSafeName(secret.Name),\n\t\t},\n\t\t{\n\t\t\tName: commonenv.MetricsPortEnvVarKey,\n\t\t\tValue: strconv.Itoa(r.environment.MetricsPort),\n\t\t},\n\t\t{\n\t\t\tName: commonenv.MetricsDomainEnvVarKey,\n\t\t\tValue: r.environment.MetricsDomain,\n\t\t},\n\t\t{\n\t\t\tName: commonenv.HealthPortEnvVarKey,\n\t\t\tValue: strconv.Itoa(constants.HealthPort),\n\t\t},\n\t}\n\n\t// Append The Kafka Brokers As Env Var\n\tenvVars = append(envVars, corev1.EnvVar{\n\t\tName: commonenv.KafkaBrokerEnvVarKey,\n\t\tValueFrom: &corev1.EnvVarSource{\n\t\t\tSecretKeyRef: &corev1.SecretKeySelector{\n\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{Name: secret.Name},\n\t\t\t\tKey: constants.KafkaSecretDataKeyBrokers,\n\t\t\t},\n\t\t},\n\t})\n\n\t// Append The Kafka Username As Env Var\n\tenvVars = append(envVars, corev1.EnvVar{\n\t\tName: commonenv.KafkaUsernameEnvVarKey,\n\t\tValueFrom: 
&corev1.EnvVarSource{\n\t\t\tSecretKeyRef: &corev1.SecretKeySelector{\n\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{Name: secret.Name},\n\t\t\t\tKey: constants.KafkaSecretDataKeyUsername,\n\t\t\t},\n\t\t},\n\t})\n\n\t// Append The Kafka Password As Env Var\n\tenvVars = append(envVars, corev1.EnvVar{\n\t\tName: commonenv.KafkaPasswordEnvVarKey,\n\t\tValueFrom: &corev1.EnvVarSource{\n\t\t\tSecretKeyRef: &corev1.SecretKeySelector{\n\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{Name: secret.Name},\n\t\t\t\tKey: constants.KafkaSecretDataKeyPassword,\n\t\t\t},\n\t\t},\n\t})\n\n\t// Return The Channel Deployment EnvVars Array\n\treturn envVars, nil\n}", "func (s *EnvironmentService) Update(environment *Environment) (*Environment, error) {\n\tif environment == nil {\n\t\treturn nil, internal.CreateInvalidParameterError(constants.OperationUpdate, constants.ParameterEnvironment)\n\t}\n\n\tpath, err := services.GetUpdatePath(s, environment)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := services.ApiUpdate(s.GetClient(), environment, new(Environment), path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp.(*Environment), nil\n}", "func replaceEnvironmentVariables(str string, failMissing bool) string {\n\t// Variables must be in the format ${NAME}\n\t// Letters, numbers, and underscores are allowed\n\t// Variable name must start with a letter\n\t// Environment variable names will be converted to upper case to avoid ambiguity\n\tre := regexp.MustCompile(`\\$\\{[A-Za-z][][A-Za-z_0-9.]*\\}`)\n\tfor _, varName := range re.FindAllString(str, -1) {\n\t\tenvVarName := strings.TrimPrefix(varName, \"${\")\n\t\tenvVarName = strings.TrimSuffix(envVarName, \"}\")\n\t\tenvVar := os.Getenv(strings.ToUpper(envVarName))\n\t\tif len(envVar) == 0 {\n\t\t\tif failMissing {\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"name\": envVarName,\n\t\t\t\t}).Fatal(\"Environment variable not defined\")\n\t\t\t} else {\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"name\": envVarName,\n\t\t\t\t}).Warning(\"Environment variable not defined, skipping\")\n\t\t\t}\n\t\t} else {\n\t\t\tstr = strings.ReplaceAll(str, varName, envVar)\n\t\t}\n\t}\n\treturn str\n}", "func ReplaceEnv(variables []string, varName string, value string) []string {\n\tvar result []string\n\tfor _, e := range variables {\n\t\tpair := strings.Split(e, \"=\")\n\t\tif pair[0] != varName {\n\t\t\tresult = append(result, e)\n\t\t} else {\n\t\t\tresult = append(result, fmt.Sprintf(\"%s=%s\", varName, value))\n\t\t}\n\t}\n\n\treturn result\n}", "func NewCfnEnvironment_Override(c CfnEnvironment, scope awscdk.Construct, id *string, props *CfnEnvironmentProps) {\n\t_init_.Initialize()\n\n\t_jsii_.Create(\n\t\t\"monocdk.aws_appconfig.CfnEnvironment\",\n\t\t[]interface{}{scope, id, props},\n\t\tc,\n\t)\n}", "func setExperimentVar(chaosExperiment *v1alpha1.ChaosExperiment, testsDetails *types.TestDetails) {\n\n\t// contains all the envs\n\tenvDetails := ENVDetails{\n\t\tENV: map[string]string{},\n\t}\n\n\t// Modify the ExperimentImage\n\tchaosExperiment.Spec.Definition.Image = testsDetails.ExperimentImage\n\t// Modify experiment imagePullPolicy\n\tchaosExperiment.Spec.Definition.ImagePullPolicy = corev1.PullPolicy(testsDetails.ExperimentImagePullPolicy)\n\n\t// Get lib image\n\tvar libImage string\n\tfor _, value := range chaosExperiment.Spec.Definition.ENVList {\n\t\tif value.Name == \"LIB_IMAGE\" {\n\t\t\tlibImage = value.Value\n\t\t}\n\t}\n\n\t// Modify LIB Image\n\tif testsDetails.LibImage == \"\" && 
strings.Contains(libImage, \"go-runner\") {\n\t\ttestsDetails.LibImage = testsDetails.ExperimentImage\n\t} else {\n\t\ttestsDetails.LibImage = libImage\n\t}\n\n\t// Modify Args\n\tif testsDetails.Args != \"\" {\n\t\tchaosExperiment.Spec.Definition.Args = strings.Split(testsDetails.Args, \",\")\n\t}\n\n\t// Modify Image Command\n\tif testsDetails.Command != \"\" {\n\t\tchaosExperiment.Spec.Definition.Command = strings.Split(testsDetails.Command, \",\")\n\t}\n\n\t// Modify ENV's\n\tenvDetails.SetEnv(\"SEQUENCE\", testsDetails.Sequence).\n\t\tSetEnv(\"PODS_AFFECTED_PERC\", testsDetails.PodsAffectedPercentage).\n\t\tSetEnv(\"TARGET_PODS\", testsDetails.TargetPod).\n\t\tSetEnv(\"LIB\", testsDetails.Lib).\n\t\tSetEnv(\"LIB_IMAGE\", testsDetails.LibImage)\n\n\tlog.Info(\"[LIB Image]: Lib image is \" + testsDetails.LibImage + \" !!!\")\n\n\t// update all the values corresponding to keys from the ENV's in Experiment\n\tfor key, value := range chaosExperiment.Spec.Definition.ENVList {\n\t\t_, ok := envDetails.ENV[value.Name]\n\t\tif ok {\n\t\t\tchaosExperiment.Spec.Definition.ENVList[key].Value = envDetails.ENV[value.Name]\n\t\t}\n\t}\n}", "func ReplaceByEnv(envkey string, arg *string) {\n\tif envVar := os.Getenv(envkey); envVar != \"\" {\n\t\t*arg = envVar\n\t\t// logrus.Infof(\"update value with ENV.%s to %s\", envkey, envVar)\n\t}\n}", "func (a *EnvironmentSecretApiService) CreateEnvironmentSecretOverride(ctx _context.Context, environmentId string, secretId string) ApiCreateEnvironmentSecretOverrideRequest {\n\treturn ApiCreateEnvironmentSecretOverrideRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tenvironmentId: environmentId,\n\t\tsecretId: secretId,\n\t}\n}", "func (i *Installation) extractEnvValues() {\n\tInfof(\"Extracting the environment information from the file: %s\", i.EnvFile)\n\tenvs := environment(i.EnvFile)\n\ti.GPInitSystem.MasterPort = envs.PgPort\n\ti.GPInitSystem.MasterDir = envs.MasterDir\n\ti.GPCC.InstanceName = envs.GpccInstanceName\n\ti.GPCC.InstancePort = envs.GpccPort\n\ti.GPCC.GpPerfmonHome = envs.GpPerfmonHome\n}", "func (c *Client) EnvironmentExtend(envID string) error {\n\treturn c.envPutActionByID(\"extend\", envID)\n}", "func setEnvironmentVariables(t *testing.T, vars map[string]string) {\n\tfor k, v := range vars {\n\t\tt.Setenv(k, v)\n\t}\n}", "func (cfg *Config) UpdateFromEnvs() error {\n\tcc := *cfg\n\n\ttp1, vv1 := reflect.TypeOf(&cc).Elem(), reflect.ValueOf(&cc).Elem()\n\tfor i := 0; i < tp1.NumField(); i++ {\n\t\tjv := tp1.Field(i).Tag.Get(\"json\")\n\t\tif jv == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tjv = strings.Replace(jv, \",omitempty\", \"\", -1)\n\t\tjv = strings.Replace(jv, \"-\", \"_\", -1)\n\t\tjv = strings.ToUpper(strings.Replace(jv, \"-\", \"_\", -1))\n\t\tenv := envPfx + jv\n\t\tif os.Getenv(env) == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tsv := os.Getenv(env)\n\n\t\tfieldName := tp1.Field(i).Name\n\n\t\tswitch vv1.Field(i).Type().Kind() {\n\t\tcase reflect.String:\n\t\t\tvv1.Field(i).SetString(sv)\n\n\t\tcase reflect.Bool:\n\t\t\tbb, err := strconv.ParseBool(sv)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to parse %q (%q, %v)\", sv, env, err)\n\t\t\t}\n\t\t\tvv1.Field(i).SetBool(bb)\n\n\t\tcase reflect.Int, reflect.Int32, reflect.Int64:\n\t\t\tif fieldName == \"WaitBeforeDown\" {\n\t\t\t\tdv, err := time.ParseDuration(sv)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"failed to parse %q (%q, %v)\", sv, env, err)\n\t\t\t\t}\n\t\t\t\tvv1.Field(i).SetInt(int64(dv))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tiv, err := strconv.ParseInt(sv, 10, 
64)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to parse %q (%q, %v)\", sv, env, err)\n\t\t\t}\n\t\t\tvv1.Field(i).SetInt(iv)\n\n\t\tcase reflect.Uint, reflect.Uint32, reflect.Uint64:\n\t\t\tiv, err := strconv.ParseUint(sv, 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to parse %q (%q, %v)\", sv, env, err)\n\t\t\t}\n\t\t\tvv1.Field(i).SetUint(iv)\n\n\t\tcase reflect.Float32, reflect.Float64:\n\t\t\tfv, err := strconv.ParseFloat(sv, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to parse %q (%q, %v)\", sv, env, err)\n\t\t\t}\n\t\t\tvv1.Field(i).SetFloat(fv)\n\n\t\tcase reflect.Slice:\n\t\t\tss := strings.Split(sv, \",\")\n\t\t\tslice := reflect.MakeSlice(reflect.TypeOf([]string{}), len(ss), len(ss))\n\t\t\tfor i := range ss {\n\t\t\t\tslice.Index(i).SetString(ss[i])\n\t\t\t}\n\t\t\tvv1.Field(i).Set(slice)\n\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"%q (%v) is not supported as an env\", env, vv1.Field(i).Type())\n\t\t}\n\t}\n\t*cfg = cc\n\n\tav := *cc.ALBIngressController\n\ttp2, vv2 := reflect.TypeOf(&av).Elem(), reflect.ValueOf(&av).Elem()\n\tfor i := 0; i < tp2.NumField(); i++ {\n\t\tjv := tp2.Field(i).Tag.Get(\"json\")\n\t\tif jv == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tjv = strings.Replace(jv, \",omitempty\", \"\", -1)\n\t\tjv = strings.ToUpper(strings.Replace(jv, \"-\", \"_\", -1))\n\t\tenv := envPfxALB + jv\n\t\tif os.Getenv(env) == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tsv := os.Getenv(env)\n\n\t\tswitch vv2.Field(i).Type().Kind() {\n\t\tcase reflect.String:\n\t\t\tvv2.Field(i).SetString(sv)\n\n\t\tcase reflect.Bool:\n\t\t\tbb, err := strconv.ParseBool(sv)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to parse %q (%q, %v)\", sv, env, err)\n\t\t\t}\n\t\t\tvv2.Field(i).SetBool(bb)\n\n\t\tcase reflect.Int, reflect.Int32, reflect.Int64:\n\t\t\tiv, err := strconv.ParseInt(sv, 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to parse %q (%q, %v)\", sv, env, err)\n\t\t\t}\n\t\t\tvv2.Field(i).SetInt(iv)\n\n\t\tcase reflect.Uint, reflect.Uint32, reflect.Uint64:\n\t\t\tiv, err := strconv.ParseUint(sv, 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to parse %q (%q, %v)\", sv, env, err)\n\t\t\t}\n\t\t\tvv2.Field(i).SetUint(iv)\n\n\t\tcase reflect.Float32, reflect.Float64:\n\t\t\tfv, err := strconv.ParseFloat(sv, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to parse %q (%q, %v)\", sv, env, err)\n\t\t\t}\n\t\t\tvv2.Field(i).SetFloat(fv)\n\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"%q (%v) is not supported as an env\", env, vv2.Field(i).Type())\n\t\t}\n\t}\n\tcfg.ALBIngressController = &av\n\n\treturn nil\n}", "func (s *EnvironmentsServiceOp) Get(env string) (*Environment, *Response, error) {\n\tpath := path.Join(environmentsPath, env)\n\treq, e := s.client.NewRequest(\"GET\", path, nil)\n\tif e != nil {\n\t\treturn nil, nil, e\n\t}\n\treturnedEnv := Environment{}\n\tresp, e := s.client.Do(req, &returnedEnv)\n\tif e != nil {\n\t\treturn nil, resp, e\n\t}\n\treturn &returnedEnv, resp, e\n}", "func Env(key, defaultValue string) string {\n\tvalue, defined := os.LookupEnv(key)\n\tif !defined {\n\t\treturn defaultValue\n\t}\n\n\treturn value\n}", "func Getenv(key string, fallbacks ...string) (value string) {\n\tvalue, _ = LookupEnv(key, fallbacks...)\n\treturn\n}", "func (o *InvestmentsHistoricalUpdateWebhook) SetEnvironment(v WebhookEnvironmentValues) {\n\to.Environment = v\n}", "func setupEnvironment(ctx context.Context) {\n\tEnv = os.Getenv(\"ENV\")\n\tif Env == \"\" {\n\t\tfmt.Printf(\"ENV 
not configured\\n\")\n\t}\n\tAppConfigPath = os.Getenv(\"APP_CONFIG_PATH\")\n\tif AppConfigPath == \"\" {\n\t\tfmt.Printf(\"APP_CONFIG_PATH not configured'n\")\n\t}\n\n\tparams, err := getParamsFromSSM(ctx)\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to get from SSM: %v\\n\", err)\n\t}\n\n\tClients = params.Clients\n\tfor i, v := range Clients {\n\t\tClients[i] = strings.ToUpper(v)\n\t}\n\tfmt.Printf(\"Loaded %d devices\\n\", len(Clients))\n\n\tOpenhabURL = params.OpenhabURL\n\tauthString := fmt.Sprintf(\"%s:%s\", params.OpenhabUser, params.OpenhabPwd)\n\tOpenhabAuth = base64.StdEncoding.EncodeToString([]byte(authString))\n\n\tSecret = params.Secret\n}", "func (c *FakeNotebooksEnvironments) Update(ctx context.Context, notebooksEnvironment *v1alpha1.NotebooksEnvironment, opts v1.UpdateOptions) (result *v1alpha1.NotebooksEnvironment, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewUpdateAction(notebooksenvironmentsResource, c.ns, notebooksEnvironment), &v1alpha1.NotebooksEnvironment{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1alpha1.NotebooksEnvironment), err\n}", "func PostEnvironmentPing(c echo.Context, environmentService environment.Service) error {\n\tenvName := c.Param(\"envName\")\n\terr := validateEnvironmentName(envName)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn environmentService.Ping(envName)\n}", "func (client *Client) PatchApplicationUserProvidedEnvironmentVariables(appGUID string, envVars EnvironmentVariables) (EnvironmentVariables, Warnings, error) {\n\tbodyBytes, err := json.Marshal(envVars)\n\tif err != nil {\n\t\treturn EnvironmentVariables{}, nil, err\n\t}\n\n\trequest, err := client.newHTTPRequest(requestOptions{\n\t\tURIParams: internal.Params{\"app_guid\": appGUID},\n\t\tRequestName: internal.PatchApplicationUserProvidedEnvironmentVariablesRequest,\n\t\tBody: bytes.NewReader(bodyBytes),\n\t})\n\tif err != nil {\n\t\treturn EnvironmentVariables{}, nil, err\n\t}\n\n\tvar responseEnvVars EnvironmentVariables\n\tresponse := cloudcontroller.Response{\n\t\tResult: &responseEnvVars,\n\t}\n\terr = client.connection.Make(request, &response)\n\treturn responseEnvVars, response.Warnings, err\n}", "func EnvVarTest(resourceName string, sourceType string, envString string) {\n\n\tif sourceType == \"git\" {\n\t\t// checking the values of the env vars pairs in bc\n\t\tenvVars := runCmd(\"oc get bc \" + resourceName + \" -o go-template='{{range .spec.strategy.sourceStrategy.env}}{{.name}}{{.value}}{{end}}'\")\n\t\tExpect(envVars).To(Equal(envString))\n\t}\n\n\t// checking the values of the env vars pairs in dc\n\tenvVars := runCmd(\"oc get dc \" + resourceName + \" -o go-template='{{range .spec.template.spec.containers}}{{range .env}}{{.name}}{{.value}}{{end}}{{end}}'\")\n\tExpect(envVars).To(Equal(envString))\n}", "func toEnv(spec *engine.Spec, step *engine.Step) []v1.EnvVar {\n\tvar to []v1.EnvVar\n\tfor k, v := range step.Envs {\n\t\tto = append(to, v1.EnvVar{\n\t\t\tName: k,\n\t\t\tValue: v,\n\t\t})\n\t}\n\tto = append(to, v1.EnvVar{\n\t\tName: \"KUBERNETES_NODE\",\n\t\tValueFrom: &v1.EnvVarSource{\n\t\t\tFieldRef: &v1.ObjectFieldSelector{\n\t\t\t\tFieldPath: \"spec.nodeName\",\n\t\t\t},\n\t\t},\n\t})\n\tfor _, secret := range step.Secrets {\n\t\tsec, ok := engine.LookupSecret(spec, secret)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\toptional := true\n\t\tto = append(to, v1.EnvVar{\n\t\t\tName: secret.Env,\n\t\t\tValueFrom: &v1.EnvVarSource{\n\t\t\t\tSecretKeyRef: &v1.SecretKeySelector{\n\t\t\t\t\tLocalObjectReference: 
v1.LocalObjectReference{\n\t\t\t\t\t\tName: sec.Metadata.UID,\n\t\t\t\t\t},\n\t\t\t\t\tKey: sec.Metadata.UID,\n\t\t\t\t\tOptional: &optional,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t}\n\treturn to\n}", "func (c *Configuration) LoadEnvironmentVars(prefix string) error {\n\tif prefix != \"\" {\n\t\tprefix += \"_\"\n\t}\n\n\treturn c.config.Load(env.Provider(prefix, \".\", func(s string) string {\n\t\tmapKey := strings.Replace(strings.ToLower(strings.TrimPrefix(s, prefix)), \"_\", \".\", -1)\n\t\tif !c.config.Exists(mapKey) {\n\t\t\t// only accept values from env vars that already exist in the config\n\t\t\treturn \"\"\n\t\t}\n\t\treturn mapKey\n\t}), nil)\n}", "func InitializationVariableEnvironment() {\n\tlog.Println(\"Start reading variable environment\")\n\n\tProject.Mode = os.Getenv(\"MODE\")\n\tProject.JWT.Key = os.Getenv(\"JWT_KEY\")\n\tProject.Crypto.Key = os.Getenv(\"CRYPTO_KEY\") // TODO length must be 32\n\tProject.Crypto.IV = os.Getenv(\"CRYPTO_IV\") // TODO length must be 16\n\n\tswitch Project.Mode {\n\tcase \"development\":\n\t\tMongo.adaptor = \"mongodb\"\n\tcase \"staging\", \"production\":\n\t\tMongo.adaptor = \"mongodb+srv\"\n\tdefault:\n\t\tlog.Fatalln(\"Invalid Mode\")\n\t}\n\n\t// MONGODB section\n\tMongo.User = os.Getenv(\"MONGO_USER\")\n\tMongo.Password = os.Getenv(\"MONGO_PASSWORD\")\n\tMongo.Host = os.Getenv(\"MONGO_HOST\")\n\tMongo.AuthDB = os.Getenv(\"MONGO_AUTH_DATABASE\")\n\tMongo.DB = os.Getenv(\"MONGO_DATABASE\")\n\tMongo.URI = fmt.Sprintf(\n\t\t\"%s://%s:%s@%s/%s?retryWrites=true&w=majority\",\n\t\tMongo.adaptor,\n\t\tMongo.User,\n\t\tMongo.Password,\n\t\tMongo.Host,\n\t\tMongo.AuthDB,\n\t)\n\n\tlog.Println(\"Finish reading variable environment\")\n}", "func (a *Client) UpdateInfraEnv(ctx context.Context, params *UpdateInfraEnvParams) (*UpdateInfraEnvCreated, error) {\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"UpdateInfraEnv\",\n\t\tMethod: \"PATCH\",\n\t\tPathPattern: \"/v2/infra-envs/{infra_env_id}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &UpdateInfraEnvReader{formats: a.formats},\n\t\tAuthInfo: a.authInfo,\n\t\tContext: ctx,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*UpdateInfraEnvCreated), nil\n\n}", "func (o *V0037JobProperties) SetEnvironment(v map[string]interface{}) {\n\to.Environment = v\n}" ]
[ "0.5812909", "0.5790658", "0.576041", "0.5611268", "0.5522273", "0.5518604", "0.549987", "0.5493906", "0.54892653", "0.54662716", "0.54650384", "0.54612553", "0.5447039", "0.54098463", "0.5391029", "0.5381192", "0.53804636", "0.537156", "0.53701836", "0.5362287", "0.5356194", "0.53474337", "0.5328302", "0.5298326", "0.52761", "0.5271033", "0.52520996", "0.5235082", "0.52315825", "0.5209015", "0.51870024", "0.51840496", "0.5182596", "0.51725286", "0.5163287", "0.515717", "0.5154116", "0.5144584", "0.51405656", "0.5130485", "0.51166016", "0.5115603", "0.51142794", "0.5105008", "0.51019", "0.5096556", "0.508902", "0.50817394", "0.5073465", "0.50673896", "0.50483644", "0.50388426", "0.5037441", "0.50336784", "0.50318485", "0.50238055", "0.50101286", "0.5007267", "0.5002171", "0.4985495", "0.49813607", "0.4970862", "0.49674308", "0.49638316", "0.49619687", "0.49581265", "0.495242", "0.49484286", "0.49461496", "0.49428946", "0.49399528", "0.49374565", "0.492091", "0.49191022", "0.49156302", "0.4914818", "0.49147686", "0.49116343", "0.4910567", "0.4910356", "0.49091652", "0.49043158", "0.4897789", "0.48954275", "0.4894427", "0.48918626", "0.4874255", "0.48665708", "0.48660567", "0.48655015", "0.48638195", "0.48630038", "0.4852215", "0.4850291", "0.48494515", "0.4848967", "0.4846299", "0.4846218", "0.48412275", "0.4832985" ]
0.76379824
0
Scale changes the number of instances (replicas) for the application's Deployment.
func (a *Workload) Scale(ctx context.Context, instances int32) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		// Retrieve the latest version of Deployment before attempting update
		// RetryOnConflict uses exponential backoff to avoid exhausting the apiserver
		deployment, err := a.Deployment(ctx)
		if err != nil {
			return err
		}

		deployment.Spec.Replicas = &instances

		_, err = a.cluster.Kubectl.AppsV1().Deployments(a.app.Org).Update(
			ctx, deployment, metav1.UpdateOptions{})

		return err
	})
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Scale(namespace string, app string, n *int32) error {\n\tc, err := k8s.NewInClusterClient()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to get k8s client\")\n\t}\n\n\td, err := c.ExtensionsV1Beta1().GetDeployment(ctx, app, namespace)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to get deployment\")\n\t}\n\n\td.Spec.Replicas = n\n\n\t_, err = c.ExtensionsV1Beta1().UpdateDeployment(ctx, d)\n\treturn errors.Wrap(err, \"failed to scale deployment\")\n}", "func (c *KubeTestPlatform) Scale(name string, replicas int32) error {\n\tapp := c.AppResources.FindActiveResource(name)\n\tappManager := app.(*kube.AppManager)\n\n\tif err := appManager.ScaleDeploymentReplica(replicas); err != nil {\n\t\treturn err\n\t}\n\n\t_, err := appManager.WaitUntilDeploymentState(appManager.IsDeploymentDone)\n\n\treturn err\n}", "func (dc *DeploymentController) scale(ctx context.Context, deployment *apps.Deployment, newRS *apps.ReplicaSet, oldRSs []*apps.ReplicaSet) error {\n\t// If there is only one active replica set then we should scale that up to the full count of the\n\t// deployment. If there is no active replica set, then we should scale up the newest replica set.\n\tif activeOrLatest := deploymentutil.FindActiveOrLatest(newRS, oldRSs); activeOrLatest != nil {\n\t\tif *(activeOrLatest.Spec.Replicas) == *(deployment.Spec.Replicas) {\n\t\t\treturn nil\n\t\t}\n\t\t_, _, err := dc.scaleReplicaSetAndRecordEvent(ctx, activeOrLatest, *(deployment.Spec.Replicas), deployment)\n\t\treturn err\n\t}\n\n\t// If the new replica set is saturated, old replica sets should be fully scaled down.\n\t// This case handles replica set adoption during a saturated new replica set.\n\tif deploymentutil.IsSaturated(deployment, newRS) {\n\t\tfor _, old := range controller.FilterActiveReplicaSets(oldRSs) {\n\t\t\tif _, _, err := dc.scaleReplicaSetAndRecordEvent(ctx, old, 0, deployment); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\t// There are old replica sets with pods and the new replica set is not saturated.\n\t// We need to proportionally scale all replica sets (new and old) in case of a\n\t// rolling deployment.\n\tif deploymentutil.IsRollingUpdate(deployment) {\n\t\tallRSs := controller.FilterActiveReplicaSets(append(oldRSs, newRS))\n\t\tallRSsReplicas := deploymentutil.GetReplicaCountForReplicaSets(allRSs)\n\n\t\tallowedSize := int32(0)\n\t\tif *(deployment.Spec.Replicas) > 0 {\n\t\t\tallowedSize = *(deployment.Spec.Replicas) + deploymentutil.MaxSurge(*deployment)\n\t\t}\n\n\t\t// Number of additional replicas that can be either added or removed from the total\n\t\t// replicas count. These replicas should be distributed proportionally to the active\n\t\t// replica sets.\n\t\tdeploymentReplicasToAdd := allowedSize - allRSsReplicas\n\n\t\t// The additional replicas should be distributed proportionally amongst the active\n\t\t// replica sets from the larger to the smaller in size replica set. 
Scaling direction\n\t\t// drives what happens in case we are trying to scale replica sets of the same size.\n\t\t// In such a case when scaling up, we should scale up newer replica sets first, and\n\t\t// when scaling down, we should scale down older replica sets first.\n\t\tvar scalingOperation string\n\t\tswitch {\n\t\tcase deploymentReplicasToAdd > 0:\n\t\t\tsort.Sort(controller.ReplicaSetsBySizeNewer(allRSs))\n\t\t\tscalingOperation = \"up\"\n\n\t\tcase deploymentReplicasToAdd < 0:\n\t\t\tsort.Sort(controller.ReplicaSetsBySizeOlder(allRSs))\n\t\t\tscalingOperation = \"down\"\n\t\t}\n\n\t\t// Iterate over all active replica sets and estimate proportions for each of them.\n\t\t// The absolute value of deploymentReplicasAdded should never exceed the absolute\n\t\t// value of deploymentReplicasToAdd.\n\t\tdeploymentReplicasAdded := int32(0)\n\t\tnameToSize := make(map[string]int32)\n\t\tlogger := klog.FromContext(ctx)\n\t\tfor i := range allRSs {\n\t\t\trs := allRSs[i]\n\n\t\t\t// Estimate proportions if we have replicas to add, otherwise simply populate\n\t\t\t// nameToSize with the current sizes for each replica set.\n\t\t\tif deploymentReplicasToAdd != 0 {\n\t\t\t\tproportion := deploymentutil.GetProportion(logger, rs, *deployment, deploymentReplicasToAdd, deploymentReplicasAdded)\n\n\t\t\t\tnameToSize[rs.Name] = *(rs.Spec.Replicas) + proportion\n\t\t\t\tdeploymentReplicasAdded += proportion\n\t\t\t} else {\n\t\t\t\tnameToSize[rs.Name] = *(rs.Spec.Replicas)\n\t\t\t}\n\t\t}\n\n\t\t// Update all replica sets\n\t\tfor i := range allRSs {\n\t\t\trs := allRSs[i]\n\n\t\t\t// Add/remove any leftovers to the largest replica set.\n\t\t\tif i == 0 && deploymentReplicasToAdd != 0 {\n\t\t\t\tleftover := deploymentReplicasToAdd - deploymentReplicasAdded\n\t\t\t\tnameToSize[rs.Name] = nameToSize[rs.Name] + leftover\n\t\t\t\tif nameToSize[rs.Name] < 0 {\n\t\t\t\t\tnameToSize[rs.Name] = 0\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// TODO: Use transactions when we have them.\n\t\t\tif _, _, err := dc.scaleReplicaSet(ctx, rs, nameToSize[rs.Name], deployment, scalingOperation); err != nil {\n\t\t\t\t// Return as soon as we fail, the deployment is requeued\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (c *CanaryDeployer) Scale(cd *flaggerv1.Canary, replicas int32) error {\n\ttargetName := cd.Spec.TargetRef.Name\n\tdep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\treturn fmt.Errorf(\"deployment %s.%s not found\", targetName, cd.Namespace)\n\t\t}\n\t\treturn fmt.Errorf(\"deployment %s.%s query error %v\", targetName, cd.Namespace, err)\n\t}\n\n\tdepCopy := dep.DeepCopy()\n\tdepCopy.Spec.Replicas = int32p(replicas)\n\n\t_, err = c.kubeClient.AppsV1().Deployments(dep.Namespace).Update(depCopy)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"scaling %s.%s to %v failed: %v\", depCopy.GetName(), depCopy.Namespace, replicas, err)\n\t}\n\treturn nil\n}", "func (c mockK8sClient) Scale(resource *kubernetes.Workload, replicas int32) error {\n\tc.Counts.NumPods = replicas\n\treturn nil\n}", "func (k *kubectlContext) Scale(resource string, scale uint) error {\n\tout, err := k.do(\"scale\", fmt.Sprintf(\"--replicas=%d\", scale), resource)\n\tk.t.Log(string(out))\n\treturn err\n}", "func ScaleDeployment(ctx context.Context, c client.Client, key client.ObjectKey, replicas int32) error {\n\tdeployment := &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: key.Name,\n\t\t\tNamespace: 
key.Namespace,\n\t\t},\n\t}\n\n\treturn scaleResource(ctx, c, deployment, replicas)\n}", "func (s *Service) scale(replicas int) {\n\tlog.WithField(\"replicas\", replicas).Debug(\"Service scaling\")\n\ts.state = StateScaling\n\tif s.CurrentReplicas != replicas {\n\t\ts.auklet.scaleService(s.ServiceID, replicas)\n\t\ts.auklet.Lock()\n\t\ts.auklet.metrics[MetricServiceScaleEventsTotal].(prometheus.Counter).Inc()\n\t\tif replicas > s.CurrentReplicas {\n\t\t\ts.auklet.serviceMetrics[s.ServiceID][MetricScaleUpEventsCount].(prometheus.Counter).Inc()\n\t\t} else {\n\t\t\ts.auklet.serviceMetrics[s.ServiceID][MetricScaleDownEventsCount].(prometheus.Counter).Inc()\n\t\t}\n\t\ts.auklet.Unlock()\n\t}\n\n\t// after scaling return to stable state\n\ts.stable()\n}", "func KubeScale(count string, deployment string) string {\n\tvar outputstring string\n\tif count == \"\" || deployment == \"\" {\n\t\toutputstring = \"\"\n\t} else if strings.HasSuffix(deployment, \".yaml\") {\n\t\toutputstring = fmt.Sprintf(\"scale --replicas=%s -f %s\", count, deployment)\n\t} else {\n\t\toutputstring = fmt.Sprintf(\"scale --replicas=%s %s\", count, deployment)\n\t}\n\treturn KubeCommand(outputstring)\n}", "func (m *Manager) Scale(shardIDs []int, shardCount int) (err error) {\n\tsg, err := NewShardGroup(m, shardIDs, shardCount)\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = sg.Start()\n\treturn\n}", "func (tfs *tiflashScaler) Scale(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSet, newSet *apps.StatefulSet) error {\n\tscaling, _, _, _ := scaleOne(oldSet, newSet)\n\tif scaling > 0 {\n\t\treturn tfs.ScaleOut(tc, oldSet, newSet)\n\t} else if scaling < 0 {\n\t\treturn tfs.ScaleIn(tc, oldSet, newSet)\n\t}\n\t// we only sync auto scaler annotations when we are finishing syncing scaling\n\treturn tfs.SyncAutoScalerAnn(tc, oldSet)\n}", "func (s *Service) Scale(ctx context.Context, scale int, timeout int) error {\n\tif s.specificiesHostPort() {\n\t\tlogrus.Warnf(\"The \\\"%s\\\" service specifies a port on the host. 
If multiple containers for this service are created on a single host, the port will clash.\", s.Name())\n\t}\n\n\tcontainers, err := s.collectContainers(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(containers) > scale {\n\t\tfoundCount := 0\n\t\tfor _, c := range containers {\n\t\t\tfoundCount++\n\t\t\tif foundCount > scale {\n\t\t\t\ttimeout = s.stopTimeout(timeout)\n\t\t\t\tif err := c.Stop(ctx, timeout); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\t// FIXME(vdemeester) remove volume in scale by default ?\n\t\t\t\tif err := c.Remove(ctx, false); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(containers) < scale {\n\t\terr := s.ensureImageExists(ctx, false, false)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif _, err = s.constructContainers(ctx, scale); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn s.up(ctx, \"\", false, options.Up{})\n}", "func (s *K8sSvc) ScaleService(ctx context.Context, cluster string, service string, desiredCount int64) error {\n\trequuid := utils.GetReqIDFromContext(ctx)\n\n\t// get statefulset\n\tstatefulset, err := s.cliset.AppsV1beta2().StatefulSets(s.namespace).Get(service, metav1.GetOptions{})\n\tif err != nil {\n\t\tglog.Errorln(\"get statefulset error\", err, \"requuid\", requuid, \"service\", service, \"namespace\", s.namespace)\n\t\treturn err\n\t}\n\n\tglog.Infoln(\"get statefulset for service\", service, \"requuid\", requuid, statefulset.Status)\n\n\t// update statefulset Replicas\n\tstatefulset.Spec.Replicas = utils.Int32Ptr(int32(desiredCount))\n\t_, err = s.cliset.AppsV1beta2().StatefulSets(s.namespace).Update(statefulset)\n\tif err != nil {\n\t\tglog.Errorln(\"update statefulset error\", err, \"requuid\", requuid, \"service\", service, \"namespace\", s.namespace)\n\t\treturn err\n\t}\n\n\tglog.Infoln(\"ScaleService complete\", service, \"desiredCount\", desiredCount, \"requuid\", requuid)\n\treturn nil\n}", "func (w *Worker) scaleDeployment(dep types.Deployment, replicas int64) {\n\tlockRes := strconv.FormatInt(dep.ID, 10)\n\terr := w.distLock.Lock(lockRes)\n\tif err != nil {\n\t\tw.log.Error(\"Failed to acquire deployment lock\", err)\n\t\treturn\n\t}\n\tscaledOk, stdout := w.kubectl.ScaleDeployment(dep.K8SName, replicas)\n\terr = w.distLock.Unlock(lockRes)\n\tif err != nil {\n\t\tw.log.Error(\"Failed to release deployment lock\", err)\n\t}\n\tdep.Replicas = replicas\n\terr = w.recordRevision(dep, stdout)\n\tif err != nil {\n\t\tw.log.Error(\"Failed to record revision\", err)\n\t}\n\tif scaledOk == true {\n\t\terr = w.databaseClient.SaveDeployment(&dep)\n\t\tif err != nil {\n\t\t\tw.log.Error(\"Failed to save deployment to db\", err)\n\t\t\treturn\n\t\t}\n\t}\n}", "func Scale(appName string, webCount int, workerCount int) error {\n\targs := []string{\"dokku\", \"ps:scale\", appName}\n\n\tif webCount > 0 {\n\t\twebPart := fmt.Sprintf(\"web=%v\", webCount)\n\t\targs = append(args, webPart)\n\t}\n\n\tif workerCount > 0 {\n\t\tworkerPart := fmt.Sprintf(\"worker=%v\", workerCount)\n\t\targs = append(args, workerPart)\n\t}\n\n\tlog.GeneralLogger.Println(args)\n\tcmd := common.NewShellCmd(strings.Join(args, \" \"))\n\tcmd.ShowOutput = false\n\tout, err := cmd.Output()\n\n\tif err != nil {\n\t\tlog.ErrorLogger.Println(\"Dokku ps:scale error:\", err.Error())\n\t\tlog.ErrorLogger.Println(\"Dokku ps:scale output:\", string(out))\n\t\treturn err\n\t}\n\tlog.GeneralLogger.Println(\"Dokku ps:scale output:\", string(out))\n\treturn nil\n}", "func (sc 
*ServiceController) Scale(role string, cardinal int) (bool, string) {\n\n\troleBody := make(map[string]interface{})\n\n\troleBody[\"cardinality\"] = 2\n\troleBody[\"force\"] = true\n\n\treturn sc.UpdateRole(role, roleBody)\n}", "func scaleResource(ctx context.Context, c client.Client, obj client.Object, replicas int32) error {\n\tpatch := []byte(fmt.Sprintf(`{\"spec\":{\"replicas\":%d}}`, replicas))\n\n\t// TODO: replace this with call to scale subresource once controller-runtime supports it\n\t// see: https://github.com/kubernetes-sigs/controller-runtime/issues/172\n\treturn c.Patch(ctx, obj, client.RawPatch(types.MergePatchType, patch))\n}", "func TestScaleDeployment(t *testing.T) {\n\tt.Log(\"Scale deployment\")\n\tclientset, err := k8sutils.MustGetClientset()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tctx := context.Background()\n\t// Create namespace if it doesn't exist\n\tnamespaceExists, err := k8sutils.NamespaceExists(ctx, clientset, namespace)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif !namespaceExists {\n\t\terr = k8sutils.MustCreateNamespace(ctx, clientset, namespace)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\n\tdeployment, err := k8sutils.MustParseDeployment(noopDeploymentMap[*osType])\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdeploymentsClient := clientset.AppsV1().Deployments(namespace)\n\terr = k8sutils.MustScaleDeployment(ctx, deploymentsClient, deployment, clientset, namespace, podLabelSelector, *replicas, *skipWait)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func (mg *Groups) Scale(instances int, force bool) error {\n\n\tif mg.group != nil && len(mg.group.ID) > 0 {\n\t\tif appClient := application.New(mg.client); appClient != nil {\n\n\t\t\tcallbackFunc := func(appID string) error {\n\n\t\t\t\tif err := appClient.Get(appID).Scale(instances, force); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn mg.traverseGroupsWithAppID(mg.group, callbackFunc)\n\t\t}\n\t\treturn fmt.Errorf(\"unnable to connect\")\n\t}\n\treturn errors.New(\"group cannot be null nor empty\")\n}", "func (b *Base) Scale(w http.ResponseWriter, r *http.Request) {\n\tb.log.Printf(\"%s %s -> %s\", r.Method, r.URL.Path, r.RemoteAddr)\n\n\tsOptions, pOptions, kOptions, oOptions := render.SetDefaultScaleOptions()\n\n\tpv := render.PageVars{\n\t\tTitle: \"Practice Scales and Arpeggios\", // default scale initially displayed is A Major\n\t\tScalearp: \"Scale\",\n\t\tPitch: \"Major\",\n\t\tKey: \"A\",\n\t\tScaleImgPath: \"img/scale/major/a1.png\",\n\t\tGifPath: \"\",\n\t\tAudioPath: \"mp3/scale/major/a1.mp3\",\n\t\tAudioPath2: \"mp3/drone/a1.mp3\",\n\t\tLeftLabel: \"Listen to Major scale\",\n\t\tRightLabel: \"Listen to Drone\",\n\t\tScaleOptions: sOptions,\n\t\tPitchOptions: pOptions,\n\t\tKeyOptions: kOptions,\n\t\tOctaveOptions: oOptions,\n\t}\n\n\tif err := render.Render(w, \"scale.html\", pv); err != nil {\n\t\tb.log.Printf(\"%s %s -> %s : ERROR : %v\", r.Method, r.URL.Path, r.RemoteAddr, err)\n\t\treturn\n\t}\n}", "func (o ContainerServiceOutput) Scale() pulumi.IntOutput {\n\treturn o.ApplyT(func(v *ContainerService) pulumi.IntOutput { return v.Scale }).(pulumi.IntOutput)\n}", "func (s *ScalingService) ScaleUp() error {\n\tklog.Infoln(\"Scaling up the controller\")\n\treturn s.scaleTo(1)\n}", "func (c *Client) ScaleMarathonApp(appID string, instances int) {\n\tdata := MarathonAppInstances{instances}\n\treq, err := c.newRequest(\"PUT\", fmt.Sprintf(\"/service/marathon/v2/apps/%s\", appID), data)\n\tif err != nil 
{\n\t\tlog.Errorln(err)\n\t}\n\n\tbody, err := c.do(req)\n\tif err != nil {\n\t\tlog.Errorln(err)\n\t}\n\n\tvar resp MarathonScaleResult\n\terr = json.Unmarshal([]byte(body), &resp)\n\tif err != nil {\n\t\tlog.Errorln(err)\n\t} else {\n\t\tlog.Infof(\"Successfully scaled app %s: version %s, deploymentId %s\",\n\t\t\tappID, resp.Version, resp.DeploymentID)\n\t}\n\n\tlog.Infoln(resp)\n}", "func (c *nodePools) Scale(id string, req *types.NodePoolScaleRequest) (*types.NodePool, error) {\n\tpath := fmt.Sprintf(\"/v3/organizations/%s/clusters/%s/node-pools/%s\", c.organizationID, c.clusterID, id)\n\tvar out types.NodePool\n\treturn &out, c.client.Patch(path, req, &out)\n}", "func (c *ProcessClient) Scale(ctx context.Context, guid string, scale *resource.ProcessScale) (*resource.Process, error) {\n\tvar process resource.Process\n\t_, err := c.client.post(ctx, path.Format(\"/v3/processes/%s/actions/scale\", guid), scale, &process)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &process, nil\n}", "func (this *Decider) OnScale(command common.Command) error {\n\tappMetrical := AppMetrical{}\n\terr := json.Unmarshal([]byte(command.Body), &appMetrical)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tappInfo, strategyName := this.getAppInfo(appMetrical.App)\n\tif appInfo == nil {\n\t\tlog.Printf(\"can not get app info, may be the strategy is disabled.\")\n\t\treturn nil\n\t}\n\n\tscaleNumber, err := getScaleNumber(appMetrical.Metrical, appInfo.AppConf)\n\tif err != nil {\n\t\tlog.Printf(\"get scale number error [%s] : %s\", appMetrical.App, err)\n\t\treturn err\n\t}\n\n\tlog.Printf(\"get scale number: %d\", scaleNumber)\n\n\tif scaleNumber <= 0 {\n\t\t// Do nothing except set the new metrical\n\t\treturn nil\n\t} else if scaleNumber > 0 {\n\t\tappScales := this.getAppScales(strategyName, appInfo, scaleNumber)\n\t\tfmt.Println(\"===need scale=====\", appScales)\n\n\t\tmetricalAppScales := []common.MetricalAppScale{}\n\n\t\tfor _, app := range appScales {\n\t\t\taInfo, _ := this.getAppInfo(app.App)\n\t\t\tmetricalAppScales = append(metricalAppScales,\n\t\t\t\tcommon.MetricalAppScale{app.App, app.Number, aInfo.AppConf.MinNum})\n\t\t}\n\n\t\tmetricalAppScales = append(metricalAppScales,\n\t\t\tcommon.MetricalAppScale{appInfo.AppConf.App,\n\t\t\t\tscaleNumber, appInfo.AppConf.MinNum})\n\n\t\tmetricalAppScaleHosts, e := this.Client.MetricalScaleApps(metricalAppScales)\n\n\t\tif e != nil {\n\t\t\tlog.Printf(\"get metrical app scale hosts failed [%s]\", e)\n\t\t\treturn e\n\t\t}\n\t\tfmt.Println(\"get metrical app scale hosts\", metricalAppScaleHosts)\n\t\t// publish messages to apps\n\t\tpublishMessagesToApps(metricalAppScaleHosts, appInfo.AppConf.App)\n\t}\n\n\tdefer func() {\n\t\t// update current metrical\n\t\t(*appInfo).CurrentMetrical = appMetrical.Metrical\n\t}()\n\n\treturn nil\n}", "func (r *ScaleREST) Update(ctx kapi.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) {\n\tdeploymentConfig, err := r.registry.GetDeploymentConfig(ctx, name)\n\tif err != nil {\n\t\treturn nil, false, errors.NewNotFound(extensions.Resource(\"scale\"), name)\n\t}\n\n\told := api.ScaleFromConfig(deploymentConfig)\n\tobj, err := objInfo.UpdatedObject(ctx, old)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\tscale, ok := obj.(*extensions.Scale)\n\tif !ok {\n\t\treturn nil, false, errors.NewBadRequest(fmt.Sprintf(\"wrong object passed to Scale update: %v\", obj))\n\t}\n\n\tif errs := extvalidation.ValidateScale(scale); len(errs) > 0 {\n\t\treturn nil, false, 
errors.NewInvalid(extensions.Kind(\"Scale\"), scale.Name, errs)\n\t}\n\n\tdeploymentConfig.Spec.Replicas = scale.Spec.Replicas\n\tif err := r.registry.UpdateDeploymentConfig(ctx, deploymentConfig); err != nil {\n\t\treturn nil, false, err\n\t}\n\n\treturn scale, false, nil\n}", "func (s *Scheduler) ScaleProcess(app, process string, desired int) error {\n\tservices, err := s.stackBuilder.Services(app)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// If there's no matching ECS service for this process, return an error.\n\tif _, ok := services[process]; !ok {\n\t\treturn &ProcessNotFoundError{Process: process}\n\t}\n\n\t_, err = s.ecs.UpdateService(&ecs.UpdateServiceInput{\n\t\tCluster: aws.String(s.Cluster),\n\t\tDesiredCount: aws.Int64(int64(desired)),\n\t\tService: aws.String(services[process]),\n\t})\n\treturn err\n}", "func ScaleFromDeployment(deployment *Deployment) *Scale {\n\treturn &Scale{\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: deployment.Name,\n\t\t\tNamespace: deployment.Namespace,\n\t\t\tCreationTimestamp: deployment.CreationTimestamp,\n\t\t},\n\t\tSpec: ScaleSpec{\n\t\t\tReplicas: deployment.Spec.Replicas,\n\t\t},\n\t\tStatus: ScaleStatus{\n\t\t\tReplicas: deployment.Status.Replicas,\n\t\t\tSelector: deployment.Spec.Selector,\n\t\t},\n\t}\n}", "func (aip *AzureInfrastructureProvider) Scale(request entities.ScaleRequest) (entities.InfrastructureOperation, derrors.Error) {\n\treturn NewScalerOperation(aip.credentials, request, aip.config)\n}", "func ScaleStatefulSet(ctx context.Context, sts *appsv1.StatefulSet, amount int32, kubeClient kubernetes.Interface) error {\n\tupdatedSts := sts.DeepCopy()\n\tupdatedReplicas := *updatedSts.Spec.Replicas + amount\n\tif updatedReplicas < 0 {\n\t\treturn errors.New(\"error, can't scale statefulset below 0 replicas\")\n\t}\n\tupdatedSts.Spec.Replicas = &updatedReplicas\n\terr := PatchStatefulSet(ctx, sts, updatedSts, kubeClient)\n\treturn err\n}", "func (this *Decider) ScaleAction(command common.Command) error {\n\tmessage := common.InformScaleDownAppMessage{}\n\terr := json.Unmarshal([]byte(command.Body), &message)\n\tif err != nil {\n\t}\n\n\tthis.Client.MetricalScaleAppsAction(message)\n\treturn err\n}", "func ScaleStatefulSet(ctx context.Context, c client.Client, key client.ObjectKey, replicas int32) error {\n\tstatefulset := &appsv1.StatefulSet{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: key.Name,\n\t\t\tNamespace: key.Namespace,\n\t\t},\n\t}\n\n\treturn scaleResource(ctx, c, statefulset, replicas)\n}", "func ScaleStatefulSet(ctx context.Context, t ginkgo.GinkgoTInterface, options *k8s.KubectlOptions,\n\tcl kubernetes.Interface, namespace string, replicas int32) error {\n\tstatefulSet, err := cl.AppsV1().StatefulSets(namespace).Get(ctx, StatefulSetName, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstatefulSet.Spec.Replicas = &replicas\n\t_, err = cl.AppsV1().StatefulSets(namespace).Update(ctx, statefulSet, metav1.UpdateOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tWaitDemoApp(t, options, int(replicas))\n\treturn nil\n}", "func (o ContainerOutput) Scale() pulumi.IntOutput {\n\treturn o.ApplyT(func(v *Container) pulumi.IntOutput { return v.Scale }).(pulumi.IntOutput)\n}", "func (t autoscale) AutoScale() {\n\ttimer := time.NewTicker(t.rate)\n\n\tfor {\n\t\tselect {\n\t\tcase event := <-t.app.errorEvents:\n\t\t\t// error event occurred disable\n\t\t\tlog.Println(\"Error Event:\", event)\n\t\t\ttimer.Stop()\n\t\tcase <-timer.C:\n\t\t\tlog.Println(\"Autoscaling Stress\")\n\t\t\tt.app.Increase()\n\t\tcase 
<-t.resume:\n\t\t\ttimer = time.NewTicker(t.rate)\n\t\t}\n\t}\n}", "func (tr *trooper) setScale(scale float64) { tr.part.SetScale(scale, scale, scale) }", "func applyScaleMeta(meta *runapi.ObjectMeta, scaleType string, scaleValue int) {\n\tif scaleValue > 0 {\n\t\tmeta.Annotations[\"autoscaling.knative.dev\"+scaleType] = strconv.Itoa(scaleValue)\n\t}\n}", "func (mw *MagickWand) Scale(cols, rows uint) error {\n\treturn mw.ScaleImage(cols, rows)\n}", "func (schedule *Schedule) scaleStage(position int, numToScale int, program string, masterAddress string) {\r\n\tnumScaled := 0\r\n\tfmt.Println(numScaled, \"|\", numToScale)\r\n\tfor numScaled < numToScale {\r\n\t\tif position == -1 {\r\n\t\t\treturn\r\n\t\t}\r\n\t\t// For now, only scale on free nodes\r\n\t\tvar newWorker *types.Worker\r\n\t\tif schedule.freeNodeList.Length() >= 1 {\r\n\t\t\tnewWorker = schedule.AssignWorkerToFreeNode(position)\r\n\t\t} else {\r\n\t\t\tnewWorker = schedule.AssignWorkerToUnderutilizedNode(position)\r\n\t\t\tif newWorker == nil {\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t}\r\n\t\tschedule.startWorker(newWorker, program, masterAddress)\r\n\t\tfmt.Println(\"Waiting for worker to send info...\")\r\n\t\tif err := schedule.waitForWorkerToSendInfo(newWorker); err != nil {\r\n\t\t\tpanic(err)\r\n\t\t}\r\n\t\tfmt.Println(\"Done waiting for worker to send info...\")\r\n\t\tschedule.setUpNewWorkerCommunication(newWorker)\r\n\t\tnumScaled++\r\n\t}\r\n\tschedule.StageList.FindByPosition(position).Scaled = true\r\n}", "func UpdateMinScale(template *servingv1alpha1.RevisionTemplateSpec, min int) error {\n\treturn UpdateRevisionTemplateAnnotation(template, autoscaling.MinScaleAnnotationKey, strconv.Itoa(min))\n}", "func PodAutoscalerChaosInDeployment(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, appsUnderTest []experimentTypes.ApplicationUnderTest, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {\n\n\t// Scale Application\n\tretryErr := retries.RetryOnConflict(retries.DefaultRetry, func() error {\n\t\tfor _, app := range appsUnderTest {\n\t\t\t// Retrieve the latest version of Deployment before attempting update\n\t\t\t// RetryOnConflict uses exponential backoff to avoid exhausting the apiserver\n\t\t\tappUnderTest, err := appsv1DeploymentClient.Get(app.AppName, metav1.GetOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Errorf(\"Failed to get latest version of Application Deployment, err: %v\", err)\n\t\t\t}\n\t\t\t// modifying the replica count\n\t\t\tappUnderTest.Spec.Replicas = int32Ptr(int32(experimentsDetails.Replicas))\n\t\t\tlog.Infof(\"Updating deployment %s to number of replicas %d\", appUnderTest.ObjectMeta.Name, experimentsDetails.Replicas)\n\t\t\t_, err = appsv1DeploymentClient.Update(appUnderTest)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tif retryErr != nil {\n\t\treturn errors.Errorf(\"Unable to scale the deployment, err: %v\", retryErr)\n\t}\n\tlog.Info(\"Application Started Scaling\")\n\n\terr = DeploymentStatusCheck(experimentsDetails, clients, appsUnderTest, resultDetails, eventsDetails, chaosDetails)\n\tif err != nil {\n\t\treturn errors.Errorf(\"Status Check failed, err: %v\", err)\n\t}\n\n\treturn nil\n}", "func (c ClientAsyncImpl) ScaleAsync(channel chan<- error, resource *Workload, replicas int32) {\n\tchannel <- c.syncClient.Scale(resource, replicas)\n}", "func (i *TiFlashInstance) ScaleConfig(\n\te executor.Executor,\n\ttopo 
Topology,\n\tclusterName,\n\tclusterVersion,\n\tdeployUser string,\n\tpaths meta.DirPaths,\n) error {\n\ts := i.topo\n\tdefer func() {\n\t\ti.topo = s\n\t}()\n\ti.topo = mustBeClusterTopo(topo)\n\treturn i.InitConfig(e, clusterName, clusterVersion, deployUser, paths)\n}", "func TestServiceScale(t *testing.T) {\n\tt.Parallel()\n\n\ts := NewSystemTest(t, \"cases/service_scale\", nil)\n\ts.Terraform.Apply()\n\tdefer s.Terraform.Destroy()\n\n\tserviceID := s.Terraform.Output(\"service_id\")\n\n\ts.Layer0.ScaleService(serviceID, 3)\n\ttestutils.WaitFor(t, time.Second*10, time.Minute*5, func() bool {\n\t\tlog.Debugf(\"Waiting for service to scale up\")\n\t\tservice := s.Layer0.GetService(serviceID)\n\t\treturn service.RunningCount == 3\n\t})\n\n\ts.Layer0.ScaleService(serviceID, 1)\n\ttestutils.WaitFor(t, time.Second*10, time.Minute*5, func() bool {\n\t\tlog.Debugf(\"Waiting for service to scale down\")\n\t\tservice := s.Layer0.GetService(serviceID)\n\t\treturn service.RunningCount == 1\n\t})\n}", "func (c *ConfigMapScaler) ExecuteScale(context *contextinternal.Context,\n\tclusterStateRegistry *clusterstate.ClusterStateRegistry,\n\tsd *ScaleDown, nodes []*corev1.Node, options ScaleUpOptions,\n\tcandidates ScaleDownCandidates,\n\tnodeNameToNodeInfo map[string]*schedulernodeinfo.NodeInfo) error {\n\n\terr := ExecuteScaleUp(context, clusterStateRegistry, options, c.maxBulkScaleUpCount)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(options) > 0 {\n\t\tklog.Infof(\"Scaling up node groups now, skip scaling down progress\")\n\t\treturn nil\n\t}\n\n\terr = ExecuteScaleDown(context, sd, nodes, candidates, nodeNameToNodeInfo)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (i *TiFlashInstance) ScaleConfig(\n\tctx context.Context,\n\te ctxt.Executor,\n\ttopo Topology,\n\tclusterName,\n\tclusterVersion,\n\tdeployUser string,\n\tpaths meta.DirPaths,\n) error {\n\ts := i.topo\n\tdefer func() {\n\t\ti.topo = s\n\t}()\n\ti.topo = mustBeClusterTopo(topo)\n\treturn i.InitConfig(ctx, e, clusterName, clusterVersion, deployUser, paths)\n}", "func (this *Deployment) deploy() error {\n\tif len(this.Application.Processes) == 0 {\n\t\treturn fmt.Errorf(\"No processes scaled up, adjust with `ps:scale procType=#` before deploying\")\n\t}\n\n\ttitleLogger := NewFormatter(this.Logger, GREEN)\n\tdimLogger := NewFormatter(this.Logger, DIM)\n\n\te := Executor{dimLogger}\n\n\tthis.autoDetectRevision()\n\n\terr := writeDeployScripts()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tremoveDynos, allocatingNewDynos, err := this.calculateDynosToDestroy()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif allocatingNewDynos {\n\t\tavailableNodes, err := this.syncNodes()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Now we've successfully sync'd and we have a list of nodes available to deploy to.\n\t\taddDynos, err := this.startDynos(availableNodes, titleLogger)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Fprintf(titleLogger, \"Arbitrary sleeping for 30s to allow dynos to warm up before syncing load balancers\\n\")\n\t\ttime.Sleep(30 * time.Second)\n\n\t\terr = this.Server.SyncLoadBalancers(&e, addDynos, removeDynos)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif !this.ScalingOnly {\n\t\t// Update releases.\n\t\treleases, err := getReleases(this.Application.Name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Prepend the release (releases are in descending order)\n\t\treleases = append([]Release{{\n\t\t\tVersion: this.Version,\n\t\t\tRevision: this.Revision,\n\t\t\tDate: 
time.Now(),\n\t\t\tConfig: this.Application.Environment,\n\t\t}}, releases...)\n\t\t// Only keep around the latest 15 (older ones are still in S3)\n\t\tif len(releases) > 15 {\n\t\t\treleases = releases[:15]\n\t\t}\n\t\terr = setReleases(this.Application.Name, releases)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\t// Trigger old dynos to shutdown.\n\t\tfor _, removeDyno := range removeDynos {\n\t\t\tfmt.Fprintf(titleLogger, \"Shutting down dyno: %v\\n\", removeDyno.Container)\n\t\t\tgo func(rd Dyno) {\n\t\t\t\trd.Shutdown(&Executor{os.Stdout})\n\t\t\t}(removeDyno)\n\t\t}\n\t}\n\n\treturn nil\n}", "func ScaleService(cluster, service string, desiredCount int) (*ecs.Service, error) {\n\tinput := &ecs.UpdateServiceInput{}\n\tinput.SetCluster(cluster)\n\tinput.SetService(service)\n\tinput.SetDesiredCount(int64(desiredCount))\n\tsvc := assertECS()\n\toutput, err := svc.UpdateService(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn output.Service, nil\n}", "func (s *ScalingService) ScaleDown() error {\n\tklog.Infoln(\"Scaling down the controller\")\n\treturn s.scaleTo(0)\n}", "func updateTacIfTiKVScale(tc *v1alpha1.TidbCluster, sldb *sldbv1.ServerlessDB, recommendedReplicas int32) error {\n\tsldb.Annotations[utils.AnnTiKVLastAutoScalingTimestamp] = fmt.Sprintf(\"%d\", time.Now().Unix())\n\ttc.Spec.TiKV.Replicas = recommendedReplicas\n\treturn nil\n}", "func (s *TaskSet) SetScale(v *Scale) *TaskSet {\n\ts.Scale = v\n\treturn s\n}", "func (ki *KernelInfo) Scale(scale float64, normalizeType KernelNormalizeType) {\n\tC.ScaleKernelInfo(ki.info, C.double(scale), C.GeometryFlags(normalizeType))\n\truntime.KeepAlive(ki)\n}", "func (a ClustersAPI) Resize(clusterID string, clusterSize models.ClusterSize) error {\n\tdata := struct {\n\t\tClusterID string `json:\"cluster_id,omitempty\" url:\"cluster_id,omitempty\"`\n\t\tmodels.ClusterSize\n\t}{\n\t\tclusterID,\n\t\tclusterSize,\n\t}\n\t_, err := a.Client.performQuery(http.MethodPost, \"/clusters/resize\", data, nil)\n\treturn err\n}", "func ScaleResources(resourceName corev1.ResourceName, quantity *resource.Quantity, factor float32) {\n\tswitch resourceName {\n\tcase corev1.ResourceCPU:\n\t\t// use millis\n\t\tquantity.SetScaled(int64(float32(quantity.MilliValue())*factor), resource.Milli)\n\tcase corev1.ResourceMemory:\n\t\t// use mega\n\t\tquantity.SetScaled(int64(float32(quantity.ScaledValue(resource.Mega))*factor), resource.Mega)\n\tdefault:\n\t\tquantity.Set(int64(float32(quantity.Value()) * factor))\n\t}\n}", "func (f *Framework) ScaleFleet(t *testing.T, log *logrus.Entry, flt *agonesv1.Fleet, replicas int32) {\n\tfleets := f.AgonesClient.AgonesV1().Fleets(f.Namespace)\n\tctx := context.Background()\n\n\trequire.Eventuallyf(t, func() bool {\n\t\tflt, err := fleets.Get(ctx, flt.ObjectMeta.Name, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Info(\"Could not get Fleet\")\n\t\t\treturn false\n\t\t}\n\n\t\tfltCopy := flt.DeepCopy()\n\t\tfltCopy.Spec.Replicas = replicas\n\t\t_, err = fleets.Update(ctx, fltCopy, metav1.UpdateOptions{})\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Info(\"Could not scale Fleet\")\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\t}, 5*time.Minute, time.Second, \"Could not scale Fleet %s\", flt.ObjectMeta.Name)\n}", "func (c *delegatingScaleInterface) Update(kind string, scale *extensionsv1beta1.Scale) (result *extensionsv1beta1.Scale, err error) {\n\tswitch {\n\tcase kind == \"DeploymentConfig\":\n\t\treturn c.dcs.UpdateScale(scale.Name, scale)\n\t\t// TODO: This is borked 
because the interface for Update is broken. Kind is insufficient.\n\tdefault:\n\t\treturn c.scales.Update(kind, scale)\n\t}\n}", "func (c *delegatingScaleInterface) Update(kind string, scale *extensions.Scale) (result *extensions.Scale, err error) {\n\tswitch {\n\tcase kind == \"DeploymentConfig\":\n\t\treturn c.dcs.UpdateScale(scale)\n\tcase latest.OriginKind(kind, \"\"):\n\t\treturn nil, errors.NewBadRequest(fmt.Sprintf(\"Kind %s has no Scale subresource\", kind))\n\tdefault:\n\t\treturn c.scales.Update(kind, scale)\n\t}\n}", "func aeScale(interval time.Duration, n int) time.Duration {\n\t// Don't scale until we cross the threshold\n\tif n <= aeScaleThreshold {\n\t\treturn interval\n\t}\n\n\tmultiplier := math.Ceil(math.Log2(float64(n))-math.Log2(aeScaleThreshold)) + 1.0\n\treturn time.Duration(multiplier) * interval\n}", "func Autoscale(max int) Option {\n\treturn func(opts workerOpts) workerOpts {\n\t\tif max == 0 {\n\t\t\tmax = runtime.NumCPU()\n\t\t}\n\n\t\topts.autoscaleMax = max\n\t\treturn opts\n\t}\n}", "func numReplicas(deployment *appsv1.Deployment, minReplicas, maxReplicas *int) int32 {\n\tif *minReplicas == 0 && *maxReplicas == 0 {\n\t\t// first, check if an app _should_ be scaled to zero by setting min = max = 0\n\t\treturn 0\n\t} else if *minReplicas == *maxReplicas {\n\t\t// if min == max, the autoscaler is disabled - scale to the desired number of replicas in the application spec\n\t\treturn int32(*minReplicas)\n\t} else if deployment != nil && deployment.Spec.Replicas != nil {\n\t\t// if a deployment already exists, use that deployment's number of replicas,\n\t\t// unless the minimum allowed replica count is below that of the application spec.\n\t\treturn max(int32(*minReplicas), *deployment.Spec.Replicas)\n\t} else {\n\t\t// if this is a new deployment, fall back to the lowest number of replicas allowed in the application spec.\n\t\treturn int32(*minReplicas)\n\t}\n}", "func Scale(s Frac, m M) M {\n\tm = CopyMatrix(m)\n\n\tfor r := 1; r <= m.Rows(); r++ {\n\t\tm.MultiplyRow(r, s)\n\t}\n\n\treturn m\n}", "func (r *ScaleREST) Get(ctx kapi.Context, name string) (runtime.Object, error) {\n\tdeploymentConfig, err := r.registry.GetDeploymentConfig(ctx, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn api.ScaleFromConfig(deploymentConfig), nil\n}", "func UpdateMaxScale(template *servingv1alpha1.RevisionTemplateSpec, max int) error {\n\treturn UpdateRevisionTemplateAnnotation(template, autoscaling.MaxScaleAnnotationKey, strconv.Itoa(max))\n}", "func WaitUntilDeploymentScaledToDesiredReplicas(ctx context.Context, client client.Client, namespace, name string, desiredReplicas int32) error {\n\treturn retry.UntilTimeout(ctx, 5*time.Second, 300*time.Second, func(ctx context.Context) (done bool, err error) {\n\t\tdeployment := &appsv1.Deployment{}\n\t\tif err := client.Get(ctx, kutil.Key(namespace, name), deployment); err != nil {\n\t\t\treturn retry.SevereError(err)\n\t\t}\n\n\t\tif deployment.Generation != deployment.Status.ObservedGeneration {\n\t\t\treturn retry.MinorError(fmt.Errorf(\"%q not observed at latest generation (%d/%d)\", name,\n\t\t\t\tdeployment.Status.ObservedGeneration, deployment.Generation))\n\t\t}\n\n\t\tif deployment.Spec.Replicas == nil || *deployment.Spec.Replicas != desiredReplicas {\n\t\t\treturn retry.SevereError(fmt.Errorf(\"waiting for deployment %q to scale failed. 
spec.replicas does not match the desired replicas\", name))\n\t\t}\n\n\t\tif deployment.Status.Replicas == desiredReplicas && deployment.Status.AvailableReplicas == desiredReplicas {\n\t\t\treturn retry.Ok()\n\t\t}\n\n\t\treturn retry.MinorError(fmt.Errorf(\"deployment %q currently has '%d' replicas. Desired: %d\", name, deployment.Status.AvailableReplicas, desiredReplicas))\n\t})\n}", "func (dev *pwm_context) Scale(value int) error {\n\tif dev.period == -1 {\n\t\tif err := dev.ReadPeriod(); err != nil {\n\t\t\treturn fmt.Errorf(\"pwm: error running Scale: %s\", err)\n\t\t}\n\t}\n\n\tduty := (float64(value) - dev.min) / dev.span\n\tfmt.Printf(\"pwm: Scaling pin[%d] from value: %d to duty: %f\\n\", dev.pin, value, duty)\n\treturn dev.WriteDuty(int(float64(dev.period) * duty))\n}", "func (s *UpdateTaskSetInput) SetScale(v *Scale) *UpdateTaskSetInput {\n\ts.Scale = v\n\treturn s\n}", "func (k *Instance) ScaleSharedAppsToZero() error {\n\tsharedDeps, sharedSS, err := k.GetPXSharedApps()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogrus.Infof(\"found %d deployments and %d statefulsets to scale down\", len(sharedDeps), len(sharedSS))\n\n\tvar valZero int32\n\tfor _, d := range sharedDeps {\n\t\tdeploymentName := d.Name\n\t\tdeploymentNamespace := d.Namespace\n\t\tlogrus.Infof(\"scaling down deployment: [%s] %s\", deploymentNamespace, deploymentName)\n\n\t\tt := func() (interface{}, bool, error) {\n\t\t\tdCopy, err := k.kubeClient.AppsV1().Deployments(deploymentNamespace).Get(context.TODO(), deploymentName, metav1.GetOptions{})\n\t\t\tif err != nil {\n\t\t\t\tif errors.IsNotFound(err) {\n\t\t\t\t\treturn nil, false, nil // done as deployment is deleted\n\t\t\t\t}\n\n\t\t\t\treturn nil, true, err\n\t\t\t}\n\n\t\t\tif *dCopy.Spec.Replicas == 0 {\n\t\t\t\tlogrus.Infof(\"app [%s] %s is already scaled down to 0\", dCopy.Namespace, dCopy.Name)\n\t\t\t\treturn nil, false, nil\n\t\t\t}\n\n\t\t\tdCopy = dCopy.DeepCopy()\n\n\t\t\t// save current replica count in annotations so it can be used later on to restore the replicas\n\t\t\tdCopy.Annotations[replicaMemoryKey] = fmt.Sprintf(\"%d\", *dCopy.Spec.Replicas)\n\t\t\tdCopy.Spec.Replicas = &valZero\n\n\t\t\t_, err = k.kubeClient.AppsV1().Deployments(dCopy.Namespace).Update(context.TODO(), dCopy, metav1.UpdateOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn nil, true, err\n\t\t\t}\n\n\t\t\treturn nil, false, nil\n\t\t}\n\n\t\tif _, err := task.DoRetryWithTimeout(t, deploymentUpdateTimeout, defaultRetryInterval); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfor _, s := range sharedSS {\n\t\tlogrus.Infof(\"scaling down statefulset: [%s] %s\", s.Namespace, s.Name)\n\n\t\tt := func() (interface{}, bool, error) {\n\t\t\tsCopy, err := k.kubeClient.AppsV1().StatefulSets(s.Namespace).Get(context.TODO(), s.Name, metav1.GetOptions{})\n\t\t\tif err != nil {\n\t\t\t\tif errors.IsNotFound(err) {\n\t\t\t\t\treturn nil, false, nil // done as statefulset is deleted\n\t\t\t\t}\n\n\t\t\t\treturn nil, true, err\n\t\t\t}\n\n\t\t\tsCopy = sCopy.DeepCopy()\n\t\t\t// save current replica count in annotations so it can be used later on to restore the replicas\n\t\t\tsCopy.Annotations[replicaMemoryKey] = fmt.Sprintf(\"%d\", *sCopy.Spec.Replicas)\n\t\t\tsCopy.Spec.Replicas = &valZero\n\n\t\t\t_, err = k.kubeClient.AppsV1().StatefulSets(sCopy.Namespace).Update(context.TODO(), sCopy, metav1.UpdateOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn nil, true, err\n\t\t\t}\n\n\t\t\treturn nil, false, nil\n\t\t}\n\n\t\tif _, err := task.DoRetryWithTimeout(t, statefulSetUpdateTimeout, 
defaultRetryInterval); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (p *P1D) Scale(factor float64) {\n\tp.bng.scaleW(factor)\n}", "func (s *CreateTaskSetInput) SetScale(v *Scale) *CreateTaskSetInput {\n\ts.Scale = v\n\treturn s\n}", "func (i *MonitorInstance) ScaleConfig(\n\tctx context.Context,\n\te ctxt.Executor,\n\ttopo Topology,\n\tclusterName string,\n\tclusterVersion string,\n\tdeployUser string,\n\tpaths meta.DirPaths,\n) error {\n\ts := i.topo\n\tdefer func() { i.topo = s }()\n\ti.topo = topo\n\treturn i.InitConfig(ctx, e, clusterName, clusterVersion, deployUser, paths)\n}", "func (pa *PodAutoscaler) ScaleBounds(asConfig *autoscalerconfig.Config) (int32, int32) {\n\tvar min int32\n\tif pa.Spec.Reachability != ReachabilityUnreachable {\n\t\tmin = asConfig.MinScale\n\t\tif paMin, ok := pa.annotationInt32(autoscaling.MinScaleAnnotation); ok {\n\t\t\tmin = paMin\n\t\t}\n\t}\n\n\tmax := asConfig.MaxScale\n\tif paMax, ok := pa.annotationInt32(autoscaling.MaxScaleAnnotation); ok {\n\t\tmax = paMax\n\t}\n\n\treturn min, max\n}", "func (o *ReplaceAppsV1NamespacedReplicaSetScaleCreated) SetPayload(payload *models.IoK8sAPIAutoscalingV1Scale) {\n\to.Payload = payload\n}", "func (ex *ExampleMNIST3D) scaleSprites() {\n\n\tfor i := 0; i < len(ex.sprites); i++ {\n\t\tex.sprites[i].SetScale(ex.spriteScale, ex.spriteScale, ex.spriteScale)\n\t}\n}", "func (resizer *DeploymentConfigResizer) Resize(namespace, name string, newSize uint, preconditions *kubectl.ResizePrecondition, retry, waitForReplicas *kubectl.RetryParams) error {\n\tif preconditions == nil {\n\t\tpreconditions = &kubectl.ResizePrecondition{-1, \"\"}\n\t}\n\tif retry == nil {\n\t\t// Make it try only once, immediately\n\t\tretry = &kubectl.RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}\n\t}\n\tcond := kubectl.ResizeCondition(resizer, preconditions, namespace, name, newSize)\n\tif err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil {\n\t\treturn err\n\t}\n\tif waitForReplicas != nil {\n\t\trc := &kapi.ReplicationController{ObjectMeta: kapi.ObjectMeta{Namespace: namespace, Name: rcName}}\n\t\treturn wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout,\n\t\t\tresizer.c.ControllerHasDesiredReplicas(rc))\n\t}\n\treturn nil\n}", "func (w *windowImpl) Scale(dr image.Rectangle, src screen.Texture, sr image.Rectangle, op draw.Op, opts *screen.DrawOptions) {\n\tpanic(\"not implemented\") // TODO: Implement\n}", "func (this *Transformable) Scale(factor Vector2f) {\n\tC.sfTransformable_scale(this.cptr, factor.toC())\n}", "func (as *AutoScaling) SetDesiredCapacity(rp SetDesiredCapacityRequestParams) (resp *SimpleResp, err error) {\n\tresp = &SimpleResp{}\n\tparams := makeParams(\"SetDesiredCapacity\")\n\tparams[\"AutoScalingGroupName\"] = rp.AutoScalingGroupName\n\tparams[\"DesiredCapacity\"] = strconv.FormatInt(rp.DesiredCapacity, 10)\n\tif rp.HonorCooldown {\n\t\tparams[\"HonorCooldown\"] = \"true\"\n\t}\n\terr = as.query(params, resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}", "func (canvas *Canvas) Scale(x, y float32) {\n\twriteCommand(canvas.contents, \"cm\", x, 0, 0, y, 0, 0)\n}", "func Scale(value float64) *SimpleElement { return newSEFloat(\"scale\", value) }", "func (e Engine) scaleNodePoolToCount(nodePool *godo.KubernetesNodePool, numNodes int) error {\n\t// create a request to scale node pool\n\t// both name and count are required fields\n\treq := godo.KubernetesNodePoolUpdateRequest{\n\t\tName: nodePool.Name,\n\t\tCount: 
numNodes,\n\t}\n\tlog.Infof(\"Requesting DigitalOcean to scale node pool %s to %s\", req.Name, req.Count)\n\n\t_, _, err := e.client.Kubernetes.UpdateNodePool(context.Background(), e.config.ClusterID, nodePool.ID, &req)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"error scaling DigitalOcean node pool\")\n\t}\n\n\treturn nil\n}", "func (t *Tree) Scale(s float32) {\n\tif t.Leaf != nil {\n\t\tfor i, x := range t.Leaf.OutputDelta {\n\t\t\tt.Leaf.OutputDelta[i] = x * s\n\t\t}\n\t} else {\n\t\tt.Branch.FalseBranch.Scale(s)\n\t\tt.Branch.TrueBranch.Scale(s)\n\t}\n}", "func AutoscalerRecoveryInDeployment(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, appsUnderTest []experimentTypes.ApplicationUnderTest) error {\n\n\t// Scale back to initial number of replicas\n\tretryErr := retries.RetryOnConflict(retries.DefaultRetry, func() error {\n\t\t// Retrieve the latest version of Deployment before attempting update\n\t\t// RetryOnConflict uses exponential backoff to avoid exhausting the apiserver\n\t\tfor _, app := range appsUnderTest {\n\t\t\tappUnderTest, err := appsv1DeploymentClient.Get(app.AppName, metav1.GetOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Errorf(\"Failed to find the latest version of Application Deployment with name %v, err: %v\", app.AppName, err)\n\t\t\t}\n\n\t\t\tappUnderTest.Spec.Replicas = int32Ptr(int32(app.ReplicaCount)) // modify replica count\n\t\t\t_, err = appsv1DeploymentClient.Update(appUnderTest)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tif retryErr != nil {\n\t\treturn errors.Errorf(\"Unable to rollback the deployment, err: %v\", retryErr)\n\t}\n\tlog.Info(\"[Info]: Application pod started rolling back\")\n\n\terr = retry.\n\t\tTimes(uint(experimentsDetails.Timeout / experimentsDetails.Delay)).\n\t\tWait(time.Duration(experimentsDetails.Delay) * time.Second).\n\t\tTry(func(attempt uint) error {\n\t\t\tfor _, app := range appsUnderTest {\n\t\t\t\tapplicationDeploy, err := appsv1DeploymentClient.Get(app.AppName, metav1.GetOptions{})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn errors.Errorf(\"Unable to find the deployment with name %v, err: %v\", app.AppName, err)\n\t\t\t\t}\n\t\t\t\tif int(applicationDeploy.Status.AvailableReplicas) != app.ReplicaCount {\n\t\t\t\t\tlog.Infof(\"Application Available Replica Count is: %v\", applicationDeploy.Status.AvailableReplicas)\n\t\t\t\t\treturn errors.Errorf(\"Unable to rollback to older replica count, err: %v\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Info(\"[RollBack]: Application Pod roll back to initial number of replicas\")\n\n\treturn nil\n}", "func (b *IconButton) Scale(scale float32) *IconButton {\n\tb.size = b.th.TextSize.Scale(scale * 0.72)\n\treturn b\n}", "func ScaleEtcd(ctx context.Context, c client.Client, key client.ObjectKey, replicas int) error {\n\tetcd := &druidv1alpha1.Etcd{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: key.Name,\n\t\t\tNamespace: key.Namespace,\n\t\t},\n\t}\n\n\treturn scaleResource(ctx, c, etcd, int32(replicas))\n}", "func Scale(w, h int) int {\n\ta := w / WIDTH\n\tb := h / HEIGHT\n\tif a < b {\n\t\treturn a\n\t}\n\treturn b\n}", "func (o *ReplaceAppsV1NamespacedReplicaSetScaleOK) SetPayload(payload *models.IoK8sAPIAutoscalingV1Scale) {\n\to.Payload = payload\n}", "func (b *IconButton) Scale(scale float32) *IconButton {\n\tb.size = b.Theme.TextSize.Scale(scale * 0.72)\n\treturn b\n}", "func (k *Instance) RestoreScaledAppsReplicas() error 
{\n\tsharedDeps, sharedSS, err := k.GetPXSharedApps()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogrus.Infof(\"found %d deployments and %d statefulsets to restore\", len(sharedDeps), len(sharedSS))\n\n\tfor _, d := range sharedDeps {\n\t\tdeploymentName := d.Name\n\t\tdeploymentNamespace := d.Namespace\n\t\tlogrus.Infof(\"restoring app: [%s] %s\", d.Namespace, d.Name)\n\t\tt := func() (interface{}, bool, error) {\n\t\t\tdCopy, err := k.kubeClient.AppsV1().Deployments(deploymentNamespace).Get(context.TODO(), deploymentName, metav1.GetOptions{})\n\t\t\tif err != nil {\n\t\t\t\tif errors.IsNotFound(err) {\n\t\t\t\t\treturn nil, false, nil // done as deployment is deleted\n\t\t\t\t}\n\n\t\t\t\treturn nil, true, err\n\t\t\t}\n\n\t\t\tdCopy = dCopy.DeepCopy()\n\t\t\tif dCopy.Annotations == nil {\n\t\t\t\treturn nil, false, nil // done as this is not an app we touched\n\t\t\t}\n\n\t\t\tval, present := dCopy.Annotations[replicaMemoryKey]\n\t\t\tif !present || len(val) == 0 {\n\t\t\t\tlogrus.Infof(\"not restoring app: [%s] %s as no annotation found to track replica count\", deploymentNamespace, deploymentName)\n\t\t\t\treturn nil, false, nil // done as this is not an app we touched\n\t\t\t}\n\n\t\t\tparsedVal := intstr.Parse(val)\n\t\t\tif parsedVal.Type != intstr.Int {\n\t\t\t\treturn nil, false /*retry won't help */, fmt.Errorf(\"failed to parse saved replica count: %v\", val)\n\t\t\t}\n\n\t\t\tdelete(dCopy.Annotations, replicaMemoryKey)\n\t\t\tdCopy.Spec.Replicas = &parsedVal.IntVal\n\n\t\t\t_, err = k.kubeClient.AppsV1().Deployments(dCopy.Namespace).Update(context.TODO(), dCopy, metav1.UpdateOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn nil, true, err\n\t\t\t}\n\n\t\t\treturn nil, false, nil\n\t\t}\n\n\t\tif _, err := task.DoRetryWithTimeout(t, deploymentUpdateTimeout, defaultRetryInterval); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfor _, s := range sharedSS {\n\t\tlogrus.Infof(\"restoring app: [%s] %s\", s.Namespace, s.Name)\n\n\t\tt := func() (interface{}, bool, error) {\n\t\t\tsCopy, err := k.kubeClient.AppsV1().StatefulSets(s.Namespace).Get(context.TODO(), s.Name, metav1.GetOptions{})\n\t\t\tif err != nil {\n\t\t\t\tif errors.IsNotFound(err) {\n\t\t\t\t\treturn nil, false, nil // done as statefulset is deleted\n\t\t\t\t}\n\n\t\t\t\treturn nil, true, err\n\t\t\t}\n\n\t\t\tsCopy = sCopy.DeepCopy()\n\t\t\tif sCopy.Annotations == nil {\n\t\t\t\treturn nil, false, nil // done as this is not an app we touched\n\t\t\t}\n\n\t\t\tval, present := sCopy.Annotations[replicaMemoryKey]\n\t\t\tif !present || len(val) == 0 {\n\t\t\t\treturn nil, false, nil // done as this is not an app we touched\n\t\t\t}\n\n\t\t\tparsedVal := intstr.Parse(val)\n\t\t\tif parsedVal.Type != intstr.Int {\n\t\t\t\treturn nil, false, fmt.Errorf(\"failed to parse saved replica count: %v\", val)\n\t\t\t}\n\n\t\t\tdelete(sCopy.Annotations, replicaMemoryKey)\n\t\t\tsCopy.Spec.Replicas = &parsedVal.IntVal\n\n\t\t\t_, err = k.kubeClient.AppsV1().StatefulSets(sCopy.Namespace).Update(context.TODO(), sCopy, metav1.UpdateOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn nil, true, err\n\t\t\t}\n\n\t\t\treturn nil, false, nil\n\t\t}\n\n\t\tif _, err := task.DoRetryWithTimeout(t, statefulSetUpdateTimeout, defaultRetryInterval); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (c *Client) ScaleUpCluster(args *ScaleUpClusterArgs) (*ScaleUpClusterResponse, error) {\n\tvar params map[string]string\n\tif args != nil {\n\t\tparams = map[string]string{\n\t\t\t\"clientToken\": 
c.GenerateClientToken(),\n\t\t\t\"scalingUp\": \"\",\n\t\t}\n\t}\n\tpostContent, err := json.Marshal(args)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tutil.Debug(\"\", fmt.Sprintf(\"ScaleUpCluster Post body: %s\", string(postContent)))\n\treq, err := bce.NewRequest(\"POST\", c.GetURL(\"v1/cluster\", params), bytes.NewBuffer(postContent))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := c.SendRequest(req, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbodyContent, err := resp.GetBodyContent()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar scResp *ScaleUpClusterResponse\n\terr = json.Unmarshal(bodyContent, &scResp)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn scResp, nil\n}", "func (schedulerClient *Scheduler) AutoScale(ignoreTimeSchedule bool){\n\tcalender := InitAutoScalingCalender()\n\ttimeZone := os.Getenv(\"TIME_ZONE\")\n\tif timeZone == \"\"{\n\t\ttimeZone = \"America/Edmonton\"\n\t}\n\tloc ,_ := time.LoadLocation(timeZone)\n\tlog.Println(\"Time Zone is set to \",loc.String())\n\tfor {\n\t\t// Check if auto scaling on\n\t\tif AutoScaleByTime(calender,time.Now().In(loc)) || ignoreTimeSchedule {\n\t\t\t// Get the list of nodes in\tthe workerPool\n\t\t\tnodesList := schedulerClient.clusterClient.getWorkersNodesIP(schedulerClient.workerPool)\n\t\t\t// Get the list of pods with matching node selector\n\t\t\tnodeSelector := make(map[string]string)\n\t\t\tnodeSelector[\"pool\"] = schedulerClient.workerPool\n\t\t\tpodsList := schedulerClient.GetPodListWithLabels(schedulerClient.nameSpace,nodeSelector)\n\t\t\t// podList nil means there is error getting the pod\n\t\t\tif podsList == nil {\n\t\t\t\tlog.Println(\"Can't get pod list in the workerPool, skip this round\")\n\t\t\t\ttime.Sleep(schedulerClient.timeInterval * time.Second) //check after the interval\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// It is a bit abnormal to have 0 pod in the cluster, but it is not fatal\n\t\t\tif len(podsList) == 0 {\n\t\t\t\tlog.Println(\"Warning: podList is empty\")\n\t\t\t}\n\t\t\t// In practice, the worker pool can't be empty\n\t\t\tif len(nodesList) == 0 {\n\t\t\t\tlog.Println(\"worker pool should not have 0 nodes, skip this round\")\n\t\t\t\ttime.Sleep(schedulerClient.timeInterval * time.Second) //check after the interval\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t//Find unused nodes\n\t\t\tunusedNodes := FindUnusedNodes(podsList,nodesList)\n\t\t\t//Log message\n\t\t\tschedulerClient.DebugMessage(nodesList,podsList,unusedNodes)\n\t\t\t// Make decision to scale in or out the workerPool, using v1 algorithm\n\t\t\tscaleOut,scaleIn,err := SparkAlgoV1(len(nodesList),int(math.Max(0,float64(len(unusedNodes)-FindPendingNodes(podsList)))),schedulerClient.extraNode,\n\t\t\t\tschedulerClient.minNode,schedulerClient.maxNode)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\ttime.Sleep(schedulerClient.timeInterval * time.Second) //check after the interval\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif scaleIn{\n\t\t\t\tremoveIndex := rand.Intn(len(unusedNodes)) //randomly pick a node to drop\n\t\t\t\tschedulerClient.ScaleIn(schedulerClient.workerPool,unusedNodes[removeIndex]) //remove the pod\n\t\t\t}else if scaleOut{\n\t\t\t\tschedulerClient.ScaleOut(schedulerClient.workerPool)\n\t\t\t}\n\t\t}else{\n\t\t\t// Auto scaling mode off, turn on maximum number of allowed worker nodes\n\t\t\tlog.Println(\"Cluster AutoScaling is OFF, set the nodes number to max\")\n\t\t\tnodesList := schedulerClient.clusterClient.getWorkersNodesIP(schedulerClient.workerPool)\n\t\t\tif len(nodesList) == 0 
{\n\t\t\t\tlog.Println(\"Warning: Node list is empty, skip this round\")\n\t\t\t\ttime.Sleep(schedulerClient.timeInterval * time.Second) //check after the interval\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif len(nodesList) < schedulerClient.maxNode {\n\t\t\t\tschedulerClient.ScaleOut(schedulerClient.workerPool)\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(schedulerClient.timeInterval * time.Second) //check after the interval\n\t}\n}", "func WaitUntilDeploymentScaledToDesiredReplicas(ctx context.Context, client client.Client, key types.NamespacedName, desiredReplicas int32) error {\n\treturn retry.UntilTimeout(ctx, 5*time.Second, 300*time.Second, func(ctx context.Context) (done bool, err error) {\n\t\tdeployment := &appsv1.Deployment{}\n\t\tif err := client.Get(ctx, key, deployment); err != nil {\n\t\t\treturn retry.SevereError(err)\n\t\t}\n\n\t\tif deployment.Generation != deployment.Status.ObservedGeneration {\n\t\t\treturn retry.MinorError(fmt.Errorf(\"%q not observed at latest generation (%d/%d)\", key.Name,\n\t\t\t\tdeployment.Status.ObservedGeneration, deployment.Generation))\n\t\t}\n\n\t\tif deployment.Spec.Replicas == nil || *deployment.Spec.Replicas != desiredReplicas {\n\t\t\treturn retry.SevereError(fmt.Errorf(\"waiting for deployment %q to scale failed. spec.replicas does not match the desired replicas\", key.Name))\n\t\t}\n\n\t\tif deployment.Status.Replicas == desiredReplicas && deployment.Status.AvailableReplicas == desiredReplicas {\n\t\t\treturn retry.Ok()\n\t\t}\n\n\t\treturn retry.MinorError(fmt.Errorf(\"deployment %q currently has '%d' replicas. Desired: %d\", key.Name, deployment.Status.AvailableReplicas, desiredReplicas))\n\t})\n}", "func replicaUpdate(clientset *kubernetes.Clientset, metaname string, quantity string) {\n // Getting deployments\n deploymentsClient := clientset.AppsV1beta1().Deployments(apiv1.NamespaceDefault)\n\n // Updating deployment\n retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {\n // Retrieve the latest version of Deployment before attempting update\n // RetryOnConflict uses exponential backoff to avoid exhausting the apiserver\n result, getErr := deploymentsClient.Get(metaname, metav1.GetOptions{})\n if getErr != nil {\n panic(fmt.Errorf(\"Failed to get latest version of Deployment: %v\", getErr))\n }\n\n fmt.Printf(\"Updating replica count of %v by %v\\n\", metaname, quantity)\n\n // Parsing quantity to int32\n i, err := strconv.ParseInt(quantity, 10, 32)\n if err != nil {\n panic(err)\n }\n\n // Modify replica count\n oldRep := result.Spec.Replicas\n result.Spec.Replicas = int32Ptr(*oldRep + int32(i))\n if *result.Spec.Replicas < int32(1) {\n result.Spec.Replicas = int32Ptr(1)\n }\n _, updateErr := deploymentsClient.Update(result)\n return updateErr\n })\n if retryErr != nil {\n panic(fmt.Errorf(\"Update failed: %v\", retryErr))\n }\n fmt.Printf(\"Updated replica count of Deployment %v\\n\", metaname)\n}", "func (cli *DockerCli) CmdComposeScale(args ...string) error {\n\tcmd := Cli.Subcmd(\"compose scale\", []string{\"[SERVICE=NUM...]\"}, \"Set number of containers to run for a service.\", false)\n\tcomposeFile := cmd.String([]string{\"f\", \"-file\"}, \"docker-compose.yml\", \"Specify an alternate compose file\")\n\tprojectName := cmd.String([]string{\"p\", \"-project-name\"}, \"\", \"Specify an alternate project name\")\n\ttimeout := cmd.Int([]string{\"t\", \"-timeout\"}, 10, \"Specify a shutdown timeout in seconds\")\n\tcmd.Require(flag.Min, 0)\n\terr := cmd.ParseFlags(args, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif *projectName 
== \"\" {\n\t\t*projectName = getBaseDir()\n\t}\n\tproject, err := docker.NewProject(&docker.Context{\n\t\tContext: project.Context{\n\t\t\tComposeFiles: []string{*composeFile},\n\t\t\tProjectName: *projectName,\n\t\t},\n\t\tClientFactory: cli,\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\tservicesScale := map[string]int{}\n\tfor _, ss := range cmd.Args() {\n\t\tfields := strings.SplitN(ss, \"=\", 2)\n\t\tif len(fields) != 2 {\n\t\t\tcontinue\n\t\t}\n\t\tnum, err := strconv.Atoi(fields[1])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tservicesScale[fields[0]] = num\n\t}\n\terr = project.Scale(*timeout, servicesScale)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (o *ReplaceAppsV1NamespacedReplicaSetScaleOK) WithPayload(payload *models.IoK8sAPIAutoscalingV1Scale) *ReplaceAppsV1NamespacedReplicaSetScaleOK {\n\to.Payload = payload\n\treturn o\n}", "func (s *Surface) Scale(x, y float64) {\n\ts.Ctx.Call(\"scale\", x, y)\n}", "func (o BeanstalkScheduledTaskOutput) ScaleMinCapacity() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BeanstalkScheduledTask) *string { return v.ScaleMinCapacity }).(pulumi.StringPtrOutput)\n}", "func (m *Matrix3) Scale(s float64) {\n\tfor i, x := range m {\n\t\tm[i] = x * s\n\t}\n}", "func (o WorkerPoolOutput) Autoscale() GoogleDevtoolsRemotebuildexecutionAdminV1alphaAutoscaleResponseOutput {\n\treturn o.ApplyT(func(v *WorkerPool) GoogleDevtoolsRemotebuildexecutionAdminV1alphaAutoscaleResponseOutput {\n\t\treturn v.Autoscale\n\t}).(GoogleDevtoolsRemotebuildexecutionAdminV1alphaAutoscaleResponseOutput)\n}", "func (e Engine) randomScaleDown(nodepools []*godo.KubernetesNodePool, numToScale int) error {\n\tnodepools = shuffle(nodepools)\n\tfor _, np := range nodepools {\n\t\tif numToScale == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\t// limitations in DigitalOcean prevent scaling a node pool to less than 1\n\t\tif np.Count == 1 {\n\t\t\tcontinue\n\t\t}\n\n\t\tscaleNodePoolTo := getMinNodesNeededInNodePoolCount(np.Count, numToScale)\n\t\terr := e.scaleNodePoolToCount(np, scaleNodePoolTo)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tnumToScale = numToScale - (np.Count - scaleNodePoolTo)\n\t}\n\n\t// this case can happen if the total number of desired nodes is less than the\n\t// number of node pools in the cluster since there has to be 1 node per node pool\n\tif numToScale != 0 {\n\t\treturn errors.New(\"unable to scale to desired node count\")\n\t}\n\n\treturn nil\n}" ]
[ "0.82982755", "0.73352265", "0.7108116", "0.70009696", "0.6907844", "0.6820197", "0.6735582", "0.67346925", "0.66280305", "0.64414525", "0.6375801", "0.6365863", "0.6314644", "0.62918156", "0.62655246", "0.6144791", "0.61160296", "0.60683995", "0.60674256", "0.5875461", "0.5866468", "0.5857986", "0.5782939", "0.57543933", "0.57534087", "0.57254034", "0.5705834", "0.56751645", "0.5608171", "0.5565324", "0.55606824", "0.55580336", "0.55124044", "0.55025023", "0.549499", "0.5439164", "0.5412729", "0.5410058", "0.53859705", "0.53627956", "0.535995", "0.5341778", "0.53367895", "0.53256214", "0.52972615", "0.5280797", "0.5268056", "0.5257847", "0.5250301", "0.52080476", "0.5198418", "0.5169913", "0.5127899", "0.51273984", "0.5111673", "0.5111604", "0.51113", "0.51027673", "0.5098645", "0.50898", "0.5084365", "0.50776035", "0.5072261", "0.5069976", "0.5023281", "0.50049305", "0.5001821", "0.49958563", "0.49925777", "0.4985599", "0.4985254", "0.49840513", "0.49684936", "0.49664554", "0.4954801", "0.49471998", "0.4941029", "0.49338496", "0.49309978", "0.49047324", "0.4897895", "0.48952124", "0.48904333", "0.4889313", "0.48847258", "0.4881543", "0.48783296", "0.48742384", "0.48736534", "0.48725292", "0.48671666", "0.48630372", "0.48601282", "0.48593038", "0.48547417", "0.48535618", "0.48533538", "0.48497355", "0.48388308", "0.4835516" ]
0.8210777
1
deployment is a helper, it returns the kube deployment resource of the workload.
func (a *Workload) Deployment(ctx context.Context) (*appsv1.Deployment, error) { return a.cluster.Kubectl.AppsV1().Deployments(a.app.Org).Get( ctx, a.app.Name, metav1.GetOptions{}, ) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func kubeconDeployment(image, tag string) (*apiv1b2.Deployment, error) {\n\tom := webappMeta(helloKubeconDeploymentName, \"api\")\n\tif tag == \"\" || image == \"\" {\n\t\treturn nil, fmt.Errorf(\"error: image and tag must be defined\")\n\t}\n\tpts := &v1.PodTemplateSpec{\n\t\tObjectMeta: om,\n\t\tSpec: v1.PodSpec{\n\t\t\tContainers: []v1.Container{\n\t\t\t\tkubeconContainer(helloKubeconDeploymentName, image, tag),\n\t\t\t},\n\t\t},\n\t}\n\td := &apiv1b2.Deployment{\n\t\tObjectMeta: om,\n\t\tSpec: apiv1b2.DeploymentSpec{\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: om.Labels,\n\t\t\t},\n\t\t\tTemplate: *pts,\n\t\t},\n\t}\n\treturn d, nil\n}", "func (r *ContainerizedWorkloadReconciler) renderDeployment(ctx context.Context,\n\tworkload *oamv1alpha2.ContainerizedWorkload) (*appsv1.Deployment, error) {\n\n\tresources, err := cwh.Translator(ctx, workload)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdeploy, ok := resources[0].(*appsv1.Deployment)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"internal error, deployment is not rendered correctly\")\n\t}\n\t// the translator lib doesn't set the namespace\n\tdeploy.Namespace = workload.Namespace\n\t// k8s server-side patch complains if the protocol is not set\n\tfor i := 0; i < len(deploy.Spec.Template.Spec.Containers); i++ {\n\t\tfor j := 0; j < len(deploy.Spec.Template.Spec.Containers[i].Ports); j++ {\n\t\t\tif len(deploy.Spec.Template.Spec.Containers[i].Ports[j].Protocol) == 0 {\n\t\t\t\tdeploy.Spec.Template.Spec.Containers[i].Ports[j].Protocol = corev1.ProtocolTCP\n\t\t\t}\n\t\t}\n\t}\n\tr.Log.Info(\" rendered a deployment\", \"deploy\", deploy.Spec.Template.Spec)\n\n\t// set the controller reference so that we can watch this deployment and it will be deleted automatically\n\tif err := ctrl.SetControllerReference(workload, deploy, r.Scheme); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn deploy, nil\n}", "func GetDeployment(ns string, name string) Deployment {\n\tclient, err := LoadClient(Kubeconfig)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tversion := GetEnv(\"KUBERNETES_VERSION\", K8sVersion)\n\tif version == \"v1.8\" || version == \"v1.7\" || version == \"v1.6\" {\n\t\tvar deployment appsv1beta1.Deployment\n\t\tif err := client.Get(context.Background(), ns, name, &deployment); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\t//Name\n\t\tn := *deployment.Metadata.Name\n\t\tnc := TrimQuotes(n)\n\t\t// Namespace\n\t\tns := *deployment.Metadata.Namespace\n\t\tnsc := TrimQuotes(ns)\n\t\t// PodWanted\n\t\tpw := *deployment.Status.Replicas\n\t\t// PodRunning\n\t\tpr := *deployment.Status.AvailableReplicas\n\t\tst := \"Ready\"\n\t\tif pw != pr {\n\t\t\tst = \"NotReady\"\n\t\t}\n\t\t// Put in slice\n\t\td := Deployment{Status: st, Name: nc, Namespace: nsc, PodWanted: pw, PodRunning: pr}\n\t\treturn d\n\t}\n\tvar deployment appsv1.Deployment\n\tif err := client.Get(context.Background(), ns, name, &deployment); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t//Name\n\tn := *deployment.Metadata.Name\n\tnc := TrimQuotes(n)\n\t// Namespace\n\tns = *deployment.Metadata.Namespace\n\tnsc := TrimQuotes(ns)\n\t// PodWanted\n\tpw := *deployment.Status.Replicas\n\t// PodRunning\n\tpr := *deployment.Status.AvailableReplicas\n\tst := \"Ready\"\n\tif pw != pr {\n\t\tst = \"NotReady\"\n\t}\n\t// Put in slice\n\td := Deployment{Status: st, Name: nc, Namespace: nsc, PodWanted: pw, PodRunning: pr}\n\treturn d\n}", "func GetDeployment() *appsv1.Deployment {\n\tdeploy1 := &appsv1.Deployment{\n\t\tObjectMeta: v1.ObjectMeta{\n\t\t\tName: 
\"deployment1\",\n\t\t\tNamespace: \"default\",\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tSelector: &v1.LabelSelector{\n\t\t\t\tMatchLabels: map[string]string{\"deployment\": \"deployment1-deployment\"},\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: v1.ObjectMeta{Labels: map[string]string{\"deployment\": \"deployment1-deployment\"}},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"nginx\",\n\t\t\t\t\t\t\tImage: \"nginx\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\treturn deploy1\n}", "func GetDeployment(namespace string, deploymentName string) *v1beta1.Deployment {\n\treplicaset := int32(1)\n\treturn &v1beta1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: deploymentName,\n\t\t\tNamespace: namespace,\n\t\t\tLabels: map[string]string{\"firstLabel\": \"temp\"},\n\t\t\tAnnotations: map[string]string{\n\t\t\t\tconstants.ConfigmapUpdateOnChangeAnnotation: deploymentName,\n\t\t\t\tconstants.SecretUpdateOnChangeAnnotation: deploymentName},\n\t\t},\n\t\tSpec: v1beta1.DeploymentSpec{\n\t\t\tReplicas: &replicaset,\n\t\t\tStrategy: v1beta1.DeploymentStrategy{\n\t\t\t\tType: v1beta1.RollingUpdateDeploymentStrategyType,\n\t\t\t},\n\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\"secondLabel\": \"temp\"},\n\t\t\t\t},\n\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tImage: \"tutum/hello-world\",\n\t\t\t\t\t\t\tName: deploymentName,\n\t\t\t\t\t\t\tEnv: []v1.EnvVar{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"BUCKET_NAME\",\n\t\t\t\t\t\t\t\t\tValue: \"test\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func Deployment(namespace, name string, containerImages ...string) kapisext.Deployment {\n\treturn kapisext.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: namespace,\n\t\t\tName: name,\n\t\t\tSelfLink: \"/deployment/\" + name,\n\t\t},\n\t\tSpec: kapisext.DeploymentSpec{\n\t\t\tTemplate: kapi.PodTemplateSpec{\n\t\t\t\tSpec: PodSpec(containerImages...),\n\t\t\t},\n\t\t},\n\t}\n}", "func (a *Workload) Get(ctx context.Context, deployment *appsv1.Deployment) *models.AppDeployment {\n\tactive := false\n\troute := \"\"\n\tstageID := \"\"\n\tstatus := \"\"\n\tusername := \"\"\n\n\t// Query application deployment for stageID and status (ready vs desired replicas)\n\n\tdeploymentSelector := fmt.Sprintf(\"app.kubernetes.io/part-of=%s,app.kubernetes.io/name=%s\", a.app.Org, a.app.Name)\n\n\tdeploymentListOptions := metav1.ListOptions{\n\t\tLabelSelector: deploymentSelector,\n\t}\n\n\tdeployments, err := a.cluster.Kubectl.AppsV1().Deployments(a.app.Org).List(ctx, deploymentListOptions)\n\n\tif err != nil {\n\t\tstatus = pkgerrors.Wrap(err, \"failed to get Deployment status\").Error()\n\t} else if len(deployments.Items) < 1 {\n\t\tstatus = \"0/0\"\n\t} else {\n\t\tstatus = fmt.Sprintf(\"%d/%d\",\n\t\t\tdeployments.Items[0].Status.ReadyReplicas,\n\t\t\tdeployments.Items[0].Status.Replicas)\n\n\t\tstageID = deployments.Items[0].\n\t\t\tSpec.Template.ObjectMeta.Labels[\"epinio.suse.org/stage-id\"]\n\t\tusername = deployments.Items[0].Spec.Template.ObjectMeta.Labels[\"app.kubernetes.io/created-by\"]\n\n\t\tactive = true\n\t}\n\n\troutes, err := a.cluster.ListIngressRoutes(ctx, a.app.Org, names.IngressName(a.app.Name))\n\tif err != nil {\n\t\troute = err.Error()\n\t} else {\n\t\troute = 
routes[0]\n\t}\n\n\treturn &models.AppDeployment{\n\t\tActive: active,\n\t\tUsername: username,\n\t\tStageID: stageID,\n\t\tStatus: status,\n\t\tRoute: route,\n\t}\n}", "func deployment(deployment appsv1.Deployment) (*ResourceUsage, error) {\n\tvar (\n\t\tresourceOverhead float64 // max overhead compute resources (percent)\n\t\tpodOverhead int32 // max overhead pods during deployment\n\t)\n\n\treplicas := deployment.Spec.Replicas\n\tstrategy := deployment.Spec.Strategy\n\n\tif *replicas == 0 {\n\t\treturn &ResourceUsage{\n\t\t\tCPU: new(resource.Quantity),\n\t\t\tMemory: new(resource.Quantity),\n\t\t\tDetails: Details{\n\t\t\t\tVersion: deployment.APIVersion,\n\t\t\t\tKind: deployment.Kind,\n\t\t\t\tName: deployment.Name,\n\t\t\t\tReplicas: *replicas,\n\t\t\t\tMaxReplicas: *replicas,\n\t\t\t\tStrategy: string(strategy.Type),\n\t\t\t},\n\t\t}, nil\n\t}\n\n\tswitch strategy.Type {\n\tcase appsv1.RecreateDeploymentStrategyType:\n\t\t// no overhead on recreate\n\t\tresourceOverhead = 1\n\t\tpodOverhead = 0\n\tcase \"\":\n\t\t// RollingUpdate is the default an can be an empty string. If so, set the defaults\n\t\t// (https://pkg.go.dev/k8s.io/api/apps/v1?tab=doc#RollingUpdateDeployment) and continue calculation.\n\t\tdefaults := intstr.FromString(\"25%\")\n\t\tstrategy = appsv1.DeploymentStrategy{\n\t\t\tType: appsv1.RollingUpdateDeploymentStrategyType,\n\t\t\tRollingUpdate: &appsv1.RollingUpdateDeployment{\n\t\t\t\tMaxUnavailable: &defaults,\n\t\t\t\tMaxSurge: &defaults,\n\t\t\t},\n\t\t}\n\n\t\tfallthrough\n\tcase appsv1.RollingUpdateDeploymentStrategyType:\n\t\t// Documentation: https://pkg.go.dev/k8s.io/api/apps/v1?tab=doc#RollingUpdateDeployment\n\t\t// all default values are set as stated in the docs\n\t\tvar (\n\t\t\tmaxUnavailableValue intstr.IntOrString\n\t\t\tmaxSurgeValue intstr.IntOrString\n\t\t)\n\n\t\t// can be nil, if so apply default value\n\t\tif strategy.RollingUpdate == nil {\n\t\t\tmaxUnavailableValue = intstr.FromString(\"25%\")\n\t\t\tmaxSurgeValue = intstr.FromString(\"25%\")\n\t\t} else {\n\t\t\tmaxUnavailableValue = *strategy.RollingUpdate.MaxUnavailable\n\t\t\tmaxSurgeValue = *strategy.RollingUpdate.MaxSurge\n\t\t}\n\n\t\t// docs say, that the asolute number is calculated by rounding down.\n\t\tmaxUnavailable, err := intstr.GetValueFromIntOrPercent(&maxUnavailableValue, int(*replicas), false)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// docs say, absolute number is calculated by rounding up.\n\t\tmaxSurge, err := intstr.GetValueFromIntOrPercent(&maxSurgeValue, int(*replicas), true)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// podOverhead is the number of pods which can run more during a deployment\n\t\tpodOverhead = int32(maxSurge - maxUnavailable)\n\n\t\tresourceOverhead = (float64(podOverhead) / float64(*replicas)) + 1\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"deployment: %s deployment strategy %q is unknown\", deployment.Name, strategy.Type)\n\t}\n\n\tcpu, memory := podResources(&deployment.Spec.Template.Spec)\n\n\tmem := float64(memory.Value()) * float64(*replicas) * resourceOverhead\n\tmemory.Set(int64(math.Round(mem)))\n\n\tcpu.SetMilli(int64(math.Round(float64(cpu.MilliValue()) * float64(*replicas) * resourceOverhead)))\n\n\tresourceUsage := ResourceUsage{\n\t\tCPU: cpu,\n\t\tMemory: memory,\n\t\tDetails: Details{\n\t\t\tVersion: deployment.APIVersion,\n\t\t\tKind: deployment.Kind,\n\t\t\tName: deployment.Name,\n\t\t\tReplicas: *replicas,\n\t\t\tStrategy: string(strategy.Type),\n\t\t\tMaxReplicas: *replicas + 
podOverhead,\n\t\t},\n\t}\n\n\treturn &resourceUsage, nil\n}", "func (o DeliveryPipelineSerialPipelineStageStrategyCanaryRuntimeConfigKubernetesServiceNetworkingPtrOutput) Deployment() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *DeliveryPipelineSerialPipelineStageStrategyCanaryRuntimeConfigKubernetesServiceNetworking) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Deployment\n\t}).(pulumi.StringPtrOutput)\n}", "func KustomizeDeployment(kustomization *Kustomization, tmpl *template.Template) ([]byte, error) {\n\trepo := &kustomization.Repository\n\n\tdata := deploymentData{\n\t\tNs: kustomization.Ns,\n\t\tTier: kustomization.Tier,\n\t\tName: kustomization.Name,\n\t\tGroup: repo.Group,\n\t\tProject: repo.Project,\n\t\tPath: repo.Path,\n\t}\n\n\tmanifestBuffer := new(bytes.Buffer)\n\terr := tmpl.Execute(manifestBuffer, data)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can not apply variables to deployment template: %v\", err)\n\t}\n\treturn manifestBuffer.Bytes(), nil\n}", "func Deployment() appsv1.Deployment {\n\treturn appsv1.Deployment{\n\t\tTypeMeta: TypeMeta(),\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: DefaultNamespace,\n\t\t\tName: DefaultName,\n\t\t\tLabels: map[string]string{\n\t\t\t\tDefaultSelectorKey: DefaultSelectorValue,\n\t\t\t},\n\t\t\tAnnotations: map[string]string{\n\t\t\t\tcontroller.KconfigEnabledDeploymentAnnotation: \"true\",\n\t\t\t},\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\tDefaultSelectorKey: DefaultSelectorValue,\n\t\t\t\t},\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tNamespace: DefaultNamespace,\n\t\t\t\t\tName: DefaultName,\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\tDefaultSelectorKey: DefaultSelectorValue,\n\t\t\t\t\t},\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\tcontroller.KconfigEnvRefVersionAnnotation: \"0\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\tcorev1.Container{\n\t\t\t\t\t\t\tEnv: []corev1.EnvVar{},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func (o DeliveryPipelineSerialPipelineStageStrategyCanaryRuntimeConfigKubernetesServiceNetworkingOutput) Deployment() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DeliveryPipelineSerialPipelineStageStrategyCanaryRuntimeConfigKubernetesServiceNetworking) string {\n\t\treturn v.Deployment\n\t}).(pulumi.StringOutput)\n}", "func createDeployment(cluster *client.VanClient, annotations map[string]string) (*v1.Deployment, error) {\n\tname := \"nginx\"\n\treplicas := int32(1)\n\tdep := &v1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: cluster.Namespace,\n\t\t\tAnnotations: annotations,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"app\": name,\n\t\t\t},\n\t\t},\n\t\tSpec: v1.DeploymentSpec{\n\t\t\tReplicas: &replicas,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\t\"app\": name,\n\t\t\t\t},\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"app\": name,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{Name: \"nginx\", Image: \"quay.io/skupper/nginx-unprivileged:stable-alpine\", Ports: []corev1.ContainerPort{{Name: \"web\", ContainerPort: 8080}}, ImagePullPolicy: 
corev1.PullIfNotPresent},\n\t\t\t\t\t},\n\t\t\t\t\tRestartPolicy: corev1.RestartPolicyAlways,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t// Deploying resource\n\tdep, err := cluster.KubeClient.AppsV1().Deployments(cluster.Namespace).Create(context.TODO(), dep, metav1.CreateOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Wait for deployment to be ready\n\tdep, err = kube.WaitDeploymentReadyReplicas(dep.Name, cluster.Namespace, 1, cluster.KubeClient, timeout, interval)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn dep, nil\n}", "func (c *configuration) Deployment(clientSet ClientSet) *Deployment {\n\tif clientSet != nil {\n\t\treturn NewDeployment(clientSet)\n\t}\n\treturn nil\n}", "func generatorDeployment(c *configuration.Config, a *api.Apicurito) (dep client.Object) {\n\t// Define a new deployment\n\tname := DefineGeneratorName(a)\n\tdeployLabels := map[string]string{\n\t\t\"app\": \"apicurito\",\n\t\t\"component\": name,\n\t\t\"com.company\": \"Red_Hat\",\n\t\t\"rht.prod_name\": \"Red_Hat_Integration\",\n\t\t\"rht.prod_ver\": version.ShortVersion(),\n\t\t\"rht.comp\": \"Fuse\",\n\t\t\"rht.comp_ver\": version.ShortVersion(),\n\t\t\"rht.subcomp\": name,\n\t\t\"rht.subcomp_t\": \"infrastructure\",\n\t}\n\tdep = &appsv1.Deployment{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tAPIVersion: \"apps/v1\",\n\t\t\tKind: \"Deployment\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: a.Namespace,\n\t\t\tOwnerReferences: []metav1.OwnerReference{\n\t\t\t\t*metav1.NewControllerRef(a, schema.GroupVersionKind{\n\t\t\t\t\tGroup: api.SchemeGroupVersion.Group,\n\t\t\t\t\tVersion: api.SchemeGroupVersion.Version,\n\t\t\t\t\tKind: a.Kind,\n\t\t\t\t}),\n\t\t\t},\n\t\t},\n\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &a.Spec.Size,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: labelComponent(name),\n\t\t\t},\n\t\t\tStrategy: appsv1.DeploymentStrategy{\n\t\t\t\tType: appsv1.RollingUpdateDeploymentStrategyType,\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: deployLabels,\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{{\n\t\t\t\t\t\tImage: c.GeneratorImage,\n\t\t\t\t\t\tImagePullPolicy: corev1.PullIfNotPresent,\n\t\t\t\t\t\tName: name,\n\t\t\t\t\t\tPorts: []corev1.ContainerPort{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tContainerPort: 8080,\n\t\t\t\t\t\t\t\tName: \"http\",\n\t\t\t\t\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tContainerPort: 8181,\n\t\t\t\t\t\t\t\tName: \"health\",\n\t\t\t\t\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tContainerPort: 9779,\n\t\t\t\t\t\t\t\tName: \"prometheus\",\n\t\t\t\t\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tContainerPort: 8778,\n\t\t\t\t\t\t\t\tName: \"jolokia\",\n\t\t\t\t\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tLivenessProbe: &corev1.Probe{\n\t\t\t\t\t\t\tFailureThreshold: 3,\n\t\t\t\t\t\t\tInitialDelaySeconds: 180,\n\t\t\t\t\t\t\tPeriodSeconds: 10,\n\t\t\t\t\t\t\tSuccessThreshold: 1,\n\t\t\t\t\t\t\tTimeoutSeconds: 1,\n\t\t\t\t\t\t\tHandler: corev1.Handler{\n\t\t\t\t\t\t\t\tTCPSocket: &corev1.TCPSocketAction{\n\t\t\t\t\t\t\t\t\tPort: intstr.FromString(\"http\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tReadinessProbe: &corev1.Probe{\n\t\t\t\t\t\t\tFailureThreshold: 3,\n\t\t\t\t\t\t\tInitialDelaySeconds: 
10,\n\t\t\t\t\t\t\tPeriodSeconds: 10,\n\t\t\t\t\t\t\tSuccessThreshold: 1,\n\t\t\t\t\t\t\tTimeoutSeconds: 1,\n\t\t\t\t\t\t\tHandler: corev1.Handler{\n\t\t\t\t\t\t\t\tTCPSocket: &corev1.TCPSocketAction{\n\t\t\t\t\t\t\t\t\tPort: intstr.FromString(\"http\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\treturn\n}", "func GetResource(name string, namespace string, kubeclient *kubernetes.Clientset) (string, error) {\n\tif namespace == \"\" {\n\t\tnamespace = \"default\"\n\t}\n\n\topts := metaV1.ListOptions{\n\t\tLimit: 10,\n\t}\n\topts.APIVersion = \"apps/v1\"\n\topts.Kind = \"Deployment\"\n\n\tlist, err := kubeclient.AppsV1().Deployments(namespace).List(opts)\n\tif err != nil {\n\t\treturn \"\", pkgerrors.Wrap(err, \"Get Deployment error\")\n\t}\n\n\tfor _, deployment := range list.Items {\n\t\tif deployment.Name == name {\n\t\t\treturn name, nil\n\t\t}\n\t}\n\treturn \"\", nil\n}", "func GetDeployment(name, namespace string) *Deployment {\n\tfor _, v := range GetDeployments() {\n\t\tif v.Name == name && v.Namespace == namespace {\n\t\t\treturn v\n\t\t}\n\t}\n\treturn nil\n}", "func GetDeployment(t *testing.T, k8client client.Client, deployName string) (*appsv1.Deployment, error) {\n\tdeploy := &appsv1.Deployment{}\n\tns := \"default\"\n\terr := k8client.Get(goctx.TODO(), types.NamespacedName{Namespace: ns, Name: deployName}, deploy)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Deployment doesnt exist: %v\", err)\n\t}\n\n\treturn deploy, nil\n}", "func (o DeliveryPipelineSerialPipelineStageStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshPtrOutput) Deployment() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *DeliveryPipelineSerialPipelineStageStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Deployment\n\t}).(pulumi.StringPtrOutput)\n}", "func Get(dev *model.Dev, namespace string, c kubernetes.Interface) (*appsv1.Deployment, error) {\n\tif namespace == \"\" {\n\t\treturn nil, fmt.Errorf(\"empty namespace\")\n\t}\n\n\tvar d *appsv1.Deployment\n\tvar err error\n\n\tif len(dev.Labels) == 0 {\n\t\td, err = c.AppsV1().Deployments(namespace).Get(dev.Name, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\tlog.Debugf(\"error while retrieving deployment %s/%s: %s\", namespace, dev.Name, err)\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tdeploys, err := c.AppsV1().Deployments(namespace).List(\n\t\t\tmetav1.ListOptions{\n\t\t\t\tLabelSelector: dev.LabelsSelector(),\n\t\t\t},\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif len(deploys.Items) == 0 {\n\t\t\treturn nil, fmt.Errorf(\"deployment for labels '%s' not found\", dev.LabelsSelector())\n\t\t}\n\t\tif len(deploys.Items) > 1 {\n\t\t\treturn nil, fmt.Errorf(\"Found '%d' deployments for labels '%s' instead of 1\", len(deploys.Items), dev.LabelsSelector())\n\t\t}\n\t\td = &deploys.Items[0]\n\t}\n\n\treturn d, nil\n}", "func (f *fixture) buildK8sDeployment(namespace k8s.Namespace, name string) (*appsv1.Deployment, *appsv1.ReplicaSet) {\n\td := &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tUID: types.UID(name + \"-uid\"),\n\t\t\tNamespace: namespace.String(),\n\t\t\tName: name,\n\t\t\tCreationTimestamp: apis.Now(),\n\t\t},\n\t}\n\trsName := name + \"-rs\"\n\trs := &appsv1.ReplicaSet{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tUID: types.UID(rsName + \"-uid\"),\n\t\t\tNamespace: namespace.String(),\n\t\t\tName: rsName,\n\t\t\tCreationTimestamp: 
apis.Now(),\n\t\t\tOwnerReferences: []metav1.OwnerReference{k8s.RuntimeObjToOwnerRef(d)},\n\t\t},\n\t}\n\n\treturn d, rs\n}", "func (s *deploymentServer) findDeployment(ctx context.Context, ns, name, tier string) (*appsv1.Deployment, error) {\n\t// log.Println(\"find deployment \" + ns + \" : \" + name + \".\" + tier)\n\tlog.Println(\"find deployment \" + ns + \" : \" + name)\n\tappsAPI := s.clientset.AppsV1()\n\tapiDeployments := appsAPI.Deployments(ns)\n\n\t// deployment, err := apiDeployments.Get(ctx, name+\".\"+tier, metav1.GetOptions{})\n\tdeployment, err := apiDeployments.Get(ctx, name, metav1.GetOptions{})\n\tif err != nil {\n\t\tswitch t := err.(type) {\n\t\tcase *errors.StatusError:\n\t\t\t{\n\t\t\t\tstatusCode := t.Status().Code\n\t\t\t\tif statusCode == 404 {\n\t\t\t\t\treturn nil, nil\n\t\t\t\t}\n\t\t\t\treturn nil, fmt.Errorf(\"could not get deployment `%s`, got error '%v' with status %d\", name, err, statusCode)\n\t\t\t}\n\t\t}\n\t\treturn nil, fmt.Errorf(\"could not get deployment `%s`, got error '%v'\", name, err)\n\t}\n\treturn deployment, nil\n}", "func (o DeliveryPipelineSerialPipelineStageStrategyCanaryRuntimeConfigKubernetesGatewayServiceMeshOutput) Deployment() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DeliveryPipelineSerialPipelineStageStrategyCanaryRuntimeConfigKubernetesGatewayServiceMesh) string {\n\t\treturn v.Deployment\n\t}).(pulumi.StringOutput)\n}", "func (client *Client) GetDeployment(namespace, deployment string) (model.Deployment, error) {\n\tvar depl model.Deployment\n\terr := client.RestAPI.Get(rest.Rq{\n\t\tResult: &depl,\n\t\tURL: rest.URL{\n\t\t\tPath: deploymentPath,\n\t\t\tParams: rest.P{\n\t\t\t\t\"namespace\": namespace,\n\t\t\t\t\"deployment\": deployment,\n\t\t\t},\n\t\t},\n\t})\n\treturn depl, err\n}", "func (m Manifest) Deployment() (ctypes.Deployment, error) {\n\tlid, err := m.Spec.LeaseID.toAkash()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgroup, err := m.Spec.Group.toAkash()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn deployment{lid: lid, group: group}, nil\n}", "func apicuritoDeployment(c *configuration.Config, a *api.Apicurito) (dep client.Object) {\n\t// Define a new deployment\n\tvar dm int32 = 420\n\tname := DefineUIName(a)\n\tdeployLabels := map[string]string{\n\t\t\"app\": \"apicurito\",\n\t\t\"component\": name,\n\t\t\"com.company\": \"Red_Hat\",\n\t\t\"rht.prod_name\": \"Red_Hat_Integration\",\n\t\t\"rht.prod_ver\": version.ShortVersion(),\n\t\t\"rht.comp\": \"Fuse\",\n\t\t\"rht.comp_ver\": version.ShortVersion(),\n\t\t\"rht.subcomp\": name,\n\t\t\"rht.subcomp_t\": \"infrastructure\",\n\t}\n\tdep = &appsv1.Deployment{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tAPIVersion: \"apps/v1\",\n\t\t\tKind: \"Deployment\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: a.Namespace,\n\t\t\tOwnerReferences: []metav1.OwnerReference{\n\t\t\t\t*metav1.NewControllerRef(a, schema.GroupVersionKind{\n\t\t\t\t\tGroup: api.SchemeGroupVersion.Group,\n\t\t\t\t\tVersion: api.SchemeGroupVersion.Version,\n\t\t\t\t\tKind: a.Kind,\n\t\t\t\t}),\n\t\t\t},\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &a.Spec.Size,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: labelComponent(name),\n\t\t\t},\n\t\t\tStrategy: appsv1.DeploymentStrategy{\n\t\t\t\tType: appsv1.RollingUpdateDeploymentStrategyType,\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: deployLabels,\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: 
[]corev1.Container{{\n\t\t\t\t\t\tImage: c.UiImage,\n\t\t\t\t\t\tImagePullPolicy: corev1.PullIfNotPresent,\n\t\t\t\t\t\tName: name,\n\t\t\t\t\t\tPorts: []corev1.ContainerPort{{\n\t\t\t\t\t\t\tContainerPort: 8080,\n\t\t\t\t\t\t\tName: \"api-port\",\n\t\t\t\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\t\t\t}},\n\t\t\t\t\t\tLivenessProbe: &corev1.Probe{\n\t\t\t\t\t\t\tHandler: corev1.Handler{\n\t\t\t\t\t\t\t\tHTTPGet: &corev1.HTTPGetAction{\n\t\t\t\t\t\t\t\t\tScheme: corev1.URISchemeHTTP,\n\t\t\t\t\t\t\t\t\tPort: intstr.FromString(\"api-port\"),\n\t\t\t\t\t\t\t\t\tPath: \"/\",\n\t\t\t\t\t\t\t\t}},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tReadinessProbe: &corev1.Probe{\n\t\t\t\t\t\t\tHandler: corev1.Handler{\n\t\t\t\t\t\t\t\tHTTPGet: &corev1.HTTPGetAction{\n\t\t\t\t\t\t\t\t\tScheme: corev1.URISchemeHTTP,\n\t\t\t\t\t\t\t\t\tPort: intstr.FromString(\"api-port\"),\n\t\t\t\t\t\t\t\t\tPath: \"/\",\n\t\t\t\t\t\t\t\t}},\n\t\t\t\t\t\t\tPeriodSeconds: 5,\n\t\t\t\t\t\t\tFailureThreshold: 2,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: name,\n\t\t\t\t\t\t\t\tMountPath: \"/html/config\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}},\n\t\t\t\t\tVolumes: []corev1.Volume{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: name,\n\t\t\t\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\t\t\t\tConfigMap: &corev1.ConfigMapVolumeSource{\n\t\t\t\t\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{\n\t\t\t\t\t\t\t\t\t\tName: name,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\tDefaultMode: &dm,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\treturn\n}", "func deploymentForPlex(p *v1alpha1.Plex) *appsv1.Deployment {\n\tls := labelsForPlex(p.Name)\n\treplicas := p.Spec.Size\n\n\tdep := &appsv1.Deployment{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tAPIVersion: \"apps/v1\",\n\t\t\tKind: \"Deployment\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: p.Name,\n\t\t\tNamespace: p.Namespace,\n\t\t},\n\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &replicas,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: ls,\n\t\t\t},\n\n\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: ls,\n\t\t\t\t},\n\n\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\tServiceAccountName:\t\"useroot\",\n\t\t\t\t\tContainers: []v1.Container{{\n\t\t\t\t\t\tName: \"plex\",\n\t\t\t\t\t\tResources: v1.ResourceRequirements{\n\t\t\t\t\t\t\tRequests: v1.ResourceList{\n\t\t\t\t\t\t\t\t\"cpu\":\t*resource.NewMilliQuantity(250, resource.BinarySI),\n\t\t\t\t\t\t\t\t\"memory\": *resource.NewMilliQuantity(64, resource.BinarySI),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tLimits: v1.ResourceList{\n\t\t\t\t\t\t\t\t\"cpu\":\t*resource.NewMilliQuantity(500, resource.BinarySI),\n\t\t\t\t\t\t\t\t\"memory\": *resource.NewMilliQuantity(128, resource.BinarySI),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tImage: \"plexinc/pms-docker:1.13.0.5023-31d3c0c65\",\n\t\t\t\t\t\tImagePullPolicy: v1.PullPolicy(\"Always\"),\n\t\t\t\t\t\tVolumeMounts:\t[]v1.VolumeMount{{\n\t\t\t\t\t\t\tName: \"plex-config\",\n\t\t\t\t\t\t\tMountPath: p.Spec.ConfigMountPath,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\t\n\t\t\t\t\t\t\tName: \"plex-transcode\",\n\t\t\t\t\t\t\tMountPath: p.Spec.TranscodeMountPath,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"plex-data\",\n\t\t\t\t\t\t\tMountPath: 
p.Spec.DataMountPath,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t\tPorts:\t[]v1.ContainerPort{{\n\t\t\t\t\t\t\t\tName:\t\"plex-ui\",\n\t\t\t\t\t\t\t\tProtocol:\tv1.ProtocolTCP,\n\t\t\t\t\t\t\t\tContainerPort:\t32400,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:\t\"plex-home\",\n\t\t\t\t\t\t\t\tProtocol:\tv1.ProtocolTCP,\n\t\t\t\t\t\t\t\tContainerPort: 3005,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:\t\"plex-roku\",\n\t\t\t\t\t\t\t\tProtocol:\tv1.ProtocolTCP,\n\t\t\t\t\t\t\t\tContainerPort: 8324,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:\t\"plex-dlna-tcp\",\n\t\t\t\t\t\t\t\tProtocol:\tv1.ProtocolTCP,\n\t\t\t\t\t\t\t\tContainerPort: 32469,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"plex-dlna-udp\",\n\t\t\t\t\t\t\t\tProtocol:\tv1.ProtocolUDP,\n\t\t\t\t\t\t\t\tContainerPort: 1900,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"plex-discovery1\",\n\t\t\t\t\t\t\t\tProtocol:\tv1.ProtocolUDP,\n\t\t\t\t\t\t\t\tContainerPort: 32410,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"plex-discovery2\",\n\t\t\t\t\t\t\t\tProtocol: v1.ProtocolUDP,\n\t\t\t\t\t\t\t\tContainerPort: 32412,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"plex-discovery3\",\n\t\t\t\t\t\t\t\tProtocol:\tv1.ProtocolUDP,\n\t\t\t\t\t\t\t\tContainerPort: 32413,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"plex-discovery4\",\n\t\t\t\t\t\t\t\tProtocol: v1.ProtocolUDP,\n\t\t\t\t\t\t\t\tContainerPort: 32414,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tEnv:\t[]v1.EnvVar{{\n\t\t\t\t\t\t\t\tName: \"TZ\",\n\t\t\t\t\t\t\t\tValue: p.Spec.TimeZone,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"CLAIM_TOKEN\",\n\t\t\t\t\t\t\t\tValue: p.Spec.ClaimToken,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tReadinessProbe: &v1.Probe{\n\t\t\t\t\t\t\tHandler: v1.Handler{\n\t\t\t\t\t\t\t\tExec: &v1.ExecAction{\n\t\t\t\t\t\t\t\t\tCommand: []string{\n\t\t\t\t\t\t\t\t\t\t\"/bin/sh\",\n\t\t\t\t\t\t\t\t\t\t\"-c\",\n\t\t\t\t\t\t\t\t\t\t\"LD_LIBRARY_PATH=/usr/lib/plexmediaserver '/usr/lib/plexmediaserver/Plex Media Server Tests' --gtest_filter=SanityChecks\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tInitialDelaySeconds: 5,\n\t\t\t\t\t\t\t\tPeriodSeconds: 10,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\tLivenessProbe: &v1.Probe{\n\t\t\t\t\t\t\tHandler: v1.Handler{\n\t\t\t\t\t\t\t\t\tHTTPGet: &v1.HTTPGetAction{\n\t\t\t\t\t\t\t\t\t\tPath: \"/\",\n\t\t\t\t\t\t\t\t\t\tPort: intstr.FromInt(32400),\t\n\t\t\t\t\t\t\t\t\t\tScheme:\tv1.URISchemeHTTP,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tInitialDelaySeconds: 15,\n\t\t\t\t\t\t\t\tPeriodSeconds: 20,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t}},\n\t\t\t\t\tInitContainers: []v1.Container{{\n\t\t\t\t\t\tName: \"init\",\n\t\t\t\t\t\tResources: v1.ResourceRequirements{\n\t\t\t\t\t\t\tRequests: v1.ResourceList{\n\t\t\t\t\t\t\t\t\"cpu\":\t*resource.NewMilliQuantity(250, resource.BinarySI),\n\t\t\t\t\t\t\t\t\"memory\": *resource.NewMilliQuantity(64, resource.BinarySI),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tLimits: v1.ResourceList{\n\t\t\t\t\t\t\t\t\"cpu\":\t*resource.NewMilliQuantity(500, resource.BinarySI),\n\t\t\t\t\t\t\t\t\"memory\": *resource.NewMilliQuantity(128, resource.BinarySI),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tImage: \"busybox:1.29\",\n\t\t\t\t\t\tImagePullPolicy: v1.PullPolicy(\"Always\"),\n\t\t\t\t\t\tCommand:\t[]string{\n\t\t\t\t\t\t\t\"/bin/sh\",\n\t\t\t\t\t\t\t\"-c\",\n\t\t\t\t\t\t\t\"mkdir -p /config/Library/Application Support/Plex Media Server && 
cp /etc/plex/Preferences.xml /config/Library/Application Support/Plex Media Server\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tVolumeMounts:\t[]v1.VolumeMount{{\n\t\t\t\t\t\t\tName: \"plex-config\",\n\t\t\t\t\t\t\tMountPath: p.Spec.ConfigMountPath,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\t\n\t\t\t\t\t\t\tName: \"plex-preferences\",\n\t\t\t\t\t\t\tMountPath: \"/etc/plex\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}},\n\t\t\t\t\tVolumes:\t[]v1.Volume{{\n\t\t\t\t\t\tName:\t\"plex-config\",\n\t\t\t\t\t\tVolumeSource:\tv1.VolumeSource{\n\t\t\t\t\t\t\t\tPersistentVolumeClaim:\t&v1.PersistentVolumeClaimVolumeSource{\n\t\t\t\t\t\t\t\t\tClaimName:\t\"plex-config\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\tName: \"plex-transcode\",\n\t\t\t\t\t\tVolumeSource: v1.VolumeSource{\n\t\t\t\t\t\t\t\tPersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{\n\t\t\t\t\t\t\t\t\tClaimName: \"plex-transcode\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\tName: \"plex-data\",\n\t\t\t\t\t\tVolumeSource:\tv1.VolumeSource{\n\t\t\t\t\t\t\t\tPersistentVolumeClaim:\t&v1.PersistentVolumeClaimVolumeSource{\n\t\t\t\t\t\t\t\t\tClaimName:\t\"plex-data\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\tName: \"plex-preferences\",\n\t\t\t\t\t\tVolumeSource:\tv1.VolumeSource{\n\t\t\t\t\t\t\t\tConfigMap:\t&v1.ConfigMapVolumeSource{\n\t\t\t\t\t\t\t\t\tLocalObjectReference:\tv1.LocalObjectReference{\n\t\t\t\t\t\t\t\t\t\tName: p.Spec.ConfigMapName,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n},\n\t}\naddOwnerRefToObject(dep, asOwner(p))\n\treturn dep\n}", "func (kc *k8sCluster) findDeployment(c context.Context, namespace, name string) (*kates.Deployment, error) {\n\tdep := &kates.Deployment{\n\t\tTypeMeta: kates.TypeMeta{Kind: \"Deployment\"},\n\t\tObjectMeta: kates.ObjectMeta{Name: name, Namespace: namespace},\n\t}\n\tif err := kc.client.Get(c, dep, dep); err != nil {\n\t\treturn nil, err\n\t}\n\treturn dep, nil\n}", "func RappDeployment(context dtypes.RContext) (*appsv1.Deployment, error) {\n\n\tconst rappDeployment = `\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: rapp-{{ .Name }}\n labels:\n app.kubernetes.io/name: rapp\n app.kubernetes.io/instance: \"{{ .Name }}\"\n app.kubernetes.io/managed-by: MetaController\nspec:\n selector:\n matchLabels:\n app.kubernetes.io/name: rapp\n app.kubernetes.io/instance: \"{{ .Name }}\"\n replicas: {{ .Replicas }}\n template:\n metadata:\n labels:\n app.kubernetes.io/name: rapp\n app.kubernetes.io/instance: \"{{ .Name }}\"\n app.kubernetes.io/managed-by: MetaController\n spec:\n {{- with .PullSecrets }}\n imagePullSecrets:\n {{range $val := .}}\n - name: {{ $val.name }}\n {{end}}\n {{- end }} \n containers:\n - name: rapp\n image: \"{{ .Image }}\"\n imagePullPolicy: {{ .PullPolicy }}\n command:\n - /start-rapp.sh\n env:\n - name: HOST\n value: \"0.0.0.0\"\n - name: PORT\n value: \"8080\"\n{{- with .Env }}\n{{ toYaml . | indent 10 }}\n{{- end }}\n ports:\n - name: http\n containerPort: 8080\n volumeMounts:\n - mountPath: /start-rapp.sh\n subPath: start-rapp.sh\n name: rapp-script\n - mountPath: /var/tmp\n readOnly: false\n name: localdir\n{{- with .VolumeMounts }}\n{{ toYaml . 
| indent 8 }}\n{{- end }}\n readinessProbe:\n httpGet:\n path: /\n port: 8080\n initialDelaySeconds: 10\n timeoutSeconds: 10\n periodSeconds: 20\n failureThreshold: 3\n volumes:\n - configMap:\n name: rapp-configs-{{ .Name }}\n defaultMode: 0777\n name: rapp-script\n - hostPath:\n path: /var/tmp\n type: DirectoryOrCreate\n name: localdir\n\n{{- with .Volumes }}\n{{ toYaml . | indent 6 }}\n{{- end }}\n{{- with .NodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n{{- end }}\n{{- with .Affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n{{- end }}\n{{- with .Tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n{{- end }}\n\n`\n\tif context.Daemon {\n\t\tlog.Infof(\"Adding Daemon affinity rules\")\n\t\tif context.Affinity == nil {\n\t\t\tlog.Debugf(\"context.Affinity.podAntiAffinity does not exist\")\n\t\t\tcontext.Affinity = map[string]interface{}{}\n\t\t}\n\t\tif _, ok := context.Affinity.(map[string]interface{})[\"podAntiAffinity\"]; !ok {\n\t\t\tlog.Debugf(\"context.Affinity.podAntiAffinity does not exist\")\n\t\t\tcontext.Affinity.(map[string]interface{})[\"podAntiAffinity\"] = map[string]interface{}{}\n\t\t}\n\t\tcAp := context.Affinity.(map[string]interface{})[\"podAntiAffinity\"]\n\n\t\tif _, ok := cAp.(map[string]interface{})[\"requiredDuringSchedulingIgnoredDuringExecution\"]; !ok {\n\t\t\tlog.Debugf(\"context.Affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution does not exist\")\n\t\t\tcAp.(map[string]interface{})[\"requiredDuringSchedulingIgnoredDuringExecution\"] = []interface{}{}\n\t\t}\n\t\tcAp.(map[string]interface{})[\"requiredDuringSchedulingIgnoredDuringExecution\"] =\n\t\t\tappend(cAp.(map[string]interface{})[\"requiredDuringSchedulingIgnoredDuringExecution\"].([]interface{}),\n\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\"labelSelector\": map[string][]map[string]interface{}{\n\t\t\t\t\t\t\"matchExpressions\": []map[string]interface{}{\n\t\t\t\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\t\t\t\"key\": \"app.kubernetes.io/instance\",\n\t\t\t\t\t\t\t\t\"operator\": \"In\",\n\t\t\t\t\t\t\t\t\"values\": []string{context.Name}}}},\n\t\t\t\t\t\"topologyKey\": \"kubernetes.io/hostname\"})\n\t}\n\n\tresult, err := utils.ApplyTemplate(rappDeployment, context)\n\tif err != nil {\n\t\tlog.Debugf(\"ApplyTemplate Error: %+v\\n\", err)\n\t\treturn nil, err\n\t}\n\n\tdeployment := &appsv1.Deployment{}\n\tif err := json.Unmarshal([]byte(result), deployment); err != nil {\n\t\treturn nil, err\n\t}\n\treturn deployment, err\n}", "func (qjrDeployment *QueueJobResDeployment) getDeploymentTemplate(qjobRes *arbv1.XQueueJobResource) (*apps.Deployment, error) {\n\tdeploymentGVK := schema.GroupVersion{Group: apps.GroupName, Version: \"v1beta1\"}.WithKind(\"Deployment\")\n\tobj, _, err := qjrDeployment.jsonSerializer.Decode(qjobRes.Template.Raw, &deploymentGVK, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdeployment, ok := obj.(*apps.Deployment)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"Queuejob resource not defined as a Deployment\")\n\t}\n\n\treturn deployment, nil\n\n}", "func (c clientDeploymentInterface) GetDeployment(ctx apirequest.Context, name string, options *metav1.GetOptions) (*kapi.ReplicationController, error) {\n\topts := metav1.GetOptions{}\n\tif options != nil {\n\t\topts = *options\n\t}\n\treturn c.KubeClient.Core().ReplicationControllers(apirequest.NamespaceValue(ctx)).Get(name, opts)\n}", "func (pr *policyRecommendationComponent) deployment() *appsv1.Deployment {\n\tenvs := []corev1.EnvVar{\n\t\t{\n\t\t\tName: \"LOG_LEVEL\",\n\t\t\tValue: 
\"Info\",\n\t\t},\n\t\t{\n\t\t\tName: \"MULTI_CLUSTER_FORWARDING_CA\",\n\t\t\tValue: pr.cfg.TrustedBundle.MountPath(),\n\t\t},\n\t\t{\n\t\t\tName: \"LINSEED_URL\",\n\t\t\tValue: relasticsearch.LinseedEndpoint(pr.SupportedOSType(), pr.cfg.ClusterDomain),\n\t\t},\n\t\t{\n\t\t\tName: \"LINSEED_CA\",\n\t\t\tValue: pr.cfg.TrustedBundle.MountPath(),\n\t\t},\n\t\t{\n\t\t\tName: \"LINSEED_CLIENT_CERT\",\n\t\t\tValue: pr.cfg.PolicyRecommendationCertSecret.VolumeMountCertificateFilePath(),\n\t\t},\n\t\t{\n\t\t\tName: \"LINSEED_CLIENT_KEY\",\n\t\t\tValue: pr.cfg.PolicyRecommendationCertSecret.VolumeMountKeyFilePath(),\n\t\t},\n\t\t{\n\t\t\tName: \"LINSEED_TOKEN\",\n\t\t\tValue: GetLinseedTokenPath(false),\n\t\t},\n\t}\n\n\tvolumeMounts := pr.cfg.TrustedBundle.VolumeMounts(pr.SupportedOSType())\n\tvolumeMounts = append(volumeMounts, pr.cfg.PolicyRecommendationCertSecret.VolumeMount(pr.SupportedOSType()))\n\n\tcontrollerContainer := corev1.Container{\n\t\tName: \"policy-recommendation-controller\",\n\t\tImage: pr.image,\n\t\tImagePullPolicy: ImagePullPolicy(),\n\t\tEnv: envs,\n\t\tSecurityContext: securitycontext.NewNonRootContext(),\n\t\tVolumeMounts: volumeMounts,\n\t}\n\n\tvolumes := []corev1.Volume{\n\t\tpr.cfg.TrustedBundle.Volume(),\n\t\tpr.cfg.PolicyRecommendationCertSecret.Volume(),\n\t}\n\tvar initContainers []corev1.Container\n\tif pr.cfg.PolicyRecommendationCertSecret != nil && pr.cfg.PolicyRecommendationCertSecret.UseCertificateManagement() {\n\t\tinitContainers = append(initContainers, pr.cfg.PolicyRecommendationCertSecret.InitContainer(PolicyRecommendationNamespace))\n\t}\n\n\tcontainer := relasticsearch.ContainerDecorateIndexCreator(\n\t\trelasticsearch.ContainerDecorate(\n\t\t\tcontrollerContainer,\n\t\t\tpr.cfg.ESClusterConfig.ClusterName(),\n\t\t\tElasticsearchPolicyRecommendationUserSecret,\n\t\t\tpr.cfg.ClusterDomain,\n\t\t\trmeta.OSTypeLinux,\n\t\t),\n\t\tpr.cfg.ESClusterConfig.Replicas(),\n\t\tpr.cfg.ESClusterConfig.Shards())\n\n\tpodTemplateSpec := relasticsearch.DecorateAnnotations(&corev1.PodTemplateSpec{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: PolicyRecommendationName,\n\t\t\tNamespace: PolicyRecommendationNamespace,\n\t\t\tAnnotations: pr.policyRecommendationAnnotations(),\n\t\t},\n\t\tSpec: corev1.PodSpec{\n\t\t\tTolerations: pr.cfg.Installation.ControlPlaneTolerations,\n\t\t\tNodeSelector: pr.cfg.Installation.ControlPlaneNodeSelector,\n\t\t\tServiceAccountName: PolicyRecommendationName,\n\t\t\tImagePullSecrets: secret.GetReferenceList(pr.cfg.PullSecrets),\n\t\t\tContainers: []corev1.Container{\n\t\t\t\tcontainer,\n\t\t\t},\n\t\t\tInitContainers: initContainers,\n\t\t\tVolumes: volumes,\n\t\t},\n\t}, pr.cfg.ESClusterConfig, pr.cfg.ESSecrets).(*corev1.PodTemplateSpec)\n\n\treturn &appsv1.Deployment{\n\t\tTypeMeta: metav1.TypeMeta{Kind: \"Deployment\", APIVersion: \"apps/v1\"},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: PolicyRecommendationName,\n\t\t\tNamespace: PolicyRecommendationNamespace,\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: ptr.Int32ToPtr(1),\n\t\t\tTemplate: *podTemplateSpec,\n\t\t},\n\t}\n}", "func (client DeploymentsClient) Get(ctx context.Context, resourceGroupName string, serviceName string, appName string, deploymentName string) (result DeploymentResource, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/DeploymentsClient.Get\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.Response.Response != nil {\n\t\t\t\tsc = result.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, 
sc, err)\n\t\t}()\n\t}\n\treq, err := client.GetPreparer(ctx, resourceGroupName, serviceName, appName, deploymentName)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"appplatform.DeploymentsClient\", \"Get\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.GetSender(req)\n\tif err != nil {\n\t\tresult.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"appplatform.DeploymentsClient\", \"Get\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult, err = client.GetResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"appplatform.DeploymentsClient\", \"Get\", resp, \"Failure responding to request\")\n\t\treturn\n\t}\n\n\treturn\n}", "func newDeployment(name, ns string, replicas int32) *apps.Deployment {\n\treturn &apps.Deployment{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"Deployment\",\n\t\t\tAPIVersion: \"apps/v1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: ns,\n\t\t\tName: name,\n\t\t},\n\t\tSpec: apps.DeploymentSpec{\n\t\t\tReplicas: &replicas,\n\t\t\tSelector: &metav1.LabelSelector{MatchLabels: testLabels()},\n\t\t\tStrategy: apps.DeploymentStrategy{\n\t\t\t\tType: apps.RollingUpdateDeploymentStrategyType,\n\t\t\t\tRollingUpdate: new(apps.RollingUpdateDeployment),\n\t\t\t},\n\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: testLabels(),\n\t\t\t\t},\n\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: fakeContainerName,\n\t\t\t\t\t\t\tImage: fakeImage,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func CalicoDeployment(repo string) string {\n\treturn calicoCommon(repo, \"kube-controllers\")\n}", "func getPreviousDeployment(ctx context.Context, clientset kubernetes.Interface, controllerResource *piraeusv1.LinstorController, log logr.Logger) (string, error) {\n\tlog = log.WithValues(\n\t\t\"name\", controllerResource.Name,\n\t\t\"namespace\", controllerResource.Namespace,\n\t)\n\n\tmeta := getObjectMeta(controllerResource, \"%s-controller\")\n\n\tlog.V(DEBUG).Info(\"fetching existing deployment\", \"meta\", meta)\n\n\tdeployment, err := clientset.AppsV1().Deployments(meta.Namespace).Get(ctx, meta.Name, metav1.GetOptions{})\n\tif err != nil && !errors.IsNotFound(err) {\n\t\treturn \"\", fmt.Errorf(\"failed to check on existing deployment: %w\", err)\n\t}\n\n\tif deployment == nil {\n\t\tlog.V(DEBUG).Info(\"no deployment found, previous deployment unknown\")\n\n\t\treturn \"unknown\", nil\n\t}\n\n\tlog.V(DEBUG).Info(\"got deployment\")\n\n\tcontainers := deployment.Spec.Template.Spec.Containers\n\n\tfor i := range containers {\n\t\tif containers[i].Name == \"linstor-controller\" {\n\t\t\treturn containers[i].Image, nil\n\t\t}\n\t}\n\n\treturn \"\", nil\n}", "func (a *Client) GetDeployment(params *GetDeploymentParams, authInfo runtime.ClientAuthInfoWriter) (*GetDeploymentOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetDeploymentParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"get-deployment\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/deployments/{deployment_id}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetDeploymentReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: 
params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*GetDeploymentOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for get-deployment: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func dep(name, namespace string, replicas int32, labels, annotations map[string]string) *kappsv1.Deployment {\n\trep := &replicas\n\treturn &kappsv1.Deployment{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"Deployment\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: namespace,\n\t\t\tLabels: labels,\n\t\t\tAnnotations: annotations,\n\t\t},\n\t\tSpec: kappsv1.DeploymentSpec{\n\t\t\tReplicas: rep,\n\t\t},\n\t}\n}", "func GetDeployment(k8s *Client, ns, name string) (*appsv1.Deployment, error) {\n\tdeploySvc := k8s.AppsV1().Deployments(ns)\n\treturn deploySvc.Get(name, metav1.GetOptions{})\n}", "func newDeployment() *appsv1.Deployment {\n\tvar replicas int32 = 1\n\treturn &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: tNs,\n\t\t\tName: tName,\n\t\t\tLabels: map[string]string{\n\t\t\t\tapplicationNameLabelKey: tName,\n\t\t\t},\n\t\t\tOwnerReferences: []metav1.OwnerReference{tOwnerRef},\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &replicas,\n\t\t\tSelector: &metav1.LabelSelector{MatchLabels: map[string]string{applicationNameLabelKey: tName}},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\tdashboardLabelKey: dashboardLabelValue,\n\t\t\t\t\t\teventSourceLabelKey: eventSourceLabelValue,\n\t\t\t\t\t\tapplicationNameLabelKey: tName,\n\t\t\t\t\t\tapplicationLabelKey: tName,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{{\n\t\t\t\t\t\tImage: tImg,\n\t\t\t\t\t\tPorts: []corev1.ContainerPort{{\n\t\t\t\t\t\t\tName: portName,\n\t\t\t\t\t\t\tContainerPort: tPort,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: metricsPortName,\n\t\t\t\t\t\t\t\tContainerPort: metricsPort,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tName: adapterContainerName,\n\t\t\t\t\t\tEnv: tEnvVars,\n\t\t\t\t\t\tReadinessProbe: &corev1.Probe{\n\t\t\t\t\t\t\tHandler: corev1.Handler{\n\t\t\t\t\t\t\t\tHTTPGet: &corev1.HTTPGetAction{\n\t\t\t\t\t\t\t\t\tPath: adapterHealthEndpoint,\n\t\t\t\t\t\t\t\t\tPort: intstr.FromInt(adapterPort),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func newDeployment(deploymentName string, replicas int32, podLabels map[string]string, containerName, image string, strategyType appsv1.DeploymentStrategyType) *appsv1.Deployment {\n\tzero := int64(0)\n\treturn &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: deploymentName,\n\t\t\tLabels: podLabels,\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &replicas,\n\t\t\tSelector: &metav1.LabelSelector{MatchLabels: podLabels},\n\t\t\tStrategy: appsv1.DeploymentStrategy{\n\t\t\t\tType: strategyType,\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: podLabels,\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tTerminationGracePeriodSeconds: 
&zero,\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: containerName,\n\t\t\t\t\t\t\tImage: image,\n\t\t\t\t\t\t\tSecurityContext: &corev1.SecurityContext{},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func (p *PodConverter) DeploymentFor(ctx context.Context, namespace string, pod *kube_core.Pod) (string, bool, error) {\n\towners := pod.GetObjectMeta().GetOwnerReferences()\n\tvar rs *kube_apps.ReplicaSet\n\tfor _, owner := range owners {\n\t\tif owner.Kind == \"ReplicaSet\" {\n\t\t\trs = &kube_apps.ReplicaSet{}\n\t\t\trsKey := kube_client.ObjectKey{Namespace: namespace, Name: owner.Name}\n\t\t\tif err := p.ReplicaSetGetter.Get(ctx, rsKey, rs); err != nil {\n\t\t\t\treturn \"\", false, err\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif rs == nil {\n\t\treturn \"\", false, nil\n\t}\n\n\trsOwners := rs.GetObjectMeta().GetOwnerReferences()\n\tfor _, owner := range rsOwners {\n\t\tif owner.Kind == \"Deployment\" {\n\t\t\treturn owner.Name, true, nil\n\t\t}\n\t}\n\n\treturn \"\", false, nil\n}", "func (c *Controller) GetOwnerDeployment(ctx context.Context, ns, name string) (*appsv1.Deployment, error) {\n\tpod, err := c.kubeclientset.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})\n\tif err != nil {\n\t\tklog.Errorf(\"failed to get pod: %v\", err)\n\t\treturn nil, err\n\t}\n\n\townerRS := findOwner(pod.OwnerReferences, \"ReplicaSet\")\n\tif ownerRS == nil {\n\t\treturn nil, fmt.Errorf(\"failed to get OwnerReferences in Pod %s/%s\", pod.Namespace, pod.Name)\n\t}\n\n\trs, err := c.kubeclientset.AppsV1().ReplicaSets(ns).Get(ctx, ownerRS.Name, metav1.GetOptions{})\n\tif err != nil {\n\t\tklog.Errorf(\"failed to get replicaset: %v\", err)\n\t\treturn nil, err\n\t}\n\n\townerDeploy := findOwner(rs.OwnerReferences, \"Deployment\")\n\tif ownerDeploy == nil {\n\t\treturn nil, fmt.Errorf(\"failed to get OwnerReferences in ReplicaSet %s/%s\", rs.Namespace, rs.Name)\n\t}\n\n\tdeploy, err := c.kubeclientset.AppsV1().Deployments(ns).Get(ctx, ownerDeploy.Name, metav1.GetOptions{})\n\tif err != nil {\n\t\tklog.Errorf(\"failed to get deployment: %v\", err)\n\t\treturn nil, err\n\t}\n\n\treturn deploy, nil\n}", "func (r *InstanceReconciler) deploymentForInstance(m *terraformv1alpha1.Instance) *appsv1.Deployment {\n\tls := labelsForInstance(m.Name)\n\treplicas := m.Spec.Size\n\n\tdep := &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: m.Name,\n\t\t\tNamespace: m.Namespace,\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &replicas,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: ls,\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: ls,\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{{\n\t\t\t\t\t\tImage: \"memcached:1.4.36-alpine\",\n\t\t\t\t\t\tName: \"memcached\",\n\t\t\t\t\t\tCommand: []string{\"memcached\", \"-m=64\", \"-o\", \"modern\", \"-v\"},\n\t\t\t\t\t\tPorts: []corev1.ContainerPort{{\n\t\t\t\t\t\t\tContainerPort: 11211,\n\t\t\t\t\t\t\tName: \"memcached\",\n\t\t\t\t\t\t}},\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\t// Set Provider instance as the owner and controller\n\tctrl.SetControllerReference(m, dep, r.Scheme)\n\treturn dep\n}", "func (a GetSLITriggeredAdapter) GetDeployment() string {\n\treturn a.event.Deployment\n}", "func (o NetworkEndpointGroupServerlessDeploymentOutput) Resource() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v NetworkEndpointGroupServerlessDeployment) *string { return 
v.Resource }).(pulumi.StringPtrOutput)\n}", "func (a ProblemAdapter) GetDeployment() string {\n\treturn \"\"\n}", "func createDeployment(k *kabanerov1alpha1.Kabanero, clientset *kubernetes.Clientset, c client.Client, name string, image string, env []corev1.EnvVar, envFrom []corev1.EnvFromSource, livenessProbe *corev1.Probe, reqLogger logr.Logger) error {\n\tcl := clientset.AppsV1().Deployments(k.ObjectMeta.Namespace)\n\n\t// Check if the Deployment resource already exists.\n\tdInstance := &appsv1.Deployment{}\n\terr := c.Get(context.Background(), types.NamespacedName{\n\t\tName: name,\n\t\tNamespace: k.ObjectMeta.Namespace}, dInstance)\n\n\tdeploymentExists := true\n\tif err != nil {\n\t\tif apierrors.IsNotFound(err) == false {\n\t\t\treturn err\n\t\t}\n\n\t\t// The deployment does not already exist. Create one.\n\t\tdeploymentExists = false\n\n\t\t// Gather Kabanero operator ownerReference information.\n\t\townerRef, err := getOwnerReference(k, c, reqLogger)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Initialize the deployment\n\t\tvar repCount int32 = 1\n\t\tdInstance = &appsv1.Deployment{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: name,\n\t\t\t\tOwnerReferences: []metav1.OwnerReference{\n\t\t\t\t\t{\n\t\t\t\t\t\tAPIVersion: ownerRef.APIVersion,\n\t\t\t\t\t\tKind: ownerRef.Kind,\n\t\t\t\t\t\tName: ownerRef.Name,\n\t\t\t\t\t\tUID: ownerRef.UID,\n\t\t\t\t\t\tController: ownerRef.Controller,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tSpec: appsv1.DeploymentSpec{\n\t\t\t\tReplicas: &repCount,\n\t\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\t\t\"app\": name,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\t\"app\": name,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\t\tServiceAccountName: name,\n\t\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: name,\n\t\t\t\t\t\t\t\tImagePullPolicy: \"Always\",\n\t\t\t\t\t\t\t\tPorts: []corev1.ContainerPort{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tContainerPort: 9443,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\n\t// Here we update the things that can change. 
In the future we could\n\t// consider re-applying all the fields in case someone hand-edited the\n\t// deployment object in an incompatible way.\n\tdInstance.Spec.Template.Spec.Containers[0].Env = env\n\tdInstance.Spec.Template.Spec.Containers[0].EnvFrom = envFrom\n\tdInstance.Spec.Template.Spec.Containers[0].Image = image\n\tdInstance.Spec.Template.Spec.Containers[0].LivenessProbe = livenessProbe\n\n\tif deploymentExists == false {\n\t\treqLogger.Info(fmt.Sprintf(\"createDeployment: Deployment for create: %v\", dInstance))\n\n\t\t_, err = cl.Create(dInstance)\n\t} else {\n\t\treqLogger.Info(fmt.Sprintf(\"createDeployment: Deployment for update: %v\", dInstance))\n\n\t\t_, err = cl.Update(dInstance)\n\t}\n\n\treturn err\n}", "func (o LookupOccurrenceResultOutput) Deployment() DeploymentResponseOutput {\n\treturn o.ApplyT(func(v LookupOccurrenceResult) DeploymentResponse { return v.Deployment }).(DeploymentResponseOutput)\n}", "func getDeploymentPatch(podTemplate *corev1.PodTemplateSpec, annotations map[string]string) (types.PatchType, []byte, error) {\n\t// Create a patch of the Deployment that replaces spec.template\n\tpatch, err := json.Marshal([]interface{}{\n\t\tmap[string]interface{}{\n\t\t\t\"op\": \"replace\",\n\t\t\t\"path\": \"/spec/template\",\n\t\t\t\"value\": podTemplate,\n\t\t},\n\t\tmap[string]interface{}{\n\t\t\t\"op\": \"replace\",\n\t\t\t\"path\": \"/metadata/annotations\",\n\t\t\t\"value\": annotations,\n\t\t},\n\t})\n\treturn types.JSONPatchType, patch, err\n}", "func toDeployment(s latest.ServiceConfig, objectMeta metav1.ObjectMeta, podTemplate apiv1.PodTemplateSpec, labelSelector map[string]string, original appsv1.Deployment) *appsv1.Deployment {\n\trevisionHistoryLimit := int32(3)\n\tdep := original.DeepCopy()\n\tdep.ObjectMeta = objectMeta\n\tdep.Spec.Replicas = toReplicas(s.Deploy.Replicas)\n\tdep.Spec.RevisionHistoryLimit = &revisionHistoryLimit\n\tdep.Spec.Template = forceRestartPolicy(podTemplate, apiv1.RestartPolicyAlways)\n\tdep.Spec.Strategy = toDeploymentStrategy(s, original.Spec.Strategy)\n\tdep.Spec.Selector = &metav1.LabelSelector{\n\t\tMatchLabels: labelSelector,\n\t}\n\treturn dep\n}", "func generateDeployment(image string) clusterv1.MachineDeployment {\n\tmachineLabels := map[string]string{\"name\": image}\n\treturn clusterv1.MachineDeployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: image,\n\t\t\tAnnotations: make(map[string]string),\n\t\t},\n\t\tSpec: clusterv1.MachineDeploymentSpec{\n\t\t\tReplicas: pointer.Int32(3),\n\t\t\tSelector: metav1.LabelSelector{MatchLabels: machineLabels},\n\t\t\tTemplate: clusterv1.MachineTemplateSpec{\n\t\t\t\tObjectMeta: clusterv1.ObjectMeta{\n\t\t\t\t\tLabels: machineLabels,\n\t\t\t\t},\n\t\t\t\tSpec: clusterv1.MachineSpec{\n\t\t\t\t\tNodeDrainTimeout: &metav1.Duration{Duration: 10 * time.Second},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func (r *Reconciler) getChannelDeployment(secret *corev1.Secret) (*appsv1.Deployment, error) {\n\n\t// Get The Channel Deployment Name (One Channel Deployment Per Kafka Auth Secret)\n\tdeploymentName := util.ChannelDnsSafeName(secret.Name)\n\n\t// Get The Channel Deployment By Namespace / Name\n\tdeployment, err := r.deploymentLister.Deployments(commonconstants.KnativeEventingNamespace).Get(deploymentName)\n\n\t// Return The Results\n\treturn deployment, err\n}", "func (c *Container) Deployment() (*deploy.Deployment, error) {\n\tmeta := kube.ObjectMeta{\n\t\tGenerateName: c.name(),\n\t}\n\n\treturn deployWithPod(meta, c)\n}", "func SimpleDeployment(image, tag string) 
*apiv1b2.Deployment {\n\tname := deploymentName\n\timgtag := fmt.Sprintf(\"%s:%s\", image, tag)\n\n\treturn &apiv1b2.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tLabels: map[string]string{\"app\": name, \"tier\": \"api\"},\n\t\t},\n\t\tSpec: apiv1b2.DeploymentSpec{\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: map[string]string{\"app\": name, \"tier\": \"api\"},\n\t\t\t},\n\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{Name: name,\n\t\t\t\t\tLabels: map[string]string{\"app\": name, \"tier\": \"api\"},\n\t\t\t\t},\n\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\tv1.Container{\n\t\t\t\t\t\t\tName: name,\n\t\t\t\t\t\t\tImage: imgtag,\n\t\t\t\t\t\t\tImagePullPolicy: \"Always\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func (c *Client) GetDeploymentOperator() scheduler.DeploymentOperator {\n\treturn c.Deployment\n}", "func MakeDeployment(config *deployapi.DeploymentConfig, codec runtime.Codec) (*api.ReplicationController, error) {\n\tvar err error\n\tvar encodedConfig string\n\n\tif encodedConfig, err = EncodeDeploymentConfig(config, codec); err != nil {\n\t\treturn nil, err\n\t}\n\n\tdeployment := &api.ReplicationController{\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: LatestDeploymentIDForConfig(config),\n\t\t\tAnnotations: map[string]string{\n\t\t\t\tdeployapi.DeploymentConfigAnnotation: config.Name,\n\t\t\t\tdeployapi.DeploymentStatusAnnotation: string(deployapi.DeploymentStatusNew),\n\t\t\t\tdeployapi.DeploymentEncodedConfigAnnotation: encodedConfig,\n\t\t\t\tdeployapi.DeploymentVersionAnnotation: strconv.Itoa(config.LatestVersion),\n\t\t\t},\n\t\t\tLabels: config.Labels,\n\t\t},\n\t\tSpec: config.Template.ControllerTemplate,\n\t}\n\n\t// The deployment should be inactive initially\n\tdeployment.Spec.Replicas = 0\n\n\t// Ensure that pods created by this deployment controller can be safely associated back\n\t// to the controller, and that multiple deployment controllers for the same config don't\n\t// manipulate each others' pods.\n\tdeployment.Spec.Template.Labels[deployapi.DeploymentConfigLabel] = config.Name\n\tdeployment.Spec.Template.Labels[deployapi.DeploymentLabel] = deployment.Name\n\tdeployment.Spec.Selector[deployapi.DeploymentConfigLabel] = config.Name\n\tdeployment.Spec.Selector[deployapi.DeploymentLabel] = deployment.Name\n\n\treturn deployment, nil\n}", "func (o NetworkEndpointGroupServerlessDeploymentPtrOutput) Resource() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *NetworkEndpointGroupServerlessDeployment) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Resource\n\t}).(pulumi.StringPtrOutput)\n}", "func getDeploymentByReplicaSet(clientset *kubernetes.Clientset, namespace, name string) (*Owner, error) {\n\trs, err := clientset.AppsV1().ReplicaSets(namespace).Get(name, meta_v1.GetOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\towner := &Owner{}\n\tfor _, ref := range rs.OwnerReferences {\n\t\tif strings.EqualFold(ref.Kind, \"Deployment\") {\n\t\t\towner.Kind = ref.Kind\n\t\t\towner.Name = ref.Name\n\t\t\towner.ApiVersion = ref.APIVersion\n\t\t\towner.Controller = ref.Controller\n\t\t\treturn owner, nil\n\t\t}\n\t}\n\n\treturn owner, fmt.Errorf(\"ReplicaSet %s/%s has no controller of deployment\", namespace, name)\n}", "func createDeployment(cwg ClientWg, name string) {\n\tgo func() {\n\t\tdeploymentsClient := cwg.clientset.AppsV1().Deployments(metav1.NamespaceDefault)\n\t\tfound := 
checkNodeAlreadyPresent(deploymentsClient, name)\n\t\tif found {\n\t\t\tdefer cwg.wg.Done()\n\t\t\treturn\t\t\t\n\t\t}\n\t\tdeployment := &appv1.Deployment{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: name,\n\t\t\t},\n\t\t\tSpec: appv1.DeploymentSpec{\n\t\t\t\tReplicas: int32Ptr(1),\n\t\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\t\t\"app\": \"demo\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\t\"app\": \"demo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"web\",\n\t\t\t\t\t\t\t\tImage: \"nginx:1.12\",\n\t\t\t\t\t\t\t\tPorts: []corev1.ContainerPort{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName: \"http\",\n\t\t\t\t\t\t\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\t\t\t\t\t\t\tContainerPort: 80,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\t// Create Deployment\n\t\tfmt.Println(\"Creating deployment...\")\n\t\tresult, err := deploymentsClient.Create(deployment)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tfmt.Printf(\"Created deployment %q.\\n\", result.GetObjectMeta().GetName())\n\t\tdefer cwg.wg.Done()\n\t}()\n}", "func (c *Client) GetDeployment(cluster, service, arn string) (*ecs.Deployment, error) {\n\tinput := &ecs.DescribeServicesInput{\n\t\tCluster: &cluster,\n\t\tServices: []*string{&service},\n\t}\n\toutput, err := c.svc.DescribeServices(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tds := output.Services[0].Deployments\n\tfor _, d := range ds {\n\t\tif *d.TaskDefinition == arn {\n\t\t\treturn d, nil\n\t\t}\n\t}\n\treturn nil, nil\n}", "func (d *Deployment) Get(namespace, name string) (*appsv1.Deployment, error) {\n\tdeploy, err := d.cs.AppsV1().Deployments(namespace).Get(context.Background(), name, metav1.GetOptions{})\n\tif err != nil {\n\t\tif k8serrors.IsNotFound(err) {\n\t\t\treturn nil, k8serror.ErrNotFound\n\t\t}\n\t\treturn nil, err\n\t}\n\n\treturn deploy, nil\n}", "func (c *ECClient) GetDeployment(id string) (resp *http.Response, err error) {\n\tlog.Printf(\"[DEBUG] GetDeployment ID: %s\\n\", id)\n\n\tresourceURL := c.BaseURL + deploymentResource + \"/\" + id\n\tauthString := \"ApiKey \" + c.Key\n\tlog.Printf(\"[DEBUG] GetDeployment Resource URL: %s\\n\", resourceURL)\n\treq, err := http.NewRequest(\"GET\", resourceURL, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Set(\"Content-Type\", jsonContentType)\n\treq.Header.Set(\"Authorization\", authString)\n\n\tresp, err = c.HTTPClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlog.Printf(\"[DEBUG] GetDeployment response: %v\\n\", resp)\n\n\tif resp.StatusCode != 200 && resp.StatusCode != 404 {\n\t\trespBytes, _ := ioutil.ReadAll(resp.Body)\n\t\treturn nil, fmt.Errorf(\"%q: deployment could not be retrieved: %v\", id, string(respBytes))\n\t}\n\n\treturn resp, nil\n\n}", "func newDeploymentForCR(cr *tpokkiv1alpha1.GatlingTask, cm *corev1.ConfigMap) *appsv1.Deployment {\n\tlabels := map[string]string{\n\t\t\"app\": \"gatling\",\n\t\t\"gatling_cr\": cr.Name,\n\t}\n\n\tvolumeName := \"configmap-simulations\"\n\t// location must be /input, see https://github.com/tpokki/gatling-image\n\tvolumePath := \"/input\"\n\n\treturn &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: cr.Name,\n\t\t\tNamespace: 
cr.Namespace,\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &cr.Spec.Replicas,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: labels,\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: labels,\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\t\"prometheus.io/scrape\": \"true\",\n\t\t\t\t\t\t\"prometheus.io/port\": \"9102\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tVolumes: []corev1.Volume{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: volumeName,\n\t\t\t\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\t\t\t\tConfigMap: &corev1.ConfigMapVolumeSource{\n\t\t\t\t\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{\n\t\t\t\t\t\t\t\t\t\tName: cm.ObjectMeta.Name,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tRestartPolicy: cr.Spec.RestartPolicy,\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"gatling\",\n\t\t\t\t\t\t\tImage: \"quay.io/tpokki/gatling:0.0.1-3.3.1-prometheus\",\n\t\t\t\t\t\t\tArgs: []string{\"-nr\", \"-s\", cr.Spec.ScenarioSpec.Name},\n\t\t\t\t\t\t\tResources: cr.Spec.ResourceRequirements,\n\t\t\t\t\t\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: volumeName,\n\t\t\t\t\t\t\t\t\tMountPath: volumePath,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func (client *Client) GetDeployment(id int64, req *Request) (*Response, error) {\n\treturn client.Execute(&Request{\n\t\tMethod: \"GET\",\n\t\tPath: fmt.Sprintf(\"%s/%d\", DeploymentsPath, id),\n\t\tQueryParams: req.QueryParams,\n\t\tResult: &GetDeploymentResult{},\n\t})\n}", "func ScaleDeployment(ctx context.Context, c client.Client, key client.ObjectKey, replicas int32) error {\n\tdeployment := &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: key.Name,\n\t\t\tNamespace: key.Namespace,\n\t\t},\n\t}\n\n\treturn scaleResource(ctx, c, deployment, replicas)\n}", "func (r *AppReconciler) deploymentForApp(app *cloudv1alpha1.App) (*appsv1.Deployment, error) {\n\tprojectName := AppProjectName(app)\n\tlabels := LabelsForApp(projectName, app.Name)\n\treplicas := app.Spec.Replicas\n\n\tdep := &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: app.Name,\n\t\t\tNamespace: app.Namespace,\n\t\t\tLabels: labels,\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &replicas,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: labels,\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: labels,\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tcontainers := []corev1.Container{}\n\tfor _, c := range app.Spec.Containers {\n\t\tcontainer := corev1.Container{\n\t\t\tImage: c.Image,\n\t\t\tName: c.Name,\n\t\t\tCommand: c.Command,\n\t\t\tPorts: []corev1.ContainerPort{},\n\t\t}\n\n\t\tfor _, p := range c.Ports {\n\t\t\tcontainer.Ports = append(container.Ports, corev1.ContainerPort{\n\t\t\t\tProtocol: p.Protocol,\n\t\t\t\tContainerPort: p.Number,\n\t\t\t})\n\t\t}\n\n\t\tcontainers = append(containers, container)\n\t}\n\tdep.Spec.Template.Spec.Containers = containers\n\n\t// Set app instance as the owner and controller\n\terr := ctrl.SetControllerReference(app, dep, r.Scheme)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn dep, nil\n}", "func (c *ClientSetClient) 
GetDeployment(namespace, name string) (*appsv1.Deployment, error) {\n\tctx := context.TODO()\n\treturn c.clientset.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})\n}", "func (s *AppsServiceOp) GetDeployment(ctx context.Context, appID, deploymentID string) (*Deployment, *Response, error) {\n\tpath := fmt.Sprintf(\"%s/%s/deployments/%s\", appsBasePath, appID, deploymentID)\n\treq, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\troot := new(deploymentRoot)\n\tresp, err := s.client.Do(ctx, req, root)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\treturn root.Deployment, resp, nil\n}", "func (d *DeploymentEvent) GetDeployment() *Deployment {\n\tif d == nil {\n\t\treturn nil\n\t}\n\treturn d.Deployment\n}", "func currentDeployment(pds *int32, replicas, statusReplicas, updatedReplicas, availableReplicas int32, conditions []extensions.DeploymentCondition) *extensions.Deployment {\n\td := &extensions.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"progress-test\",\n\t\t},\n\t\tSpec: extensions.DeploymentSpec{\n\t\t\tProgressDeadlineSeconds: pds,\n\t\t\tReplicas: &replicas,\n\t\t\tStrategy: extensions.DeploymentStrategy{\n\t\t\t\tType: extensions.RecreateDeploymentStrategyType,\n\t\t\t},\n\t\t},\n\t\tStatus: newDeploymentStatus(statusReplicas, updatedReplicas, availableReplicas),\n\t}\n\td.Status.Conditions = conditions\n\treturn d\n}", "func (c *KubeClient) GetDeployment(namespace, deploymentName string) (*v1.Deployment, error) {\n\n\tdeploymentDetails, err := c.Client.AppsV1().Deployments(namespace).Get(deploymentName, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn deploymentDetails, nil\n}", "func (d *DeploymentStatusEvent) GetDeployment() *Deployment {\n\tif d == nil {\n\t\treturn nil\n\t}\n\treturn d.Deployment\n}", "func (r *ReconcileWebApp) deploymentForWebApp(m *appv1alpha1.WebApp) *appsv1.Deployment {\n\tls := labelsForWebApp(m.Name)\n\treplicas := m.Spec.Replicas\n\timage := m.Spec.Image\n\tport := m.Spec.Port\n\n\tdep := &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: m.Name,\n\t\t\tNamespace: m.Namespace,\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &replicas,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: ls,\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: ls,\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{{\n\t\t\t\t\t\tImage: image,\n\t\t\t\t\t\tName: \"webapp\",\n\t\t\t\t\t\t//Command: []string{\"WebApp\", \"-m=64\", \"-o\", \"modern\", \"-v\"},\n\t\t\t\t\t\tPorts: []corev1.ContainerPort{{\n\t\t\t\t\t\t\tContainerPort: port,\n\t\t\t\t\t\t\tName: \"webapp\",\n\t\t\t\t\t\t}},\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\t// Set WebApp instance as the owner of the Deployment.\n\tcontrollerutil.SetControllerReference(m, dep, r.scheme)\n\treturn dep\n}", "func (r *Reconciler) getChannelDeployment(channel *knativekafkav1alpha1.KafkaChannel) (*appsv1.Deployment, error) {\n\n\t// Get The Channel Deployment Name (One Channel Deployment Per Kafka Auth Secret)\n\tdeploymentName := util.ChannelDeploymentDnsSafeName(r.kafkaSecretName(channel))\n\n\t// Get The Channel Deployment By Namespace / Name\n\tdeployment := &appsv1.Deployment{}\n\tdeployment, err := r.deploymentLister.Deployments(constants.KnativeEventingNamespace).Get(deploymentName)\n\n\t// Return The Results\n\treturn deployment, err\n}", "func 
newDeployment(apployment *appscodev1alpha1.Apployment) *appsv1.Deployment {\n\tlabels := map[string]string{\n\t\t\"app\": \"Appscode\",\n\t\t\"controller\": apployment.Name,\n\t}\n\treturn &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: apployment.Spec.ApploymentName,\n\t\t\tNamespace: apployment.Namespace,\n\t\t\tOwnerReferences: []metav1.OwnerReference{\n\t\t\t\t*metav1.NewControllerRef(apployment, appscodev1alpha1.SchemeGroupVersion.WithKind(\"Apployment\")),\n\t\t\t},\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: apployment.Spec.Replicas,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: labels,\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: labels,\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: apployment.Name,\n\t\t\t\t\t\t\tImage: apployment.Spec.Image,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func ReconcileDeployment(reqLogger logr.Logger, c client.Client, deployment *appsv1.Deployment, wait bool) error {\n\tif err := SetCreationSpecAnnotation(&deployment.ObjectMeta, deployment); err != nil {\n\t\treturn err\n\t}\n\n\tfoundDeployment := &appsv1.Deployment{}\n\tif err := c.Get(context.TODO(), types.NamespacedName{Name: deployment.Name, Namespace: deployment.Namespace}, foundDeployment); err != nil {\n\t\t// Return API error\n\t\tif client.IgnoreNotFound(err) != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Create the deployment\n\t\treqLogger.Info(\"Creating new deployment\", \"Deployment.Name\", deployment.Name, \"Deployment.Namespace\", deployment.Namespace)\n\t\tif err := c.Create(context.TODO(), deployment); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif wait {\n\t\t\treturn errors.NewRequeueError(\"Created new deployment with wait, requeing for status check\", 3)\n\t\t}\n\t\treturn nil\n\t}\n\n\t// Check the found deployment spec\n\tif !CreationSpecsEqual(deployment.ObjectMeta, foundDeployment.ObjectMeta) {\n\t\t// We need to update the deployment\n\t\treqLogger.Info(\"Deployment annotation spec has changed, updating\", \"Deployment.Name\", deployment.Name, \"Deployment.Namespace\", deployment.Namespace)\n\t\tfoundDeployment.Spec = deployment.Spec\n\t\tif err := c.Update(context.TODO(), foundDeployment); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif wait {\n\t\trunningDeploy := &appsv1.Deployment{}\n\t\tif err := c.Get(context.TODO(), types.NamespacedName{Name: deployment.Name, Namespace: deployment.Namespace}, runningDeploy); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif runningDeploy.Status.ReadyReplicas != *deployment.Spec.Replicas {\n\t\t\treturn errors.NewRequeueError(fmt.Sprintf(\"Waiting for %s to be ready\", deployment.Name), 3)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (pm *PipelineManager) newDeployment(pipeline *api.Pipeline) *appsv1.Deployment {\n\tlbls := pipeLabels(pipeline)\n\n\tdeployment := &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: pipeline.Name,\n\t\t\tNamespace: pipeline.Namespace,\n\t\t\tOwnerReferences: []metav1.OwnerReference{\n\t\t\t\t*metav1.NewControllerRef(pipeline, api.SchemeGroupVersion.WithKind(api.PipelineResourceKind)),\n\t\t\t},\n\t\t\tLabels: lbls,\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: lbls,\n\t\t\t},\n\t\t\tStrategy: appsv1.DeploymentStrategy{\n\t\t\t\tType: appsv1.RecreateDeploymentStrategyType,\n\t\t\t},\n\t\t\tMinReadySeconds: 
10,\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: lbls,\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tRestartPolicy: corev1.RestartPolicyAlways,\n\t\t\t\t\tVolumes: []corev1.Volume{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"config\",\n\t\t\t\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\t\t\t\tConfigMap: &corev1.ConfigMapVolumeSource{\n\t\t\t\t\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{Name: pipeline.Name},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"gravity\",\n\t\t\t\t\t\t\tImage: pipeline.Spec.Image,\n\t\t\t\t\t\t\tCommand: pipeline.Spec.Command,\n\t\t\t\t\t\t\tPorts: []corev1.ContainerPort{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"http\",\n\t\t\t\t\t\t\t\t\tContainerPort: containerPort,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tLivenessProbe: &corev1.Probe{\n\t\t\t\t\t\t\t\tHandler: corev1.Handler{\n\t\t\t\t\t\t\t\t\tHTTPGet: &corev1.HTTPGetAction{\n\t\t\t\t\t\t\t\t\t\tPort: intstr.FromString(\"http\"),\n\t\t\t\t\t\t\t\t\t\tPath: \"/healthz\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tInitialDelaySeconds: 10,\n\t\t\t\t\t\t\t\tTimeoutSeconds: 5,\n\t\t\t\t\t\t\t\tPeriodSeconds: 10,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"config\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/etc/gravity\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tResources: corev1.ResourceRequirements{ //TODO from tps config or metrics\n\t\t\t\t\t\t\t\tRequests: corev1.ResourceList{\n\t\t\t\t\t\t\t\t\t\"cpu\": resource.MustParse(\"100m\"),\n\t\t\t\t\t\t\t\t\t\"memory\": resource.MustParse(\"150M\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tif pipeline.Spec.Paused {\n\t\tdeployment.Spec.Replicas = int32Ptr(0)\n\t} else {\n\t\tdeployment.Spec.Replicas = int32Ptr(1)\n\t}\n\treturn deployment\n}", "func (n *NodeClient) Get(twin, deployment uint32) (dl gridtypes.Deployment, err error) {\n\turl := n.url(\"deployment\", fmt.Sprint(twin), fmt.Sprint(deployment))\n\n\trequest, err := http.NewRequest(http.MethodGet, url, nil)\n\tif err != nil {\n\t\treturn dl, errors.Wrap(err, \"failed to build request\")\n\t}\n\n\tif err := n.client.authorize(request); err != nil {\n\t\treturn dl, errors.Wrap(err, \"failed to sign request\")\n\t}\n\n\tresponse, err := http.DefaultClient.Do(request)\n\tif err != nil {\n\t\treturn dl, err\n\t}\n\n\tif err := n.response(response, &dl, http.StatusOK); err != nil {\n\t\treturn dl, err\n\t}\n\n\treturn dl, nil\n}", "func (r *Reconciler) newChannelDeployment(secret *corev1.Secret) (*appsv1.Deployment, error) {\n\n\t// Get The Channel Deployment Name (One Channel Deployment Per Kafka Auth Secret)\n\tdeploymentName := util.ChannelDnsSafeName(secret.Name)\n\n\t// Replicas Int Value For De-Referencing\n\treplicas := int32(r.config.Channel.Replicas)\n\n\t// Create The Channel Container Environment Variables\n\tchannelEnvVars, err := r.channelDeploymentEnvVars(secret)\n\tif err != nil {\n\t\tr.logger.Error(\"Failed To Create Channel Deployment Environment Variables\", zap.Error(err))\n\t\treturn nil, err\n\t}\n\n\t// Create & Return The Channel's Deployment\n\tdeployment := &appsv1.Deployment{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tAPIVersion: appsv1.SchemeGroupVersion.String(),\n\t\t\tKind: constants.DeploymentKind,\n\t\t},\n\t\tObjectMeta: 
metav1.ObjectMeta{\n\t\t\tName: deploymentName,\n\t\t\tNamespace: commonconstants.KnativeEventingNamespace,\n\t\t\tLabels: map[string]string{\n\t\t\t\tconstants.AppLabel: deploymentName, // Matches Service Selector Key/Value Below\n\t\t\t\tconstants.KafkaChannelChannelLabel: \"true\", // Allows for identification of KafkaChannels\n\t\t\t},\n\t\t\tOwnerReferences: []metav1.OwnerReference{\n\t\t\t\tutil.NewSecretOwnerReference(secret),\n\t\t\t},\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &replicas,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\tconstants.AppLabel: deploymentName, // Matches Template ObjectMeta Pods\n\t\t\t\t},\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\tconstants.AppLabel: deploymentName, // Matched By Deployment Selector Above\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tServiceAccountName: r.environment.ServiceAccount,\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: deploymentName,\n\t\t\t\t\t\t\tLivenessProbe: &corev1.Probe{\n\t\t\t\t\t\t\t\tHandler: corev1.Handler{\n\t\t\t\t\t\t\t\t\tHTTPGet: &corev1.HTTPGetAction{\n\t\t\t\t\t\t\t\t\t\tPort: intstr.FromInt(constants.HealthPort),\n\t\t\t\t\t\t\t\t\t\tPath: health.LivenessPath,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tInitialDelaySeconds: constants.ChannelLivenessDelay,\n\t\t\t\t\t\t\t\tPeriodSeconds: constants.ChannelLivenessPeriod,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tReadinessProbe: &corev1.Probe{\n\t\t\t\t\t\t\t\tHandler: corev1.Handler{\n\t\t\t\t\t\t\t\t\tHTTPGet: &corev1.HTTPGetAction{\n\t\t\t\t\t\t\t\t\t\tPort: intstr.FromInt(constants.HealthPort),\n\t\t\t\t\t\t\t\t\t\tPath: health.ReadinessPath,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tInitialDelaySeconds: constants.ChannelReadinessDelay,\n\t\t\t\t\t\t\t\tPeriodSeconds: constants.ChannelReadinessPeriod,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tImage: r.environment.ChannelImage,\n\t\t\t\t\t\t\tPorts: []corev1.ContainerPort{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"server\",\n\t\t\t\t\t\t\t\t\tContainerPort: int32(constants.HttpContainerPortNumber),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tEnv: channelEnvVars,\n\t\t\t\t\t\t\tImagePullPolicy: corev1.PullIfNotPresent,\n\t\t\t\t\t\t\tResources: corev1.ResourceRequirements{\n\t\t\t\t\t\t\t\tRequests: corev1.ResourceList{\n\t\t\t\t\t\t\t\t\tcorev1.ResourceCPU: r.config.Channel.CpuRequest,\n\t\t\t\t\t\t\t\t\tcorev1.ResourceMemory: r.config.Channel.MemoryRequest,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tLimits: corev1.ResourceList{\n\t\t\t\t\t\t\t\t\tcorev1.ResourceCPU: r.config.Channel.CpuLimit,\n\t\t\t\t\t\t\t\t\tcorev1.ResourceMemory: r.config.Channel.MemoryLimit,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t// Return Channel Deployment\n\treturn deployment, nil\n}", "func newDeploymentForCR(cr *v1.Restaurant, cmVersion string) *appsv1.Deployment {\n\treplicas := cr.Spec.Deployment.Replicas\n\tif replicas == 0 {\n\t\treplicas = 1\n\t}\n\tprobe := &corev1.Probe{\n\t\tHandler: corev1.Handler{\n\t\t\tHTTPGet: &corev1.HTTPGetAction{\n\t\t\t\tPath: \"/health\",\n\t\t\t\tPort: intstr.FromInt(8080),\n\t\t\t},\n\t\t},\n\t}\n\tmaxSurge := intstr.FromInt(1)\n\tmaxUnavailable := intstr.FromInt(0)\n\treturn &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: cr.Name,\n\t\t\tNamespace: 
cr.Namespace,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"app\": cr.Name,\n\t\t\t},\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &replicas,\n\t\t\tStrategy: appsv1.DeploymentStrategy{\n\t\t\t\tType: appsv1.RollingUpdateDeploymentStrategyType,\n\t\t\t\tRollingUpdate: &appsv1.RollingUpdateDeployment{\n\t\t\t\t\tMaxSurge: &maxSurge,\n\t\t\t\t\tMaxUnavailable: &maxUnavailable,\n\t\t\t\t},\n\t\t\t},\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: getLabels(cr),\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: getLabels(cr),\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\t\"configMapResourceVersion\": cmVersion,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"restaurant\",\n\t\t\t\t\t\t\tImage: \"quay.io/ruben/restaurant-api:latest\",\n\t\t\t\t\t\t\tCommand: []string{\"./application\", \"-Dquarkus.http.host=0.0.0.0\"},\n\t\t\t\t\t\t\tPorts: []corev1.ContainerPort{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"http\",\n\t\t\t\t\t\t\t\t\tContainerPort: 8080,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tEnv: []corev1.EnvVar{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"DATA_PATH\",\n\t\t\t\t\t\t\t\t\tValue: \"/data\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tLivenessProbe: probe,\n\t\t\t\t\t\t\tReadinessProbe: probe,\n\t\t\t\t\t\t\tVolumeMounts: []corev1.VolumeMount{{\n\t\t\t\t\t\t\t\tName: \"data\",\n\t\t\t\t\t\t\t\tMountPath: \"/data\",\n\t\t\t\t\t\t\t}},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVolumes: []corev1.Volume{{\n\t\t\t\t\t\tName: \"data\",\n\t\t\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\t\t\tConfigMap: &corev1.ConfigMapVolumeSource{\n\t\t\t\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{\n\t\t\t\t\t\t\t\t\tName: cr.Name,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func (w *Worker) initDeployment(d types.Deployment) {\n\tw.log.Info(\"Initializing new deployment\")\n\tartifact, err := w.ciClient.GetBuildArtifactByID(d.ArtifactID)\n\tif err != nil {\n\t\tw.log.Error(\"Failed to get build artifact\", err)\n\t\treturn\n\t}\n\tmanifestVals := types.ManifestValues{\n\t\tName: d.K8SName,\n\t\tImage: artifact.Name,\n\t\tReplicas: d.Replicas,\n\t}\n\tmanifest, err := renderManifestTemplate(d.Manifest, manifestVals)\n\tif err != nil {\n\t\tw.log.Error(\"Failed to render manifest template\", err)\n\t\treturn\n\t}\n\tfmt.Println(\"Manifest:\\n\" + manifest)\n\tok, stdout := w.kubectl.CreateDeployment(manifest)\n\tif ok != true {\n\t\tfmt.Println(\"fucked up\")\n\t}\n\tfmt.Println(\"stdout: \" + stdout)\n\terr = w.recordRevision(d, stdout)\n\tif err != nil {\n\t\tw.log.Error(\"Failed to write revision to db\", err)\n\t}\n\tif ok == true {\n\t\td.IsInitialized = true\n\t\terr = w.databaseClient.SaveDeployment(&d)\n\t\tif err != nil {\n\t\t\tw.log.Error(\"Failed to update deployment db record\", err)\n\t\t}\n\t}\n}", "func DeploymentForPolicyController(instance *operatorv1.PolicyController) *appsv1.Deployment {\n\timage := instance.Spec.ImageRegistry + utils.GetImageRef(\"POLICY_CONTROLLER_TAG_OR_SHA\")\n\treplicas := instance.Spec.Replicas\n\tresources := instance.Spec.Resources\n\n\tif resources == nil {\n\t\tresources = &corev1.ResourceRequirements{\n\t\t\tLimits: map[corev1.ResourceName]resource.Quantity{\n\t\t\t\tcorev1.ResourceCPU: *cpu200,\n\t\t\t\tcorev1.ResourceMemory: *memory384},\n\t\t\tRequests: 
map[corev1.ResourceName]resource.Quantity{\n\t\t\t\tcorev1.ResourceCPU: *cpu100,\n\t\t\t\tcorev1.ResourceMemory: *memory128},\n\t\t}\n\t}\n\n\tiamPolicyDep := &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: constants.IamPolicyControllerDepName,\n\t\t\tNamespace: instance.Namespace,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"app\": \"iam-policy-controller\",\n\t\t\t},\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &replicas,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\t\"app\": \"iam-policy-controller\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"app\": \"iam-policy-controller\",\n\t\t\t\t\t\t\"app.kubernetes.io/instance\": \"iam-policy-controller\",\n\t\t\t\t\t},\n\t\t\t\t\tAnnotations: utils.AnnotationsForMetering(),\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tTerminationGracePeriodSeconds: &seconds60,\n\t\t\t\t\tServiceAccountName: serviceAccountName,\n\t\t\t\t\tHostNetwork: false,\n\t\t\t\t\tHostIPC: false,\n\t\t\t\t\tHostPID: false,\n\t\t\t\t\tAffinity: &corev1.Affinity{\n\t\t\t\t\t\tNodeAffinity: &corev1.NodeAffinity{\n\t\t\t\t\t\t\tRequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{\n\t\t\t\t\t\t\t\tNodeSelectorTerms: []corev1.NodeSelectorTerm{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tMatchExpressions: []corev1.NodeSelectorRequirement{\n\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\tKey: \"beta.kubernetes.io/arch\",\n\t\t\t\t\t\t\t\t\t\t\t\tOperator: corev1.NodeSelectorOpIn,\n\t\t\t\t\t\t\t\t\t\t\t\tValues: []string{gorun.GOARCH},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tPodAntiAffinity: &corev1.PodAntiAffinity{\n\t\t\t\t\t\t\tPreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tWeight: 100,\n\t\t\t\t\t\t\t\t\tPodAffinityTerm: corev1.PodAffinityTerm{\n\t\t\t\t\t\t\t\t\t\tTopologyKey: \"kubernetes.io/hostname\",\n\t\t\t\t\t\t\t\t\t\tLabelSelector: &metav1.LabelSelector{\n\t\t\t\t\t\t\t\t\t\t\tMatchExpressions: []metav1.LabelSelectorRequirement{\n\t\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\t\tKey: \"app\",\n\t\t\t\t\t\t\t\t\t\t\t\t\tOperator: metav1.LabelSelectorOpIn,\n\t\t\t\t\t\t\t\t\t\t\t\t\tValues: []string{\"iam-policy-controller\"},\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tTolerations: []corev1.Toleration{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: \"dedicated\",\n\t\t\t\t\t\t\tOperator: corev1.TolerationOpExists,\n\t\t\t\t\t\t\tEffect: corev1.TaintEffectNoSchedule,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: \"CriticalAddonsOnly\",\n\t\t\t\t\t\t\tOperator: corev1.TolerationOpExists,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVolumes: []corev1.Volume{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"tmp\",\n\t\t\t\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\t\t\t\tEmptyDir: &corev1.EmptyDirVolumeSource{},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: constants.IamPolicyControllerDepName,\n\t\t\t\t\t\t\tImage: image,\n\t\t\t\t\t\t\tImagePullPolicy: corev1.PullAlways,\n\t\t\t\t\t\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: 
\"tmp\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/tmp\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tArgs: []string{\"--v=0\", \"--update-frequency=60\"},\n\t\t\t\t\t\t\tLivenessProbe: &corev1.Probe{\n\t\t\t\t\t\t\t\tHandler: corev1.Handler{\n\t\t\t\t\t\t\t\t\tExec: &corev1.ExecAction{\n\t\t\t\t\t\t\t\t\t\tCommand: []string{\"sh\", \"-c\", \"pgrep iam-policy -l\"},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tInitialDelaySeconds: 30,\n\t\t\t\t\t\t\t\tTimeoutSeconds: 5,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tReadinessProbe: &corev1.Probe{\n\t\t\t\t\t\t\t\tHandler: corev1.Handler{\n\t\t\t\t\t\t\t\t\tExec: &corev1.ExecAction{\n\t\t\t\t\t\t\t\t\t\tCommand: []string{\"sh\", \"-c\", \"exec echo start iam-policy-controller\"},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tInitialDelaySeconds: 10,\n\t\t\t\t\t\t\t\tTimeoutSeconds: 2,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSecurityContext: &corev1.SecurityContext{\n\t\t\t\t\t\t\t\tPrivileged: &falseVar,\n\t\t\t\t\t\t\t\tRunAsNonRoot: &trueVar,\n\t\t\t\t\t\t\t\tReadOnlyRootFilesystem: &trueVar,\n\t\t\t\t\t\t\t\tAllowPrivilegeEscalation: &falseVar,\n\t\t\t\t\t\t\t\tCapabilities: &corev1.Capabilities{\n\t\t\t\t\t\t\t\t\tDrop: []corev1.Capability{\"ALL\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tResources: *resources,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\treturn iamPolicyDep\n}", "func GetOwnerRefDeployment(deployment *appsv1.Deployment) metav1.OwnerReference {\n\tf := false\n\tt := true\n\treturn metav1.OwnerReference{\n\t\tAPIVersion: \"apps/v1\",\n\t\tKind: \"Deployment\",\n\t\tName: deployment.Name,\n\t\tUID: deployment.UID,\n\t\tController: &f,\n\t\tBlockOwnerDeletion: &t,\n\t}\n}", "func GetDeployment(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *DeploymentState, opts ...pulumi.ResourceOption) (*Deployment, error) {\n\tvar resource Deployment\n\terr := ctx.ReadResource(\"aws-native:apigatewayv2:Deployment\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (r *ReconcileDolevOp) manageDeployment(instance *dolevgroupv1alpha1.DolevOp, reqLogger logr.Logger) (*reconcile.Result, error) {\n\tdeployment := &appsv1.Deployment{}\n\terr := r.client.Get(context.TODO(), types.NamespacedName{Name: instance.Name, Namespace: instance.Namespace}, deployment)\n\tif err != nil && errors.IsNotFound(err) {\n\t\tserverDeployment, err := r.deploymentForWebServer(instance)\n\t\tif err != nil {\n\t\t\treqLogger.Error(err, \"error getting server deployment\")\n\t\t\treturn &reconcile.Result{}, err\n\t\t}\n\t\treqLogger.Info(\"Creating a new server deployment.\", \"Deployment.Namespace\", serverDeployment.Namespace, \"Deployment.Name\", serverDeployment.Name)\n\t\terr = r.client.Create(context.TODO(), serverDeployment)\n\t\tif err != nil {\n\t\t\treqLogger.Error(err, \"Failed to create new Server Deployment.\", \"Deployment.Namespace\", serverDeployment.Namespace, \"Deployment.Name\", serverDeployment.Name)\n\t\t\treturn &reconcile.Result{}, err\n\t\t}\n\t\treturn &reconcile.Result{Requeue: true}, nil\n\t} else if err != nil {\n\t\treqLogger.Error(err, \"Failed to get server deployment.\")\n\t\treturn &reconcile.Result{}, err\n\t}\n\treturn nil, nil\n}", "func (bc *ReconcileJenkinsInstance) newDeployment(instanceName types.NamespacedName) (*appsv1.Deployment, error) {\n\texists := false\n\n\tjenkinsInstance, err := bc.getJenkinsInstance(instanceName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Get the deployment with the 
name specified in JenkinsInstance.spec\n\tdeployment, err := bc.getDeployment(instanceName)\n\n\t// If the resource doesn't exist, we'll create it\n\tif err != nil {\n\t\tif !errors.IsNotFound(err) {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\t// If the Deployment is not controlled by this JenkinsInstance resource, we should log\n\t\t// a warning to the event recorder and return\n\t\tif !metav1.IsControlledBy(deployment, jenkinsInstance) {\n\t\t\tmsg := fmt.Sprintf(MessageResourceExists, deployment.GetName())\n\t\t\tbc.Event(jenkinsInstance, corev1.EventTypeWarning, ErrResourceExists, msg)\n\t\t\treturn deployment, fmt.Errorf(msg)\n\t\t}\n\n\t\texists = true\n\t}\n\n\tlabels := map[string]string{\n\t\t\"app\": \"jenkinsci\",\n\t\t\"controller\": jenkinsInstance.GetName(),\n\t\t\"component\": string(jenkinsInstance.UID),\n\t}\n\n\t// get binary data for variables and groovy config\n\tjenkinsJvmEnv, err := configdata.Asset(\"environment/jenkins-jvm-environment\")\n\tif err != nil {\n\t\tglog.Errorf(\"Error locating binary asset: %s\", err)\n\t\treturn nil, err\n\t}\n\n\t// Create environment variables\n\t// variables out of jenkins-jvm-environment\n\tvar env []corev1.EnvVar\n\tscanner := bufio.NewScanner(strings.NewReader(string(jenkinsJvmEnv[:])))\n\tfor scanner.Scan() {\n\n\t\tenvComponents := strings.Split(scanner.Text(), \":\")\n\t\tenv = append(env, corev1.EnvVar{\n\t\t\tName: envComponents[0],\n\t\t\tValue: envComponents[1],\n\t\t})\n\t}\n\n\t// user-specified environment variables\n\tfor envVar, envVarVal := range jenkinsInstance.Spec.Env {\n\t\tenv = append(env, corev1.EnvVar{\n\t\t\tName: envVar,\n\t\t\tValue: envVarVal,\n\t\t})\n\t}\n\n\t// build a command string to install plugins and launch jenkins\n\tcommandString := \"\"\n\tcommandString += \"/usr/local/bin/install-plugins.sh $(cat /var/jenkins_home/init.groovy.d/plugins.txt | tr '\\\\n' ' ') && \"\n\tcommandString += \"/sbin/tini -- /usr/local/bin/jenkins.sh\"\n\tcommandString += \"\"\n\n\t// if service account name is specified, check that it exists\n\tif jenkinsInstance.Spec.ServiceAccount != \"\" {\n\t\tserviceAccount := &corev1.ServiceAccount{}\n\t\terr := bc.Client.Get(\n\t\t\tcontext.TODO(),\n\t\t\ttypes.NamespacedName{\n\t\t\t\tNamespace: jenkinsInstance.GetNamespace(),\n\t\t\t\tName: jenkinsInstance.Spec.ServiceAccount},\n\t\t\tserviceAccount)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Get the correct volume source to use\n\t// if pvc name is specified, try to either locate it or create it\n\tpvcName := jenkinsInstance.GetName()\n\tif jenkinsInstance.Spec.Storage != nil {\n\t\tif jenkinsInstance.Spec.Storage.JobsPvc != \"\" {\n\t\t\tpvcName = jenkinsInstance.Spec.Storage.JobsPvc\n\t\t}\n\t\tpvc := &corev1.PersistentVolumeClaim{}\n\t\terr = bc.Client.Get(\n\t\t\tcontext.TODO(),\n\t\t\ttypes.NamespacedName{\n\t\t\t\tNamespace: jenkinsInstance.GetNamespace(),\n\t\t\t\tName: pvcName},\n\t\t\tpvc)\n\n\t\t// if PVC is not found\n\t\tif errors.IsNotFound(err) {\n\t\t\t// error out if pvc spec is not specified\n\t\t\tif jenkinsInstance.Spec.Storage.JobsPvcSpec == nil {\n\t\t\t\treturn nil, fmt.Errorf(\n\t\t\t\t\t\"PVC %s does not exist and JobsPvcSpec is not specified\",\n\t\t\t\t\tpvcName)\n\t\t\t}\n\n\t\t\t// otherwise create the pvc\n\t\t\tpvc = &corev1.PersistentVolumeClaim{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: pvcName,\n\t\t\t\t\tNamespace: jenkinsInstance.GetNamespace(),\n\t\t\t\t},\n\t\t\t\tSpec: *jenkinsInstance.Spec.Storage.JobsPvcSpec,\n\t\t\t}\n\t\t\terr = 
controllerutil.SetControllerReference(jenkinsInstance, pvc, bc.scheme)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\terr = bc.Client.Create(context.TODO(), pvc)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\t// if PVC name is not specified, use an EmptyDir\n\tvar volumeSource corev1.VolumeSource\n\tif jenkinsInstance.Spec.Storage == nil {\n\t\tvolumeSource = corev1.VolumeSource{\n\t\t\tEmptyDir: &corev1.EmptyDirVolumeSource{},\n\t\t}\n\t} else {\n\t\tvolumeSource = corev1.VolumeSource{\n\t\t\tPersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{\n\t\t\t\tClaimName: pvcName,\n\t\t\t\tReadOnly: false,\n\t\t\t},\n\t\t}\n\t}\n\n\tvar replicas int32 = JenkinsReplicas\n\tvar runAsUser int64 = 0\n\n\tif exists {\n\t\tdeploymentCopy := deployment.DeepCopy()\n\t\tdeploymentCopy.Annotations = jenkinsInstance.Spec.Annotations\n\t\tdeploymentCopy.Spec.Replicas = &replicas\n\t\tdeploymentCopy.Spec.Selector = &metav1.LabelSelector{\n\t\t\tMatchLabels: labels,\n\t\t}\n\t\tdeploymentCopy.Spec.Template.Spec.Containers = []corev1.Container{\n\t\t\t{\n\t\t\t\tName: \"jenkinsci\",\n\t\t\t\tImage: jenkinsInstance.Spec.Image,\n\t\t\t\tPorts: []corev1.ContainerPort{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"master\",\n\t\t\t\t\t\tContainerPort: JenkinsMasterPort,\n\t\t\t\t\t\tHostPort: JenkinsMasterPort,\n\t\t\t\t\t\tProtocol: \"TCP\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"agent\",\n\t\t\t\t\t\tContainerPort: JenkinsAgentPort,\n\t\t\t\t\t\tHostPort: JenkinsAgentPort,\n\t\t\t\t\t\tProtocol: \"TCP\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tEnv: env,\n\t\t\t\tCommand: []string{\n\t\t\t\t\t\"bash\",\n\t\t\t\t\t\"-c\",\n\t\t\t\t\tcommandString,\n\t\t\t\t},\n\t\t\t\tImagePullPolicy: JenkinsPullPolicy,\n\t\t\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"init-groovy-d\",\n\t\t\t\t\t\tReadOnly: false,\n\t\t\t\t\t\tMountPath: \"/var/jenkins_home/init.groovy.d\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"job-storage\",\n\t\t\t\t\t\tReadOnly: false,\n\t\t\t\t\t\tMountPath: \"/var/jenkins_home/jobs\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tdeploymentCopy.Spec.Template.Spec.Volumes = []corev1.Volume{\n\t\t\t{\n\t\t\t\tName: \"init-groovy-d\",\n\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\tSecret: &corev1.SecretVolumeSource{\n\t\t\t\t\t\tSecretName: jenkinsInstance.GetName(),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"job-storage\",\n\t\t\t\tVolumeSource: volumeSource,\n\t\t\t},\n\t\t}\n\t\tdeploymentCopy.Spec.Template.Spec.ServiceAccountName = jenkinsInstance.Spec.ServiceAccount\n\n\t\tchanged := reflect.DeepEqual(deploymentCopy.Annotations, deployment.Annotations) &&\n\t\t\treflect.DeepEqual(deploymentCopy.Spec.Selector, deployment.Spec.Selector) &&\n\t\t\treflect.DeepEqual(deploymentCopy.Spec.Template.Spec.Containers, deployment.Spec.Template.Spec.Containers) &&\n\t\t\treflect.DeepEqual(deploymentCopy.Spec.Template.Spec.Volumes, deployment.Spec.Template.Spec.Volumes) &&\n\t\t\t(deploymentCopy.Spec.Replicas == deployment.Spec.Replicas) &&\n\t\t\t(deploymentCopy.Spec.Template.Spec.ServiceAccountName == deployment.Spec.Template.Spec.ServiceAccountName)\n\n\t\tif !changed {\n\t\t\treturn deployment, nil\n\t\t}\n\n\t\tglog.Info(\"updating deployment\")\n\t\terr = bc.Client.Update(context.TODO(), deploymentCopy)\n\t\treturn deploymentCopy, err\n\n\t} else {\n\t\tdeployment = &appsv1.Deployment{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: jenkinsInstance.GetName(),\n\t\t\t\tNamespace: 
jenkinsInstance.GetNamespace(),\n\t\t\t\tAnnotations: jenkinsInstance.Spec.Annotations,\n\t\t\t},\n\t\t\tSpec: appsv1.DeploymentSpec{\n\t\t\t\tReplicas: &replicas,\n\t\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\t\tMatchLabels: labels,\n\t\t\t\t},\n\t\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tLabels: labels,\n\t\t\t\t\t},\n\t\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\t\tSecurityContext: &corev1.PodSecurityContext{\n\t\t\t\t\t\t\tRunAsUser: &runAsUser,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"jenkinsci\",\n\t\t\t\t\t\t\t\tImage: jenkinsInstance.Spec.Image,\n\t\t\t\t\t\t\t\tPorts: []corev1.ContainerPort{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName: \"master\",\n\t\t\t\t\t\t\t\t\t\tContainerPort: JenkinsMasterPort,\n\t\t\t\t\t\t\t\t\t\tHostPort: JenkinsMasterPort,\n\t\t\t\t\t\t\t\t\t\tProtocol: \"TCP\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName: \"agent\",\n\t\t\t\t\t\t\t\t\t\tContainerPort: JenkinsAgentPort,\n\t\t\t\t\t\t\t\t\t\tHostPort: JenkinsAgentPort,\n\t\t\t\t\t\t\t\t\t\tProtocol: \"TCP\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tEnv: env,\n\t\t\t\t\t\t\t\tCommand: []string{\n\t\t\t\t\t\t\t\t\t\"bash\",\n\t\t\t\t\t\t\t\t\t\"-c\",\n\t\t\t\t\t\t\t\t\tcommandString,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tImagePullPolicy: JenkinsPullPolicy,\n\t\t\t\t\t\t\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName: \"init-groovy-d\",\n\t\t\t\t\t\t\t\t\t\tReadOnly: false,\n\t\t\t\t\t\t\t\t\t\tMountPath: \"/var/jenkins_home/init.groovy.d\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName: \"job-storage\",\n\t\t\t\t\t\t\t\t\t\tReadOnly: false,\n\t\t\t\t\t\t\t\t\t\tMountPath: \"/var/jenkins_home/jobs\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\tVolumes: []corev1.Volume{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"init-groovy-d\",\n\t\t\t\t\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\t\t\t\t\tSecret: &corev1.SecretVolumeSource{\n\t\t\t\t\t\t\t\t\t\tSecretName: jenkinsInstance.GetName(),\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"job-storage\",\n\t\t\t\t\t\t\t\tVolumeSource: volumeSource,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\t// assign service account\n\t\tdeployment.Spec.Template.Spec.ServiceAccountName = jenkinsInstance.Spec.ServiceAccount\n\n\t\terr = controllerutil.SetControllerReference(jenkinsInstance, deployment, bc.scheme)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\terr = bc.Client.Create(context.TODO(), deployment)\n\t\treturn deployment, err\n\t}\n}", "func (in *IstioClient) GetDeployment(namespace, deploymentName string) (*v1beta1.Deployment, error) {\n\treturn in.k8s.AppsV1beta1().Deployments(namespace).Get(deploymentName, emptyGetOptions)\n}", "func (s *githubService) Deployment(env, branch, name string) (Template, error) {\n\tfileContent, _, _, err := s.getContents(env, branch, \"deployments/\"+name+\".yaml\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif fileContent.DownloadURL == nil {\n\t\treturn \"\", fmt.Errorf(\"no download url in github file response\")\n\t}\n\n\tresp, err := http.Get(*fileContent.DownloadURL)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn Template(body), 
nil\n}", "func Transform(deployment *appsv1.Deployment) *appsv1.Deployment {\n\treturn &appsv1.Deployment{\n\t\tObjectMeta: metadata.TransformObjectMeta(deployment.ObjectMeta),\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: deployment.Spec.Replicas,\n\t\t},\n\t\tStatus: appsv1.DeploymentStatus{\n\t\t\tAvailableReplicas: deployment.Status.AvailableReplicas,\n\t\t},\n\t}\n}", "func GetDeploymentByFile(file string) Kind {\n\tk := new(Deployment)\n\n\tdata := utils.ReadFileToMap(file)\n\n\to := utils.ToMap(data)\n\tmetadata := utils.ToMap(o[\"metadata\"])\n\tspec := utils.ToMap(o[\"spec\"])\n\tstatus := utils.ToMap(o[\"status\"])\n\n\tk.Name = metadata[\"name\"].(string)\n\tk.UID = metadata[\"uid\"].(string)\n\tk.Namespace = metadata[\"namespace\"].(string)\n\tk.CreationTimestamp = utils.ToTime(metadata[\"creationTimestamp\"].(string))\n\n\tk.SelfLink = metadata[\"selfLink\"].(string)\n\tk.Labels = GetLabels(metadata[\"labels\"])\n\tk.Annotations = GetAnnotations(metadata[\"annotations\"])\n\n\tif v, ok := spec[\"selector\"]; ok {\n\t\tif matchLabels, ok := utils.ToMap(v)[\"matchLabels\"]; ok {\n\t\t\tk.Selectors = GetLabels(matchLabels)\n\t\t}\n\t}\n\n\tif v, ok := spec[\"replicas\"]; ok {\n\t\tk.Replicas = utils.FloatToString(v.(float64))\n\t}\n\tif v, ok := status[\"availableReplicas\"]; ok {\n\t\tk.AvailableReplicas = utils.FloatToString(v.(float64))\n\t}\n\tif v, ok := status[\"readyReplicas\"]; ok {\n\t\tk.ReadyReplicas = utils.FloatToString(v.(float64))\n\t}\n\tif v, ok := status[\"updatedReplicas\"]; ok {\n\t\tk.UpdatedReplicas = utils.FloatToString(v.(float64))\n\t}\n\n\tk.Conditions = GetDeploymentConditions(status[\"conditions\"].([]interface{}))\n\n\tk.FilePath = file\n\treturn k\n}", "func getDeploymentInfo(pvc v1.PersistentVolumeClaim) (string, string, bool) {\n\tdeploymentName := pvc.GetLabels()[k8sutil.LabelKeyArangoDeployment]\n\trole := pvc.GetLabels()[k8sutil.LabelKeyRole]\n\tenforceAntiAffinity, _ := strconv.ParseBool(pvc.GetAnnotations()[constants.AnnotationEnforceAntiAffinity]) // If annotation empty, this will yield false.\n\treturn deploymentName, role, enforceAntiAffinity\n}", "func (s *Fs) get(path string) (gridtypes.Deployment, error) {\n\tvar wl gridtypes.Deployment\n\tfile, err := os.Open(path)\n\tif os.IsNotExist(err) {\n\t\treturn wl, errors.Wrapf(provision.ErrDeploymentNotExists, \"deployment '%s' does not exist\", path)\n\t} else if err != nil {\n\t\treturn wl, errors.Wrap(err, \"failed to open workload file\")\n\t}\n\tdefer file.Close()\n\treader, err := versioned.NewReader(file)\n\tif err != nil {\n\t\treturn wl, errors.Wrap(err, \"failed to load workload\")\n\t}\n\tversion := reader.Version()\n\tif !version.EQ(deploymentSchemaV1) {\n\t\treturn wl, fmt.Errorf(\"invalid workload version\")\n\t}\n\n\tif err := json.NewDecoder(reader).Decode(&wl); err != nil {\n\t\treturn wl, errors.Wrap(err, \"failed to read workload data\")\n\t}\n\n\treturn wl, nil\n}", "func MakeDeployment(serviceInstance *v1alpha1.ServiceInstance, cfg *config.Config) (*appsv1.Deployment, error) {\n\tif cfg == nil {\n\t\treturn nil, errors.New(\"the Kf defaults configmap couldn't be found\")\n\t}\n\tconfigDefaults, err := cfg.Defaults()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif configDefaults.RouteServiceProxyImage == \"\" {\n\t\treturn nil, errors.New(\"config value for RouteServiceProxyImage couldn't be found\")\n\t}\n\n\treturn &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: DeploymentName(serviceInstance),\n\t\t\tNamespace: 
serviceInstance.Namespace,\n\t\t\tOwnerReferences: []metav1.OwnerReference{\n\t\t\t\t*kmeta.NewControllerRef(serviceInstance),\n\t\t\t},\n\t\t\tLabels: v1alpha1.UnionMaps(serviceInstance.GetLabels()),\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tSelector: metav1.SetAsLabelSelector(labels.Set(PodLabels(serviceInstance))),\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: v1alpha1.UnionMaps(\n\t\t\t\t\t\tPodLabels(serviceInstance),\n\n\t\t\t\t\t\t// Insert a label for isolating apps with their own NetworkPolicies.\n\t\t\t\t\t\tmap[string]string{\n\t\t\t\t\t\t\tv1alpha1.NetworkPolicyLabel: v1alpha1.NetworkPolicyApp,\n\t\t\t\t\t\t}),\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\t\"sidecar.istio.io/inject\": \"true\",\n\t\t\t\t\t\t\"traffic.sidecar.istio.io/includeOutboundIPRanges\": \"*\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: makePodSpec(*serviceInstance, configDefaults),\n\t\t\t},\n\t\t\tRevisionHistoryLimit: ptr.Int32(revisionHistoryLimit),\n\t\t\tReplicas: ptr.Int32(replicas),\n\t\t\tStrategy: appsv1.DeploymentStrategy{\n\t\t\t\tType: appsv1.RollingUpdateDeploymentStrategyType,\n\t\t\t\tRollingUpdate: &appsv1.RollingUpdateDeployment{\n\t\t\t\t\tMaxUnavailable: &defaultMaxUnavailable,\n\t\t\t\t\tMaxSurge: &defaultMaxSurge,\n\t\t\t\t},\n\t\t\t},\n\t\t\tProgressDeadlineSeconds: ptr.Int32(600),\n\t\t},\n\t}, nil\n}", "func (r *Reconciler) newChannelDeployment(channel *knativekafkav1alpha1.KafkaChannel) (*appsv1.Deployment, error) {\n\n\t// Get The Channel Deployment Name (One Channel Deployment Per Kafka Auth Secret)\n\tdeploymentName := util.ChannelDeploymentDnsSafeName(r.kafkaSecretName(channel))\n\n\t// Replicas Int Value For De-Referencing\n\treplicas := int32(r.environment.ChannelReplicas)\n\n\t// Create The Channel Container Environment Variables\n\tchannelEnvVars, err := r.channelDeploymentEnvVars(channel)\n\tif err != nil {\n\t\tr.Logger.Error(\"Failed To Create Channel Deployment Environment Variables\", zap.Error(err))\n\t\treturn nil, err\n\t}\n\n\t// Create & Return The Channel's Deployment\n\tdeployment := &appsv1.Deployment{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tAPIVersion: appsv1.SchemeGroupVersion.String(),\n\t\t\tKind: constants.DeploymentKind,\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: deploymentName,\n\t\t\tNamespace: constants.KnativeEventingNamespace,\n\t\t\tLabels: map[string]string{\n\t\t\t\tAppLabel: deploymentName, // Matches Service Selector Key/Value Below\n\t\t\t\tKafkaChannelChannelLabel: \"true\", // Allows for identification of KafkaChannels\n\t\t\t},\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &replicas,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\tAppLabel: deploymentName, // Matches Template ObjectMeta Pods\n\t\t\t\t},\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\tAppLabel: deploymentName, // Matched By Deployment Selector Above\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tServiceAccountName: r.environment.ServiceAccount,\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: deploymentName,\n\t\t\t\t\t\t\tLivenessProbe: &corev1.Probe{\n\t\t\t\t\t\t\t\tHandler: corev1.Handler{\n\t\t\t\t\t\t\t\t\tHTTPGet: &corev1.HTTPGetAction{\n\t\t\t\t\t\t\t\t\t\tPort: intstr.FromInt(constants.HealthPort),\n\t\t\t\t\t\t\t\t\t\tPath: 
health.LivenessPath,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tInitialDelaySeconds: constants.ChannelLivenessDelay,\n\t\t\t\t\t\t\t\tPeriodSeconds: constants.ChannelLivenessPeriod,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tReadinessProbe: &corev1.Probe{\n\t\t\t\t\t\t\t\tHandler: corev1.Handler{\n\t\t\t\t\t\t\t\t\tHTTPGet: &corev1.HTTPGetAction{\n\t\t\t\t\t\t\t\t\t\tPort: intstr.FromInt(constants.HealthPort),\n\t\t\t\t\t\t\t\t\t\tPath: health.ReadinessPath,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tInitialDelaySeconds: constants.ChannelReadinessDelay,\n\t\t\t\t\t\t\t\tPeriodSeconds: constants.ChannelReadinessPeriod,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tImage: r.environment.ChannelImage,\n\t\t\t\t\t\t\tPorts: []corev1.ContainerPort{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"server\",\n\t\t\t\t\t\t\t\t\tContainerPort: int32(constants.HttpContainerPortNumber),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tEnv: channelEnvVars,\n\t\t\t\t\t\t\tImagePullPolicy: corev1.PullAlways,\n\t\t\t\t\t\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: constants.LoggingConfigVolumeName,\n\t\t\t\t\t\t\t\t\tMountPath: constants.LoggingConfigMountPath,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tResources: corev1.ResourceRequirements{\n\t\t\t\t\t\t\t\tRequests: corev1.ResourceList{\n\t\t\t\t\t\t\t\t\tcorev1.ResourceCPU: r.environment.ChannelCpuRequest,\n\t\t\t\t\t\t\t\t\tcorev1.ResourceMemory: r.environment.ChannelMemoryRequest,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tLimits: corev1.ResourceList{\n\t\t\t\t\t\t\t\t\tcorev1.ResourceCPU: r.environment.ChannelCpuLimit,\n\t\t\t\t\t\t\t\t\tcorev1.ResourceMemory: r.environment.ChannelMemoryLimit,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVolumes: []corev1.Volume{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: constants.LoggingConfigVolumeName,\n\t\t\t\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\t\t\t\tConfigMap: &corev1.ConfigMapVolumeSource{\n\t\t\t\t\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{\n\t\t\t\t\t\t\t\t\t\tName: constants.LoggingConfigMapName,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t// Return Channel Deployment\n\treturn deployment, nil\n}", "func (m *Machine) GetBOSHDeployment(namespace string, name string) (*bdv1.BOSHDeployment, error) {\n\tclient := m.VersionedClientset.BoshdeploymentV1alpha1().BOSHDeployments(namespace)\n\td, err := client.Get(context.Background(), name, metav1.GetOptions{})\n\treturn d, err\n}", "func (k *k8sUtil) DeploymentOps() (k8sExtnsV1Beta1.DeploymentInterface, error) {\n\tvar cs *kubernetes.Clientset\n\n\tinC, err := k.InCluster()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tns, err := k.NS()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif inC {\n\t\tcs, err = k.inClusterCS()\n\t} else {\n\t\tcs, err = k.outClusterCS()\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cs.ExtensionsV1beta1().Deployments(ns), nil\n}", "func (r *ReconcileIntegration) deploymentForIntegration(m *integrationv1alpha1.Integration) *appsv1.Deployment {\n\tlabels := labelsForIntegration(m.Name)\n\treplicas := m.Spec.Replicas\n\n\tdeployment := &appsv1.Deployment{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tAPIVersion: \"apps/v1\",\n\t\t\tKind: \"Deployment\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: nameForDeployment(m),\n\t\t\tNamespace: m.Namespace,\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: 
&replicas,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: labels,\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: labels,\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{{\n\t\t\t\t\t\tImage: m.Spec.Image,\n\t\t\t\t\t\tName: \"micro-integrator\",\n\t\t\t\t\t\tPorts: []corev1.ContainerPort{{\n\t\t\t\t\t\t\tContainerPort: 8290,\n\t\t\t\t\t\t}},\n\t\t\t\t\t\tEnv: m.Spec.Env,\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\t// Set Integration instance as the owner and controller\n\tcontrollerutil.SetControllerReference(m, deployment, r.scheme)\n\treturn deployment\n}", "func (d *Deployer) Deploy(obj *unstructured.Unstructured) error {\n\tfound := &unstructured.Unstructured{}\n\tfound.SetGroupVersionKind(obj.GroupVersionKind())\n\terr := d.client.Get(context.TODO(), types.NamespacedName{Name: obj.GetName(), Namespace: obj.GetNamespace()}, found)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\tlog.Info(\"Create\", \"Kind:\", obj.GroupVersionKind(), \"Name:\", obj.GetName())\n\t\t\treturn d.client.Create(context.TODO(), obj)\n\t\t}\n\t\treturn err\n\t}\n\n\t// if resource has annotation skip-creation-if-exist: true, don't update it to keep customized changes from users\n\tmetadata, ok := obj.Object[\"metadata\"].(map[string]interface{})\n\tif ok {\n\t\tannotations, ok := metadata[\"annotations\"].(map[string]interface{})\n\t\tif ok && annotations != nil && annotations[config.AnnotationSkipCreation] != nil {\n\t\t\tif strings.ToLower(annotations[config.AnnotationSkipCreation].(string)) == \"true\" {\n\t\t\t\tlog.Info(\"Skip creation\", \"Kind:\", obj.GroupVersionKind(), \"Name:\", obj.GetName())\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\n\tdeployerFn, ok := d.deployerFns[found.GetKind()]\n\tif ok {\n\t\treturn deployerFn(obj, found)\n\t}\n\treturn nil\n}", "func (c *teamClient) WaitForDeployment(logger *log.Entry, resource unstructured.Unstructured, deadline time.Time) error {\n\tvar cur *apps.Deployment\n\tvar nova *apps.Deployment\n\tvar err error\n\tvar resourceVersion int\n\tvar updated bool\n\n\tlogger = logger.WithFields(log.Fields{\n\t\t\"application\": resource.GetName(),\n\t\t\"namespace\": resource.GetNamespace(),\n\t})\n\n\tcli := c.structuredClient.AppsV1().Deployments(resource.GetNamespace())\n\n\t// For Naiserator applications, rely on Naiserator set a terminal rollout status.\n\tgvk := resource.GroupVersionKind()\n\tif gvk.Kind == \"Application\" && gvk.Group == \"nais.io\" {\n\t\treturn c.waitForApplication(logger, resource, deadline)\n\t}\n\n\t// For native Kubernetes deployment objects, get the current deployment object.\n\tfor deadline.After(time.Now()) {\n\t\tcur, err = cli.Get(resource.GetName(), metav1.GetOptions{})\n\t\tif err == nil {\n\t\t\tresourceVersion, _ = strconv.Atoi(cur.GetResourceVersion())\n\t\t\tlogger.Tracef(\"Found current deployment at version %d: %s\", resourceVersion, cur.GetSelfLink())\n\t\t} else if errors.IsNotFound(err) {\n\t\t\tlogger.Tracef(\"Deployment '%s' in namespace '%s' is not currently present in the cluster.\", resource.GetName(), resource.GetNamespace())\n\t\t} else {\n\t\t\tlogger.Tracef(\"Recoverable error while polling for deployment object: %s\", err)\n\t\t\ttime.Sleep(requestInterval)\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\n\t// Wait until the new deployment object is present in the cluster.\n\tfor deadline.After(time.Now()) {\n\t\tnova, err = cli.Get(resource.GetName(), metav1.GetOptions{})\n\t\tif err != 
nil {\n\t\t\ttime.Sleep(requestInterval)\n\t\t\tcontinue\n\t\t}\n\n\t\trv, _ := strconv.Atoi(nova.GetResourceVersion())\n\t\tif rv > resourceVersion {\n\t\t\tlogger.Tracef(\"New deployment appeared at version %d: %s\", rv, cur.GetSelfLink())\n\t\t\tresourceVersion = rv\n\t\t\tupdated = true\n\t\t}\n\n\t\tif updated && deploymentComplete(nova, &nova.Status) {\n\t\t\treturn nil\n\t\t}\n\n\t\tlogger.WithFields(log.Fields{\n\t\t\t\"deployment_replicas\": nova.Status.Replicas,\n\t\t\t\"deployment_updated_replicas\": nova.Status.UpdatedReplicas,\n\t\t\t\"deployment_available_replicas\": nova.Status.AvailableReplicas,\n\t\t\t\"deployment_observed_generation\": nova.Status.ObservedGeneration,\n\t\t}).Tracef(\"Still waiting for deployment to finish rollout...\")\n\n\t\ttime.Sleep(requestInterval)\n\t}\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%s; last error was: %s\", ErrDeploymentTimeout, err)\n\t}\n\n\treturn ErrDeploymentTimeout\n}" ]
[ "0.69726676", "0.6918134", "0.6861107", "0.6786004", "0.6662607", "0.6639795", "0.6584675", "0.6517535", "0.65127385", "0.64983946", "0.6455542", "0.64240986", "0.6340011", "0.6307846", "0.63056487", "0.6301531", "0.62912184", "0.6233751", "0.6231887", "0.62260896", "0.62260205", "0.6214806", "0.62050414", "0.6189618", "0.61504275", "0.61315244", "0.60796255", "0.60608715", "0.6053624", "0.600363", "0.6002015", "0.59940696", "0.5949882", "0.59443396", "0.59417665", "0.5940021", "0.5936066", "0.59325945", "0.59323287", "0.59317744", "0.5918441", "0.58829725", "0.58547276", "0.5845414", "0.58255935", "0.58111876", "0.58069533", "0.5783837", "0.5777101", "0.57448274", "0.5738966", "0.57383376", "0.5735692", "0.56992936", "0.56884897", "0.5687936", "0.56858695", "0.5683238", "0.56743664", "0.5665906", "0.5664662", "0.56642884", "0.5663586", "0.5645821", "0.5637228", "0.5634851", "0.56347364", "0.5634682", "0.561919", "0.5615409", "0.5614711", "0.5614345", "0.56141835", "0.56119597", "0.5605202", "0.56002194", "0.5581692", "0.55642277", "0.55565757", "0.55455846", "0.55454135", "0.55439836", "0.55433685", "0.5529161", "0.5525033", "0.55208796", "0.5511409", "0.5495388", "0.5494678", "0.5488861", "0.54811436", "0.54783803", "0.547797", "0.5461672", "0.54538715", "0.5447281", "0.5440137", "0.54283327", "0.54282045", "0.54262996" ]
0.7065447
0
Get returns the state of the app deployment encoded in the workload.
func (a *Workload) Get(ctx context.Context, deployment *appsv1.Deployment) *models.AppDeployment {
	active := false
	route := ""
	stageID := ""
	status := ""
	username := ""

	// Query application deployment for stageID and status (ready vs desired replicas)
	deploymentSelector := fmt.Sprintf("app.kubernetes.io/part-of=%s,app.kubernetes.io/name=%s", a.app.Org, a.app.Name)
	deploymentListOptions := metav1.ListOptions{
		LabelSelector: deploymentSelector,
	}
	deployments, err := a.cluster.Kubectl.AppsV1().Deployments(a.app.Org).List(ctx, deploymentListOptions)
	if err != nil {
		status = pkgerrors.Wrap(err, "failed to get Deployment status").Error()
	} else if len(deployments.Items) < 1 {
		status = "0/0"
	} else {
		status = fmt.Sprintf("%d/%d",
			deployments.Items[0].Status.ReadyReplicas,
			deployments.Items[0].Status.Replicas)
		stageID = deployments.Items[0].
			Spec.Template.ObjectMeta.Labels["epinio.suse.org/stage-id"]
		username = deployments.Items[0].Spec.Template.ObjectMeta.Labels["app.kubernetes.io/created-by"]
		active = true
	}

	routes, err := a.cluster.ListIngressRoutes(ctx, a.app.Org, names.IngressName(a.app.Name))
	if err != nil {
		route = err.Error()
	} else {
		route = routes[0]
	}

	return &models.AppDeployment{
		Active:   active,
		Username: username,
		StageID:  stageID,
		Status:   status,
		Route:    route,
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (d *Deployment) Get(namespace, name string) (*appsv1.Deployment, error) {\n\tdeploy, err := d.cs.AppsV1().Deployments(namespace).Get(context.Background(), name, metav1.GetOptions{})\n\tif err != nil {\n\t\tif k8serrors.IsNotFound(err) {\n\t\t\treturn nil, k8serror.ErrNotFound\n\t\t}\n\t\treturn nil, err\n\t}\n\n\treturn deploy, nil\n}", "func (app *ApplicationStatus) Get() int {\n\tapp.Lock()\n\tdefer app.Unlock()\n\n\treturn app.code\n}", "func Get(dev *model.Dev, namespace string, c kubernetes.Interface) (*appsv1.Deployment, error) {\n\tif namespace == \"\" {\n\t\treturn nil, fmt.Errorf(\"empty namespace\")\n\t}\n\n\tvar d *appsv1.Deployment\n\tvar err error\n\n\tif len(dev.Labels) == 0 {\n\t\td, err = c.AppsV1().Deployments(namespace).Get(dev.Name, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\tlog.Debugf(\"error while retrieving deployment %s/%s: %s\", namespace, dev.Name, err)\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tdeploys, err := c.AppsV1().Deployments(namespace).List(\n\t\t\tmetav1.ListOptions{\n\t\t\t\tLabelSelector: dev.LabelsSelector(),\n\t\t\t},\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif len(deploys.Items) == 0 {\n\t\t\treturn nil, fmt.Errorf(\"deployment for labels '%s' not found\", dev.LabelsSelector())\n\t\t}\n\t\tif len(deploys.Items) > 1 {\n\t\t\treturn nil, fmt.Errorf(\"Found '%d' deployments for labels '%s' instead of 1\", len(deploys.Items), dev.LabelsSelector())\n\t\t}\n\t\td = &deploys.Items[0]\n\t}\n\n\treturn d, nil\n}", "func (n *NodeClient) Get(twin, deployment uint32) (dl gridtypes.Deployment, err error) {\n\turl := n.url(\"deployment\", fmt.Sprint(twin), fmt.Sprint(deployment))\n\n\trequest, err := http.NewRequest(http.MethodGet, url, nil)\n\tif err != nil {\n\t\treturn dl, errors.Wrap(err, \"failed to build request\")\n\t}\n\n\tif err := n.client.authorize(request); err != nil {\n\t\treturn dl, errors.Wrap(err, \"failed to sign request\")\n\t}\n\n\tresponse, err := http.DefaultClient.Do(request)\n\tif err != nil {\n\t\treturn dl, err\n\t}\n\n\tif err := n.response(response, &dl, http.StatusOK); err != nil {\n\t\treturn dl, err\n\t}\n\n\treturn dl, nil\n}", "func (s *Fs) get(path string) (gridtypes.Deployment, error) {\n\tvar wl gridtypes.Deployment\n\tfile, err := os.Open(path)\n\tif os.IsNotExist(err) {\n\t\treturn wl, errors.Wrapf(provision.ErrDeploymentNotExists, \"deployment '%s' does not exist\", path)\n\t} else if err != nil {\n\t\treturn wl, errors.Wrap(err, \"failed to open workload file\")\n\t}\n\tdefer file.Close()\n\treader, err := versioned.NewReader(file)\n\tif err != nil {\n\t\treturn wl, errors.Wrap(err, \"failed to load workload\")\n\t}\n\tversion := reader.Version()\n\tif !version.EQ(deploymentSchemaV1) {\n\t\treturn wl, fmt.Errorf(\"invalid workload version\")\n\t}\n\n\tif err := json.NewDecoder(reader).Decode(&wl); err != nil {\n\t\treturn wl, errors.Wrap(err, \"failed to read workload data\")\n\t}\n\n\treturn wl, nil\n}", "func GetAppStatus(appName string) string {\n\tif !common.IsDeployed(appName) {\n\t\treturn \"NOT DEPLOYED\"\n\t}\n\n\terr := apps.CommandLocked([]string{appName})\n\tif err == nil {\n\t\treturn \"BUILDING\"\n\t}\n\n\tcontainerIDs, err := common.GetAppContainerIDs(appName, \"\")\n\tif err != nil {\n\t\tlog.ErrorLogger.Println(err.Error())\n\t\treturn \"\"\n\t}\n\tfor _, containerID := range containerIDs {\n\t\tstatus, err := common.DockerInspect(containerID, \"'{{.State.Status}}'\")\n\t\tif err != nil {\n\t\t\tlog.ErrorLogger.Println(err.Error())\n\t\t\treturn \"\"\n\t\t}\n\t\tif 
status != \"exited\" {\n\t\t\treturn \"DEPLOYED\"\n\t\t}\n\t}\n\n\treturn \"STOPPED\"\n}", "func (ai *AppInteractor) Get(id string) (domain.App, error) {\n\treturn ai.AppRepository.Get(id)\n}", "func GetDeployment(ns string, name string) Deployment {\n\tclient, err := LoadClient(Kubeconfig)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tversion := GetEnv(\"KUBERNETES_VERSION\", K8sVersion)\n\tif version == \"v1.8\" || version == \"v1.7\" || version == \"v1.6\" {\n\t\tvar deployment appsv1beta1.Deployment\n\t\tif err := client.Get(context.Background(), ns, name, &deployment); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\t//Name\n\t\tn := *deployment.Metadata.Name\n\t\tnc := TrimQuotes(n)\n\t\t// Namespace\n\t\tns := *deployment.Metadata.Namespace\n\t\tnsc := TrimQuotes(ns)\n\t\t// PodWanted\n\t\tpw := *deployment.Status.Replicas\n\t\t// PodRunning\n\t\tpr := *deployment.Status.AvailableReplicas\n\t\tst := \"Ready\"\n\t\tif pw != pr {\n\t\t\tst = \"NotReady\"\n\t\t}\n\t\t// Put in slice\n\t\td := Deployment{Status: st, Name: nc, Namespace: nsc, PodWanted: pw, PodRunning: pr}\n\t\treturn d\n\t}\n\tvar deployment appsv1.Deployment\n\tif err := client.Get(context.Background(), ns, name, &deployment); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t//Name\n\tn := *deployment.Metadata.Name\n\tnc := TrimQuotes(n)\n\t// Namespace\n\tns = *deployment.Metadata.Namespace\n\tnsc := TrimQuotes(ns)\n\t// PodWanted\n\tpw := *deployment.Status.Replicas\n\t// PodRunning\n\tpr := *deployment.Status.AvailableReplicas\n\tst := \"Ready\"\n\tif pw != pr {\n\t\tst = \"NotReady\"\n\t}\n\t// Put in slice\n\td := Deployment{Status: st, Name: nc, Namespace: nsc, PodWanted: pw, PodRunning: pr}\n\treturn d\n}", "func (s appDeploymentNamespaceLister) Get(name string) (*v1beta1.AppDeployment, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1beta1.Resource(\"appdeployment\"), name)\n\t}\n\treturn obj.(*v1beta1.AppDeployment), nil\n}", "func (m *AppVulnerabilityTask) GetAppVersion()(*string) {\n val, err := m.GetBackingStore().Get(\"appVersion\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}", "func (a *Workload) Deployment(ctx context.Context) (*appsv1.Deployment, error) {\n\treturn a.cluster.Kubectl.AppsV1().Deployments(a.app.Org).Get(\n\t\tctx, a.app.Name, metav1.GetOptions{},\n\t)\n}", "func GetApp(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *AppState, opts ...pulumi.ResourceOption) (*App, error) {\n\tvar resource App\n\terr := ctx.ReadResource(\"aws-native:sagemaker:App\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (client DeploymentsClient) Get(ctx context.Context, resourceGroupName string, serviceName string, appName string, deploymentName string) (result DeploymentResource, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/DeploymentsClient.Get\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.Response.Response != nil {\n\t\t\t\tsc = result.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\treq, err := client.GetPreparer(ctx, resourceGroupName, serviceName, appName, deploymentName)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"appplatform.DeploymentsClient\", \"Get\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := 
client.GetSender(req)\n\tif err != nil {\n\t\tresult.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"appplatform.DeploymentsClient\", \"Get\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult, err = client.GetResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"appplatform.DeploymentsClient\", \"Get\", resp, \"Failure responding to request\")\n\t\treturn\n\t}\n\n\treturn\n}", "func (f *FUOTADeploymentAPI) Get(ctx context.Context, req *pb.GetFUOTADeploymentRequest) (*pb.GetFUOTADeploymentResponse, error) {\n\tid, err := uuid.FromString(req.Id)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.InvalidArgument, \"id: %s\", err)\n\t}\n\n\tif valid, err := fuotaCred.NewValidator().ValidateFUOTADeploymentAccess(ctx, auth.Read, id); !valid || err != nil {\n\t\treturn nil, status.Errorf(codes.Unauthenticated, \"authentication failed: %s\", err)\n\t}\n\n\tfd, err := f.st.GetFUOTADeployment(ctx, id, false)\n\tif err != nil {\n\t\treturn nil, helpers.ErrToRPCError(err)\n\t}\n\n\tresp := pb.GetFUOTADeploymentResponse{\n\t\tFuotaDeployment: &pb.FUOTADeployment{\n\t\t\tId: fd.ID.String(),\n\t\t\tName: fd.Name,\n\t\t\tDr: uint32(fd.DR),\n\t\t\tFrequency: uint32(fd.Frequency),\n\t\t\tPayload: fd.Payload,\n\t\t\tRedundancy: uint32(fd.Redundancy),\n\t\t\tMulticastTimeout: uint32(fd.MulticastTimeout),\n\t\t\tUnicastTimeout: ptypes.DurationProto(fd.UnicastTimeout),\n\t\t\tState: string(fd.State),\n\t\t},\n\t}\n\n\tresp.CreatedAt = timestamppb.New(fd.CreatedAt)\n\tresp.UpdatedAt = timestamppb.New(fd.UpdatedAt)\n\tresp.FuotaDeployment.NextStepAfter = timestamppb.New(fd.NextStepAfter)\n\n\tswitch fd.GroupType {\n\tcase FUOTADeploymentGroupTypeB:\n\t\tresp.FuotaDeployment.GroupType = pb.MulticastGroupType_CLASS_B\n\tcase FUOTADeploymentGroupTypeC:\n\t\tresp.FuotaDeployment.GroupType = pb.MulticastGroupType_CLASS_C\n\tdefault:\n\t\treturn nil, status.Errorf(codes.Internal, \"unexpected group-type: %s\", fd.GroupType)\n\t}\n\n\treturn &resp, nil\n}", "func (m *SecurityActionState) GetAppId()(*string) {\n val, err := m.GetBackingStore().Get(\"appId\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}", "func (a GetSLITriggeredAdapter) GetDeployment() string {\n\treturn a.event.Deployment\n}", "func (s blueGreenDeploymentNamespaceLister) Get(name string) (*v1.BlueGreenDeployment, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1.Resource(\"bluegreendeployment\"), name)\n\t}\n\treturn obj.(*v1.BlueGreenDeployment), nil\n}", "func DeployApp(w http.ResponseWriter, r *http.Request) {\n\tparams := mux.Vars(r)\n\tstatus := params[\"status\"]\n\tlog.Printf(\"Params: %s\\n\", params)\n\n\tclientset, err := getConfig()\n\tif err != nil {\n\t\tlog.Fatalln(\"failed to get the config:\", err)\n\t}\n\n\tdeploymentsClient := clientset.AppsV1().Deployments(namespace)\n\n\tdeploymentName := params[\"app\"] + \"-deployment\"\n\n\tlist, err := deploymentsClient.List(metav1.ListOptions{})\n\tif err != nil {\n\t\tlog.Fatalln(\"failed to get deployments:\", err)\n\t}\n\n\tcontainers := []apiv1.Container{createContainer(params[\"app\"], repository+\"/\"+params[\"app\"]+appversion)}\n\n\tif status == \"true\" {\n\t\tfor _, d := range list.Items {\n\t\t\tif d.Name == deploymentName && *d.Spec.Replicas > 0 {\n\t\t\t\tlog.Printf(\"Deployment already running: %s\\n\", 
deploymentName)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tnodeLabel(params[\"node\"], \"app\", params[\"app\"], \"add\")\n\n\t\tdeployment := &appsv1.Deployment{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: deploymentName,\n\t\t\t},\n\t\t\tSpec: appsv1.DeploymentSpec{\n\t\t\t\tReplicas: int32Ptr(1),\n\t\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\t\t\"app\": params[\"app\"],\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tTemplate: apiv1.PodTemplateSpec{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\t\"app\": params[\"app\"],\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tSpec: apiv1.PodSpec{\n\t\t\t\t\t\tContainers: containers,\n\t\t\t\t\t\tNodeSelector: map[string]string{\n\t\t\t\t\t\t\t\"app\": params[\"app\"],\n\t\t\t\t\t\t},\n\t\t\t\t\t\tVolumes: []apiv1.Volume{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"mem\",\n\t\t\t\t\t\t\t\tVolumeSource: apiv1.VolumeSource{\n\t\t\t\t\t\t\t\t\tHostPath: &apiv1.HostPathVolumeSource{\n\t\t\t\t\t\t\t\t\t\tPath: \"/dev/mem\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"gpiomem\",\n\t\t\t\t\t\t\t\tVolumeSource: apiv1.VolumeSource{\n\t\t\t\t\t\t\t\t\tHostPath: &apiv1.HostPathVolumeSource{\n\t\t\t\t\t\t\t\t\t\tPath: \"/dev/gpiomem\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\t// Create Deployment\n\t\tfmt.Println(\"Creating deployment...\")\n\t\tresult, err := deploymentsClient.Create(deployment)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tfmt.Printf(\"Created deployment %q.\\n\", result.GetObjectMeta().GetName())\n\n\t} else {\n\n\t\tnodeLabel(params[\"node\"], \"app\", params[\"app\"], \"del\")\n\n\t\tfmt.Println(\"Deleting deployment...\")\n\t\tdeletePolicy := metav1.DeletePropagationForeground\n\t\tif err := deploymentsClient.Delete(deploymentName, &metav1.DeleteOptions{\n\t\t\tPropagationPolicy: &deletePolicy,\n\t\t}); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tfmt.Println(\"Deleted deployment.\")\n\t}\n\n}", "func get(ctx *quorumContext, w http.ResponseWriter, r *http.Request) (int, error) {\n\tif ctx.session == nil {\n\t\tfmt.Fprintf(w, \"Cannot get value before deploying contract\\n\")\n\t\treturn 400, nil\n\t}\n\n\tval, err := ctx.session.Get()\n\tif err != nil {\n\t\tfmt.Fprintf(w, \"Failed to get stored contract value: %v\", err)\n\t\treturn 500, err\n\t}\n\tfmt.Fprintf(w, \"Stored value: %v\\n\", val)\n\treturn 200, nil\n}", "func (m *AppVulnerabilityTask) GetAppPublisher()(*string) {\n val, err := m.GetBackingStore().Get(\"appPublisher\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}", "func (ar AppResource) Get(ctx context.Context, r *http.Request) (int, interface{}) {\n\terr := requireScope(ctx, \"read:app\")\n\tif err != nil {\n\t\treturn http.StatusUnauthorized, err\n\t}\n\tu := getCurrentUser(ctx)\n\tmctx := getModelContext(ctx)\n\taId := params(ctx, \"id\")\n\tif aId == \"\" {\n\t\treturn http.StatusBadRequest, \"app id not given\"\n\t}\n\tid, err := strconv.Atoi(aId)\n\tif err != nil {\n\t\treturn http.StatusBadRequest, err\n\t}\n\tqueryApp, err := app.GetApp(mctx, id)\n\tif err != nil {\n\t\tif err == app.ErrAppNotFound {\n\t\t\treturn http.StatusBadRequest, \"app doesn't exist\"\n\t\t}\n\t\treturn http.StatusBadRequest, err\n\t}\n\trole, err := group.GetRoleOfUser(mctx, u.GetId(), queryApp.AdminGroupId)\n\tif role != group.ADMIN {\n\t\treturn 
http.StatusForbidden, \"only admins of the app can read it\"\n\t}\n\tresp := &App{\n\t\tId: queryApp.Id,\n\t\tFullName: queryApp.FullName,\n\t\tSecret: queryApp.Secret,\n\t\tRedirectUri: queryApp.RedirectUri,\n\t}\n\treturn http.StatusOK, resp\n}", "func (d *DeploymentStatusEvent) GetDeployment() *Deployment {\n\tif d == nil {\n\t\treturn nil\n\t}\n\treturn d.Deployment\n}", "func (a ProblemAdapter) GetDeployment() string {\n\treturn \"\"\n}", "func (s *APIServer) GetApp(c *gin.Context) {\n\tenvName := c.Param(\"envName\")\n\tenvMeta, err := env.GetEnvByName(envName)\n\tif err != nil {\n\t\tutil.HandleError(c, util.StatusInternalServerError, err)\n\t\treturn\n\t}\n\tnamespace := envMeta.Namespace\n\tappName := c.Param(\"appName\")\n\tctx := util.GetContext(c)\n\tapplicationMeta, err := common.RetrieveApplicationStatusByName(ctx, s.KubeClient, appName, namespace)\n\tif err != nil {\n\t\tutil.HandleError(c, util.StatusInternalServerError, err)\n\t\treturn\n\t}\n\tutil.AssembleResponse(c, applicationMeta, nil)\n}", "func getState(app *BaseApp, isCheckTx bool) *state {\n\tif isCheckTx {\n\t\treturn app.checkState\n\t}\n\n\treturn app.deliverState\n}", "func (s *Fs) Get(twin uint32, deployment uint64) (gridtypes.Deployment, error) {\n\ts.m.RLock()\n\tdefer s.m.RUnlock()\n\n\tpath := s.rooted(filepath.Join(fmt.Sprint(twin), fmt.Sprint(deployment)))\n\n\treturn s.get(path)\n}", "func (s arangoDeploymentNamespaceLister) Get(name string) (*v1.ArangoDeployment, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1.Resource(\"arangodeployment\"), name)\n\t}\n\treturn obj.(*v1.ArangoDeployment), nil\n}", "func GetDeployment() *appsv1.Deployment {\n\tdeploy1 := &appsv1.Deployment{\n\t\tObjectMeta: v1.ObjectMeta{\n\t\t\tName: \"deployment1\",\n\t\t\tNamespace: \"default\",\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tSelector: &v1.LabelSelector{\n\t\t\t\tMatchLabels: map[string]string{\"deployment\": \"deployment1-deployment\"},\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: v1.ObjectMeta{Labels: map[string]string{\"deployment\": \"deployment1-deployment\"}},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"nginx\",\n\t\t\t\t\t\t\tImage: \"nginx\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\treturn deploy1\n}", "func (m *AppVulnerabilityTask) GetAppName()(*string) {\n val, err := m.GetBackingStore().Get(\"appName\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}", "func Get(c *deis.Client, appID string) (api.App, error) {\n\tu := fmt.Sprintf(\"/v2/apps/%s/\", appID)\n\n\tres, reqErr := c.Request(\"GET\", u, nil)\n\tif reqErr != nil && !deis.IsErrAPIMismatch(reqErr) {\n\t\treturn api.App{}, reqErr\n\t}\n\tdefer res.Body.Close()\n\n\tapp := api.App{}\n\n\tif err := json.NewDecoder(res.Body).Decode(&app); err != nil {\n\t\treturn api.App{}, err\n\t}\n\n\treturn app, reqErr\n}", "func (r *DeviceInstallStateRequest) Get(ctx context.Context) (resObj *DeviceInstallState, err error) {\n\tvar query string\n\tif r.query != nil {\n\t\tquery = \"?\" + r.query.Encode()\n\t}\n\terr = r.JSONRequest(ctx, \"GET\", query, nil, &resObj)\n\treturn\n}", "func (t *SignalingState) Get() SignalingState {\n\treturn SignalingState(atomic.LoadInt32((*int32)(t)))\n}", "func (d *DeploymentEvent) GetDeployment() *Deployment {\n\tif d == nil 
{\n\t\treturn nil\n\t}\n\treturn d.Deployment\n}", "func GetAppStateContent(key string) (content string, err error) {\n\te := db.GetEngine(db.DefaultContext)\n\tappState := &AppState{ID: key}\n\thas, err := e.Get(appState)\n\tif err != nil {\n\t\treturn \"\", err\n\t} else if !has {\n\t\treturn \"\", nil\n\t}\n\treturn appState.Content, nil\n}", "func GetDeployment(t *testing.T, k8client client.Client, deployName string) (*appsv1.Deployment, error) {\n\tdeploy := &appsv1.Deployment{}\n\tns := \"default\"\n\terr := k8client.Get(goctx.TODO(), types.NamespacedName{Namespace: ns, Name: deployName}, deploy)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Deployment doesnt exist: %v\", err)\n\t}\n\n\treturn deploy, nil\n}", "func (r *RuntimeServer) GetDeployInfo(ctx context.Context, re *pb.ServiceRequest) (*pb.DeployInfo, error) {\n\tvar deployinfo pb.DeployInfo\n\tappService := r.store.GetAppService(re.ServiceId)\n\tif appService != nil {\n\t\tdeployinfo.Namespace = appService.TenantID\n\t\tif appService.GetStatefulSet() != nil {\n\t\t\tdeployinfo.Statefuleset = appService.GetStatefulSet().Name\n\t\t\tdeployinfo.StartTime = appService.GetStatefulSet().ObjectMeta.CreationTimestamp.Format(time.RFC3339)\n\t\t}\n\t\tif appService.GetDeployment() != nil {\n\t\t\tdeployinfo.Deployment = appService.GetDeployment().Name\n\t\t\tdeployinfo.StartTime = appService.GetDeployment().ObjectMeta.CreationTimestamp.Format(time.RFC3339)\n\t\t}\n\t\tif services := appService.GetServices(false); services != nil {\n\t\t\tservice := make(map[string]string, len(services))\n\t\t\tfor _, s := range services {\n\t\t\t\tservice[s.Name] = s.Name\n\t\t\t}\n\t\t\tdeployinfo.Services = service\n\t\t}\n\t\tif endpoints := appService.GetEndpoints(false); endpoints != nil &&\n\t\t\tappService.AppServiceBase.ServiceKind == model.ServiceKindThirdParty {\n\t\t\teps := make(map[string]string, len(endpoints))\n\t\t\tfor _, s := range endpoints {\n\t\t\t\teps[s.Name] = s.Name\n\t\t\t}\n\t\t\tdeployinfo.Endpoints = eps\n\t\t}\n\t\tif secrets := appService.GetSecrets(false); secrets != nil {\n\t\t\tsecretsinfo := make(map[string]string, len(secrets))\n\t\t\tfor _, s := range secrets {\n\t\t\t\tsecretsinfo[s.Name] = s.Name\n\t\t\t}\n\t\t\tdeployinfo.Secrets = secretsinfo\n\t\t}\n\t\tif ingresses := appService.GetIngress(false); ingresses != nil {\n\t\t\tingress := make(map[string]string, len(ingresses))\n\t\t\tfor _, s := range ingresses {\n\t\t\t\tingress[s.Name] = s.Name\n\t\t\t}\n\t\t\tdeployinfo.Ingresses = ingress\n\t\t}\n\t\tif pods := appService.GetPods(false); pods != nil {\n\t\t\tpodNames := make(map[string]string, len(pods))\n\t\t\tfor _, s := range pods {\n\t\t\t\tpodNames[s.Name] = s.Name\n\t\t\t}\n\t\t\tdeployinfo.Pods = podNames\n\t\t}\n\t\tif rss := appService.GetReplicaSets(); rss != nil {\n\t\t\trsnames := make(map[string]string, len(rss))\n\t\t\tfor _, s := range rss {\n\t\t\t\trsnames[s.Name] = s.Name\n\t\t\t}\n\t\t\tdeployinfo.Replicatset = rsnames\n\t\t}\n\t\tdeployinfo.Status = appService.GetServiceStatus()\n\t}\n\treturn &deployinfo, nil\n}", "func (ds *MySQLDatastore) GetApp(ctx context.Context, name string) (*models.App, error) {\n\trow := ds.db.QueryRow(`SELECT name, config FROM apps WHERE name=?`, name)\n\n\tvar resName string\n\tvar config string\n\terr := row.Scan(&resName, &config)\n\n\tres := &models.App{\n\t\tName: resName,\n\t}\n\n\tjson.Unmarshal([]byte(config), &res.Config)\n\n\tif err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn nil, models.ErrAppsNotFound\n\t\t}\n\t\treturn nil, 
err\n\t}\n\n\treturn res, nil\n}", "func (t *SignalTable) SignalStateGet() ([]byte, error) {\n\n buff, err := json.Marshal(t.state.GetAll())\n\n return buff, err\n}", "func (r *DeviceCompliancePolicyStateRequest) Get(ctx context.Context) (resObj *DeviceCompliancePolicyState, err error) {\n\tvar query string\n\tif r.query != nil {\n\t\tquery = \"?\" + r.query.Encode()\n\t}\n\terr = r.JSONRequest(ctx, \"GET\", query, nil, &resObj)\n\treturn\n}", "func (m *MacOSSoftwareUpdateStateSummary) GetState()(*MacOSSoftwareUpdateState) {\n val, err := m.GetBackingStore().Get(\"state\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*MacOSSoftwareUpdateState)\n }\n return nil\n}", "func DeployApp(ctx context.Context, cluster *tester.ClusterContext, namespace string, replicas int32) error {\n\tns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}\n\tif err := cluster.ControllerClient.Create(ctx, ns); err != nil {\n\t\treturn err\n\t}\n\n\tif err := testutils.OffloadNamespace(cluster.KubeconfigPath, namespace); err != nil {\n\t\treturn err\n\t}\n\n\tstatefulSet := &appsv1.StatefulSet{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: StatefulSetName,\n\t\t\tNamespace: namespace,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"app\": StatefulSetName,\n\t\t\t},\n\t\t},\n\t\tSpec: appsv1.StatefulSetSpec{\n\t\t\tReplicas: pointer.Int32(replicas),\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\t\"app\": StatefulSetName,\n\t\t\t\t},\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"app\": StatefulSetName,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"tester\",\n\t\t\t\t\t\t\tImage: image,\n\t\t\t\t\t\t\tResources: testutils.ResourceRequirements(),\n\t\t\t\t\t\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"liqo-storage-claim\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/usr/share/nginx/html\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t// put pods in anti-affinity, and prefer local cluster. 
In this way one pod will be local, the other remote\n\t\t\t\t\tAffinity: &corev1.Affinity{\n\t\t\t\t\t\tNodeAffinity: &corev1.NodeAffinity{\n\t\t\t\t\t\t\tPreferredDuringSchedulingIgnoredDuringExecution: []corev1.PreferredSchedulingTerm{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tWeight: 2,\n\t\t\t\t\t\t\t\t\tPreference: corev1.NodeSelectorTerm{\n\t\t\t\t\t\t\t\t\t\tMatchExpressions: []corev1.NodeSelectorRequirement{\n\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\tKey: consts.TypeLabel,\n\t\t\t\t\t\t\t\t\t\t\t\tOperator: corev1.NodeSelectorOpDoesNotExist,\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tPodAntiAffinity: &corev1.PodAntiAffinity{\n\t\t\t\t\t\t\tRequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tLabelSelector: &metav1.LabelSelector{\n\t\t\t\t\t\t\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\t\t\t\t\t\t\t\"app\": StatefulSetName,\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\tTopologyKey: \"kubernetes.io/hostname\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVolumeClaimTemplates: []corev1.PersistentVolumeClaim{\n\t\t\t\t{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName: \"liqo-storage-claim\",\n\t\t\t\t\t},\n\t\t\t\t\tSpec: corev1.PersistentVolumeClaimSpec{\n\t\t\t\t\t\tAccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},\n\t\t\t\t\t\tResources: corev1.ResourceRequirements{\n\t\t\t\t\t\t\tRequests: corev1.ResourceList{\n\t\t\t\t\t\t\t\tcorev1.ResourceStorage: resource.MustParse(\"25Mi\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStorageClassName: pointer.String(\"liqo\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\treturn cluster.ControllerClient.Create(ctx, statefulSet)\n}", "func (app *Application) GetStatus() *Status {\n if app.status == nil {\n app.status = app.Get(\"status\").(*Status)\n }\n\n return app.status\n}", "func (app *App) Get(uuid gocql.UUID) (a *teoapi.Application, err error) {\n\ta = &teoapi.Application{UUID: uuid}\n\tstmt, names := qb.Select(\"applications\").Where(qb.Eq(\"uuid\")).Limit(1).ToCql()\n\tq := gocqlx.Query(app.tre.session.Query(stmt), names).BindMap(qb.M{\n\t\t\"uuid\": uuid,\n\t})\n\tif err = q.GetRelease(a); err != nil {\n\t\tfmt.Printf(\"Get Error: %s\\n\", err.Error())\n\t\treturn\n\t}\n\ta.Com, err = app.tre.com.List(uuid)\n\treturn\n}", "func GetApplication(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *ApplicationState, opts ...pulumi.ResourceOption) (*Application, error) {\n\tvar resource Application\n\terr := ctx.ReadResource(\"aws:kinesisanalyticsv2/application:Application\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (m *ServiceMgr) getDeployedApps(w http.ResponseWriter, r *http.Request) {\n\tlogPrefix := \"ServiceMgr::getDeployedApps\"\n\n\tif !m.validateAuth(w, r, EventingPermissionManage) {\n\t\treturn\n\t}\n\n\taudit.Log(auditevent.ListDeployed, r, nil)\n\n\tappDeployedNodesCounter, _, appPausingNodesCounter, numEventingNodes, info := m.getAppList()\n\tif info.Code != m.statusCodes.ok.Code {\n\t\tm.sendErrorInfo(w, info)\n\t\treturn\n\t}\n\n\tdeployedApps := make(map[string]string)\n\tfor app, numNodesDeployed := range appDeployedNodesCounter {\n\t\tif appPausingNodesCounter != nil {\n\t\t\t_, ok := appPausingNodesCounter[app]\n\t\t\tif numNodesDeployed == numEventingNodes && 
!ok {\n\t\t\t\tdeployedApps[app] = \"\"\n\t\t\t}\n\t\t} else {\n\t\t\tif numNodesDeployed == numEventingNodes {\n\t\t\t\tdeployedApps[app] = \"\"\n\t\t\t}\n\t\t}\n\t}\n\n\tdata, err := json.MarshalIndent(deployedApps, \"\", \" \")\n\tif err != nil {\n\t\tlogging.Errorf(\"%s failed to marshal list of deployed apps, err: %v\", logPrefix, err)\n\n\t\tinfo.Code = m.statusCodes.errMarshalResp.Code\n\t\tinfo.Info = fmt.Sprintf(\"Unable to marshall response, err: %v\", err)\n\t\tm.sendErrorInfo(w, info)\n\t\treturn\n\t}\n\n\tw.Header().Add(headerKey, strconv.Itoa(m.statusCodes.ok.Code))\n\tfmt.Fprintf(w, \"%s\", string(data))\n}", "func (app *Application) Get() (*Result, error) {\n\tbody, _, err := get(ApplicationsResource + \"/\" + app.ID)\n\tif err != nil {\n\t\tresult := &Result{}\n\t\tjson.Unmarshal(body, result)\n\t\treturn result, err\n\t}\n\terr = json.Unmarshal(body, app)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn nil, nil\n}", "func (s *AppServerV3) GetApp() Application {\n\treturn s.Spec.App\n}", "func (conf *config) GetAppConfig() App {\n\treturn conf.App\n}", "func GetResource(name string, namespace string, kubeclient *kubernetes.Clientset) (string, error) {\n\tif namespace == \"\" {\n\t\tnamespace = \"default\"\n\t}\n\n\topts := metaV1.ListOptions{\n\t\tLimit: 10,\n\t}\n\topts.APIVersion = \"apps/v1\"\n\topts.Kind = \"Deployment\"\n\n\tlist, err := kubeclient.AppsV1().Deployments(namespace).List(opts)\n\tif err != nil {\n\t\treturn \"\", pkgerrors.Wrap(err, \"Get Deployment error\")\n\t}\n\n\tfor _, deployment := range list.Items {\n\t\tif deployment.Name == name {\n\t\t\treturn name, nil\n\t\t}\n\t}\n\treturn \"\", nil\n}", "func (m *ProgramControl) GetStatus()(*string) {\n val, err := m.GetBackingStore().Get(\"status\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}", "func (r *ScaleREST) Get(ctx kapi.Context, name string) (runtime.Object, error) {\n\tdeploymentConfig, err := r.registry.GetDeploymentConfig(ctx, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn api.ScaleFromConfig(deploymentConfig), nil\n}", "func (client *Client) GetDeployment(id int64, req *Request) (*Response, error) {\n\treturn client.Execute(&Request{\n\t\tMethod: \"GET\",\n\t\tPath: fmt.Sprintf(\"%s/%d\", DeploymentsPath, id),\n\t\tQueryParams: req.QueryParams,\n\t\tResult: &GetDeploymentResult{},\n\t})\n}", "func GetDeployment(k8s *Client, ns, name string) (*appsv1.Deployment, error) {\n\tdeploySvc := k8s.AppsV1().Deployments(ns)\n\treturn deploySvc.Get(name, metav1.GetOptions{})\n}", "func (sh *SimHandlerState) Get(c *mango.Context) {\n\tsiminfo := NewSimInfo(c.RouteParams[\"sim_id\"])\n\tsh.GetObject(siminfo, c)\n}", "func (c *Context) AppState() *ApplicationState {\n\treturn c.state\n}", "func GetDeployment(namespace string, deploymentName string) *v1beta1.Deployment {\n\treplicaset := int32(1)\n\treturn &v1beta1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: deploymentName,\n\t\t\tNamespace: namespace,\n\t\t\tLabels: map[string]string{\"firstLabel\": \"temp\"},\n\t\t\tAnnotations: map[string]string{\n\t\t\t\tconstants.ConfigmapUpdateOnChangeAnnotation: deploymentName,\n\t\t\t\tconstants.SecretUpdateOnChangeAnnotation: deploymentName},\n\t\t},\n\t\tSpec: v1beta1.DeploymentSpec{\n\t\t\tReplicas: &replicaset,\n\t\t\tStrategy: v1beta1.DeploymentStrategy{\n\t\t\t\tType: v1beta1.RollingUpdateDeploymentStrategyType,\n\t\t\t},\n\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: 
map[string]string{\"secondLabel\": \"temp\"},\n\t\t\t\t},\n\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tImage: \"tutum/hello-world\",\n\t\t\t\t\t\t\tName: deploymentName,\n\t\t\t\t\t\t\tEnv: []v1.EnvVar{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"BUCKET_NAME\",\n\t\t\t\t\t\t\t\t\tValue: \"test\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func (m *Machine) GetBOSHDeployment(namespace string, name string) (*bdv1.BOSHDeployment, error) {\n\tclient := m.VersionedClientset.BoshdeploymentV1alpha1().BOSHDeployments(namespace)\n\td, err := client.Get(context.Background(), name, metav1.GetOptions{})\n\treturn d, err\n}", "func (h *H) GetWorkload(name string) (string, bool) {\n\tif val, ok := h.InstalledWorkloads[name]; ok {\n\t\treturn val, true\n\t}\n\n\treturn \"\", false\n}", "func (s *AppsServiceOp) GetDeployment(ctx context.Context, appID, deploymentID string) (*Deployment, *Response, error) {\n\tpath := fmt.Sprintf(\"%s/%s/deployments/%s\", appsBasePath, appID, deploymentID)\n\treq, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\troot := new(deploymentRoot)\n\tresp, err := s.client.Do(ctx, req, root)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\treturn root.Deployment, resp, nil\n}", "func (s *DeploymentsService) GetActiveDeployment() (*ClusterBackupDeploymentsModel, error) {\n\treturn s.repository.FindFirst()\n}", "func (n *Netlify) GetDeploy(ctx context.Context, deployID string) (*models.Deploy, error) {\n\tauthInfo := context.GetAuthInfo(ctx)\n\tresp, err := n.Netlify.Operations.GetDeploy(operations.NewGetDeployParams().WithDeployID(deployID), authInfo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp.Payload, nil\n}", "func (a *appHandler) GetApp(w http.ResponseWriter, r *http.Request) {\n\tvar app model.App\n\n\t// TODO : QUERY\n\n\tjsonR, err := json.Marshal(app)\n\tif err != nil {\n\t\ta.httpUtil.WriteJSONInternalServerErrorResponse(w, err)\n\t}\n\n\ta.httpUtil.WriteJSONSuccessResponse(w, jsonR)\n}", "func (m *ServiceMgr) getDeployedApps(w http.ResponseWriter, r *http.Request) {\n\tif !m.validateAuth(w, r, EventingPermissionManage) {\n\t\treturn\n\t}\n\n\taudit.Log(auditevent.ListDeployed, r, nil)\n\n\tnodeAddrs, err := m.getActiveNodeAddrs()\n\tif err != nil {\n\t\tlogging.Errorf(\"Failed to fetch active Eventing nodes, err: %v\", err)\n\t\tw.Header().Add(headerKey, strconv.Itoa(m.statusCodes.errActiveEventingNodes.Code))\n\t\tfmt.Fprintf(w, \"\")\n\t\treturn\n\t}\n\n\taggDeployedApps := make(map[string]map[string]string)\n\tutil.Retry(util.NewFixedBackoff(time.Second), getDeployedAppsCallback, &aggDeployedApps, nodeAddrs)\n\n\tappDeployedNodesCounter := make(map[string]int)\n\n\tfor _, apps := range aggDeployedApps {\n\t\tfor app := range apps {\n\t\t\tif _, ok := appDeployedNodesCounter[app]; !ok {\n\t\t\t\tappDeployedNodesCounter[app] = 0\n\t\t\t}\n\n\t\t\tappDeployedNodesCounter[app]++\n\t\t}\n\t}\n\n\tnumEventingNodes := len(nodeAddrs)\n\tif numEventingNodes <= 0 {\n\t\tw.Header().Add(headerKey, strconv.Itoa(m.statusCodes.errNoEventingNodes.Code))\n\t\tfmt.Fprintf(w, \"\")\n\t\treturn\n\t}\n\n\tdeployedApps := make(map[string]string)\n\tfor app, numNodesDeployed := range appDeployedNodesCounter {\n\t\tif numNodesDeployed == numEventingNodes {\n\t\t\tdeployedApps[app] = \"\"\n\t\t}\n\t}\n\n\tbuf, err := json.Marshal(deployedApps)\n\tif err != nil {\n\t\tlogging.Errorf(\"Failed to 
marshal list of deployed apps, err: %v\", err)\n\t\tw.Header().Add(headerKey, strconv.Itoa(m.statusCodes.errMarshalResp.Code))\n\t\tfmt.Fprintf(w, \"\")\n\t\treturn\n\t}\n\n\tw.Header().Add(headerKey, strconv.Itoa(m.statusCodes.ok.Code))\n\tfmt.Fprintf(w, \"%s\", string(buf))\n}", "func (s *Service) GetDeploymentStatus(serviceName string, image string) (ServiceStatus, error) {\n\tfilterService := filters.NewArgs()\n\tfilterService.Add(\"name\", serviceName)\n\tswarmService, err := s.GetService(filterService)\n\n\tdeploymentStatus := ServiceStatus{}\n\tif err != nil {\n\t\treturn deploymentStatus, err\n\t}\n\n\tdeploymentStatus.Name = serviceName\n\n\tif swarmService.ID == \"\" {\n\t\tdeploymentStatus.Err = fmt.Sprintf(\"The %s service was not found in the cluster.\", serviceName)\n\t\treturn deploymentStatus, nil\n\t}\n\n\tfilterTask := filters.NewArgs()\n\tfilterTask.Add(\"service\", swarmService.ID)\n\n\tswarmTask, err := s.GetTask(filterTask)\n\tif err != nil {\n\t\treturn deploymentStatus, err\n\t}\n\n\tdeploymentStatus.ID = swarmService.ID\n\n\tif s.isImageDeploy(swarmTask, image) == false {\n\t\tdeploymentStatus.Err = fmt.Sprintf(\"The %s image was not deployed or not found in the current tasks running.\", image)\n\t\treturn deploymentStatus, nil\n\t}\n\n\tdeploymentStatus.Replicas = swarmService.Spec.Mode.Replicated.Replicas\n\tdeploymentStatus.TaskStatus = s.parseTaskState(swarmTask)\n\tdeploymentStatus.UpdateStatus = swarmService.UpdateStatus\n\n\tdeploymentStatus.RunningReplicas, deploymentStatus.FailedReplicas = s.taskStateCount(deploymentStatus, image)\n\n\tif deploymentStatus.FailedReplicas > deploymentStatus.RunningReplicas && uint64(deploymentStatus.RunningReplicas) < *deploymentStatus.Replicas {\n\t\tdeploymentStatus.Err = fmt.Sprintf(\"Looks like something went wrong during the deployment, because the %s service failed %d time(s) since last deployment\", serviceName, deploymentStatus.FailedReplicas)\n\t}\n\n\tif deploymentStatus.UpdateStatus != nil && (deploymentStatus.UpdateStatus.State == swarm.UpdateStatePaused || deploymentStatus.UpdateStatus.State == swarm.UpdateStateRollbackCompleted || deploymentStatus.UpdateStatus.State == swarm.UpdateStateRollbackPaused) {\n\t\tdeploymentStatus.Err = fmt.Sprintf(\"Something went wrong during the deployment of the %s service. 
The error message is: %s\", serviceName, deploymentStatus.UpdateStatus.Message)\n\t}\n\n\treturn deploymentStatus, nil\n}", "func (m *ProgramControl) GetProgram()(Programable) {\n val, err := m.GetBackingStore().Get(\"program\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(Programable)\n }\n return nil\n}", "func (c *StateStore) Get(req *state.GetRequest) (*state.GetResponse, error) {\n\tkey := req.Key\n\n\titems := []CosmosItem{}\n\toptions := []documentdb.CallOption{documentdb.PartitionKey(req.Key)}\n\tif req.Options.Consistency == state.Strong {\n\t\toptions = append(options, documentdb.ConsistencyLevel(documentdb.Strong))\n\t}\n\tif req.Options.Consistency == state.Eventual {\n\t\toptions = append(options, documentdb.ConsistencyLevel(documentdb.Eventual))\n\t}\n\n\t_, err := c.client.QueryDocuments(\n\t\tc.collection.Self,\n\t\tdocumentdb.NewQuery(\"SELECT * FROM ROOT r WHERE r.id=@id\", documentdb.P{\"@id\", key}),\n\t\t&items,\n\t\toptions...,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if len(items) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tb, err := jsoniter.ConfigFastest.Marshal(&items[0].Value)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &state.GetResponse{\n\t\tData: b,\n\t\tETag: items[0].Etag,\n\t}, nil\n}", "func GetValue(name string) (value interface{}, exists bool) {\n\tif log.RootLogger().TraceEnabled() {\n\t\tlog.RootLogger().Tracef(\"Getting App Value '%s': %v\", name)\n\t}\n\treturn appData.GetValue(name)\n}", "func (e *EurekaConnection) GetApp(name string) (*Application, error) {\n\tslug := fmt.Sprintf(\"%s/%s\", EurekaURLSlugs[\"Apps\"], name)\n\treqURL := e.generateURL(slug)\n\tlog.Debugf(\"Getting app %s from url %s\", name, reqURL)\n\tout, rcode, err := getBody(reqURL, e.UseJson)\n\tif err != nil {\n\t\tlog.Errorf(\"Couldn't get app %s, error: %s\", name, err.Error())\n\t\treturn nil, err\n\t}\n\tif rcode == 404 {\n\t\tlog.Errorf(\"App %s not found (received 404)\", name)\n\t\treturn nil, AppNotFoundError{specific: name}\n\t}\n\tif rcode > 299 || rcode < 200 {\n\t\tlog.Warningf(\"Non-200 rcode of %d\", rcode)\n\t}\n\n\tvar v *Application\n\tif e.UseJson {\n\t\tvar r GetAppResponseJson\n\t\terr = json.Unmarshal(out, &r)\n\t\tv = &r.Application\n\t} else {\n\t\terr = xml.Unmarshal(out, &v)\n\t}\n\tif err != nil {\n\t\tlog.Errorf(\"Unmarshalling error: %s\", err.Error())\n\t\treturn nil, err\n\t}\n\n\tv.ParseAllMetadata()\n\treturn v, nil\n}", "func GetDeployment(name, namespace string) *Deployment {\n\tfor _, v := range GetDeployments() {\n\t\tif v.Name == name && v.Namespace == namespace {\n\t\t\treturn v\n\t\t}\n\t}\n\treturn nil\n}", "func (w *DeploymentWrite) show(q *msg.Request, mr *msg.Result) {\n\tvar (\n\t\tinstanceConfigID, status, nextStatus string\n\t\tnewCurrentStatus, details, newNextStatus, deprovisionTask string\n\t\tstatusUpdateRequired, hasUpdate bool\n\t\terr error\n\t\tres sql.Result\n\t)\n\n\tif err = w.stmtGet.QueryRow(\n\t\tq.Deployment.ID,\n\t).Scan(\n\t\t&instanceConfigID,\n\t\t&status,\n\t\t&nextStatus,\n\t\t&details,\n\t); err == sql.ErrNoRows {\n\t\tmr.NotFound(err, q.Section)\n\t\treturn\n\t} else if err != nil {\n\t\tmr.ServerError(err, q.Section)\n\t\treturn\n\t}\n\n\tdepl := proto.Deployment{}\n\tif err = json.Unmarshal([]byte(details), &depl); err != nil {\n\t\tmr.ServerError(err, q.Section)\n\t\treturn\n\t}\n\n\t// returns true if there is a updated version blocked, ie.\n\t// after this deprovisioning a new version will be rolled out\n\t// -- statement always returns true or false, never 
null\n\tif err = w.stmtDeprovisionForUpdate.QueryRow(\n\t\tq.Deployment.ID,\n\t).Scan(\n\t\t&hasUpdate,\n\t); err != nil {\n\t\tmr.ServerError(err, q.Section)\n\t\treturn\n\t}\n\n\tswitch hasUpdate {\n\tcase false:\n\t\tdeprovisionTask = proto.TaskDelete\n\tdefault:\n\t\tdeprovisionTask = proto.TaskDeprovision\n\t}\n\n\tswitch status {\n\tcase proto.DeploymentAwaitingRollout:\n\t\tnewCurrentStatus = proto.DeploymentRolloutInProgress\n\t\tnewNextStatus = proto.DeploymentActive\n\t\tdepl.Task = proto.TaskRollout\n\t\tstatusUpdateRequired = true\n\tcase proto.DeploymentRolloutInProgress:\n\t\tdepl.Task = proto.TaskRollout\n\t\tstatusUpdateRequired = false\n\tcase proto.DeploymentActive:\n\t\tdepl.Task = proto.TaskRollout\n\t\tstatusUpdateRequired = false\n\tcase proto.DeploymentRolloutFailed:\n\t\tnewCurrentStatus = proto.DeploymentRolloutInProgress\n\t\tnewNextStatus = proto.DeploymentActive\n\t\tdepl.Task = proto.TaskRollout\n\t\tstatusUpdateRequired = true\n\tcase proto.DeploymentAwaitingDeprovision:\n\t\tnewCurrentStatus = proto.DeploymentDeprovisionInProgress\n\t\tnewNextStatus = proto.DeploymentDeprovisioned\n\t\tdepl.Task = deprovisionTask\n\t\tstatusUpdateRequired = true\n\tcase proto.DeploymentDeprovisionInProgress:\n\t\tdepl.Task = deprovisionTask\n\t\tstatusUpdateRequired = false\n\tcase proto.DeploymentDeprovisionFailed:\n\t\tnewCurrentStatus = proto.DeploymentDeprovisionInProgress\n\t\tnewNextStatus = proto.DeploymentDeprovisioned\n\t\tdepl.Task = deprovisionTask\n\t\tstatusUpdateRequired = true\n\tdefault:\n\t\t// the SQL query filters for the above statuses, a different\n\t\t// status should never appear\n\t\tmr.ServerError(\n\t\t\tfmt.Errorf(\n\t\t\t\t\"Impossible deployment state %s encountered\",\n\t\t\t\tstatus,\n\t\t\t),\n\t\t\tq.Section,\n\t\t)\n\t\treturn\n\t}\n\n\tif statusUpdateRequired {\n\t\tif res, err = w.stmtSetStatusUpdate.Exec(\n\t\t\tnewCurrentStatus,\n\t\t\tnewNextStatus,\n\t\t\tinstanceConfigID,\n\t\t); err != nil {\n\t\t\tmr.ServerError(err, q.Section)\n\t\t\treturn\n\t\t}\n\t\tif mr.RowCnt(res.RowsAffected()) {\n\t\t\tmr.Deployment = append(mr.Deployment, depl)\n\t\t}\n\t} else {\n\t\tmr.Deployment = append(mr.Deployment, depl)\n\t\tmr.OK()\n\t}\n}", "func (s *Store) Get(name string) (config transaction.Config, err error) {\n\tc, exists := s.data.Load(name)\n\tif !exists {\n\t\treturn config, store.ErrTransactionNotFound\n\t}\n\n\tconfig = c.(transaction.Config)\n\treturn config, nil\n\n}", "func (hc *Actions) Get(name string) (*release.Release, error) {\n\tactGet := action.NewGet(hc.Config)\n\treturn actGet.Run(name)\n}", "func Get(state string) (int, error) {\n\tif pop, ok := populations[state]; ok {\n\t\treturn pop, nil\n\t}\n\treturn 0, errors.New(\"State not found\")\n}", "func (d *dispatcher) Get(state string) *Task {\n\ttask, ok := d.Tasks[state]\n\tif !ok {\n\t\treturn &Task{\n\t\t\tHandler: NotFoundHandler,\n\t\t}\n\t}\n\treturn task\n}", "func (i *ibm) GetNodeState(node node.Node) (string, error) {\n\terr := loginToIBMCloud()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tcmd := fmt.Sprintf(\"ibmcloud ks worker get -w %s -c %s --output json\", node.Hostname, i.clusterConfig.ClusterName)\n\tstdout, stderr, err := osutils.ExecShell(cmd)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed node [%s] info. 
Error: %v %v %v\", node.Hostname, stderr, err, stdout)\n\t}\n\tworker := &Worker{}\n\terr = json.Unmarshal([]byte(stdout), worker)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn worker.Lifecycle.ActualState, nil\n}", "func GetFUOTADeployment(ctx context.Context, handler *store.Handler, id uuid.UUID, forUpdate bool) (FUOTADeployment, error) {\n\tres, err := handler.GetFUOTADeployment(ctx, id, forUpdate)\n\treturn FUOTADeployment(res), err\n}", "func (s *appInfoBaseline) GetAppInfo() (AppInfo, error) {\n\n\tinfo := AppInfo{}\n\tinfo.Labels = make(map[string]string)\n\n\tinfo.PodName = os.Getenv(\"MY_POD_NAME\") //custom defined in the deployment spec\n\t//time.Sleep(3*time.Second)\n\t//info.Namespace = os.Getenv(\"MY_POD_NAMESPACE\") //custom defined in the deployment spec\n\n\tfile, err := os.Open(\"/etc/labels\")\n\tif err != nil {\n\t\treturn info, err\n\t}\n\tdefer file.Close()\n\n\t//overkill, but read it fresh each time\n\treader := bufio.NewReader(file)\n\n\tfor {\n\t\tline, err := reader.ReadString('\\n')\n\n\t\t// check if the line has = sign\n\t\t// and process the line. Ignore the rest.\n\t\tif equal := strings.Index(line, \"=\"); equal >= 0 {\n\t\t\tif key := strings.TrimSpace(line[:equal]); len(key) > 0 {\n\t\t\t\tvalue := \"\"\n\t\t\t\tif len(line) > equal {\n\t\t\t\t\tvalue = strings.TrimSpace(line[equal+1:])\n\t\t\t\t}\n\n\t\t\t\tvalue = strings.Replace(value, \"\\\"\", \"\", -1)\n\t\t\t\tswitch key {\n\t\t\t\tcase \"app\":\n\t\t\t\t\tinfo.AppName = value\n\t\t\t\tcase \"release\":\n\t\t\t\t\tinfo.Release = value\n\t\t\t\tdefault:\n\t\t\t\t\tinfo.Labels[key] = value\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn info, err\n\t\t}\n\t}\n\n\t//return info, fmt.Errorf(\"Forced error\")\n\treturn info, err\n}", "func (s DeploymentState) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s *inMemoryJobStore) Get(ctx context.Context, name string) (*v1.JobStatus, error) {\n\ts.mu.RLock()\n\tjob, ok := s.jobs[name]\n\ts.mu.RUnlock()\n\n\tif !ok {\n\t\treturn nil, ErrNotFound\n\t}\n\n\treturn &job, nil\n}", "func (m *SynchronizationJob) GetStatus()(SynchronizationStatusable) {\n val, err := m.GetBackingStore().Get(\"status\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(SynchronizationStatusable)\n }\n return nil\n}", "func (r *StatusREST) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) {\n\treturn r.store.Get(ctx, name, options)\n}", "func (m *SimulationAutomationRun) GetStatus()(*SimulationAutomationRunStatus) {\n val, err := m.GetBackingStore().Get(\"status\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*SimulationAutomationRunStatus)\n }\n return nil\n}", "func (client AppsClient) Get(ctx context.Context, appID uuid.UUID) (result ApplicationInfoResponse, err error) {\n\treq, err := client.GetPreparer(ctx, appID)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"programmatic.AppsClient\", \"Get\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.GetSender(req)\n\tif err != nil {\n\t\tresult.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"programmatic.AppsClient\", \"Get\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult, err = client.GetResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"programmatic.AppsClient\", \"Get\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}", "func (r 
*DeviceManagementScriptDeviceStateRequest) Get(ctx context.Context) (resObj *DeviceManagementScriptDeviceState, err error) {\n\tvar query string\n\tif r.query != nil {\n\t\tquery = \"?\" + r.query.Encode()\n\t}\n\terr = r.JSONRequest(ctx, \"GET\", query, nil, &resObj)\n\treturn\n}", "func (a *Client) GetDeployment(params *GetDeploymentParams, authInfo runtime.ClientAuthInfoWriter) (*GetDeploymentOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetDeploymentParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"get-deployment\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/deployments/{deployment_id}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetDeploymentReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*GetDeploymentOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for get-deployment: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (b *backend) GetAppOperation(id string) (*storage.AppOperation, error) {\n\tvar op storage.AppOperation\n\tif err := b.getVal(b.key(appOperationsP, id, valP), &op); err != nil {\n\t\tif trace.IsNotFound(err) {\n\t\t\treturn nil, trace.NotFound(\"operation(%v) not found\", id)\n\t\t}\n\t\treturn nil, trace.Wrap(err)\n\t}\n\tutils.UTC(&op.Created)\n\tutils.UTC(&op.Updated)\n\treturn &op, nil\n}", "func (s *SecretManager) getCurrentState(namespace string, name string) (map[string][]byte, error) {\n\tcurrentState, err := s.kubernetes.ReadSecret(namespace, name)\n\tif err != nil {\n\t\tlogger.Debugf(\"failed to read '%s/%s' secret from kubernetes api: %v\", namespace, name, err)\n\t}\n\treturn currentState, err\n}", "func GetApplication(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *ApplicationState, opts ...pulumi.ResourceOption) (*Application, error) {\n\tvar resource Application\n\terr := ctx.ReadResource(\"alicloud:edas/application:Application\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (cache *LedisCacheStorage) GetApp(appID string) *core.App {\n\tdata, err := cache.db.Get([]byte(\"app:\" + appID))\n\n\tif err != nil {\n\t\t_, _ = fmt.Fprintf(os.Stderr, \"Ledis Cache: failed to parse cached app %v\\n\", err)\n\t\treturn nil\n\t}\n\n\tif data == nil {\n\t\treturn nil\n\t}\n\n\treturn &core.App{\n\t\tAppID: appID,\n\t\tName: string(data),\n\t}\n}", "func (taker *TakerGCP) GetApplication(rp *reportProject) (application *appengine.Application, err error) {\n\tgetResponse, getErr := taker.appEngine.Apps.Get(rp.gcpProject.ProjectId).Do()\n\tif getErr != nil {\n\t\t//\t\t\t\tlog.Println(\"cannot get application for project:\", appErr)\n\t\terr = getErr\n\t} else {\n\t\tapplication = getResponse\n\t}\n\treturn\n}", "func GetApp(host string, port int, configPath string, debug bool, logger zap.Logger, fast, test bool) *App {\n\tapp := &App{\n\t\tID: \"default\",\n\t\tTest: test,\n\t\tFast: fast,\n\t\tHost: host,\n\t\tPort: 
port,\n\t\tConfigPath: configPath,\n\t\tConfig: viper.New(),\n\t\tDebug: debug,\n\t\tLogger: logger,\n\t\tReadBufferSize: 30000,\n\t}\n\n\tapp.Configure()\n\treturn app\n}", "func get(c *cli.Context) error {\n\t// get org and repo information from cmd flags\n\torg, repo := c.String(\"org\"), c.String(\"repo\")\n\n\t// create a vela client\n\tclient, err := vela.NewClient(c.String(\"addr\"), nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// set token from global config\n\tclient.Authentication.SetTokenAuth(c.String(\"token\"))\n\n\t// set the page options based on user input\n\topts := &vela.ListOptions{\n\t\tPage: c.Int(\"page\"),\n\t\tPerPage: c.Int(\"per-page\"),\n\t}\n\n\tdeployments, _, err := client.Deployment.GetAll(org, repo, opts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch c.String(\"output\") {\n\tcase \"json\":\n\t\toutput, err := json.MarshalIndent(deployments, \"\", \" \")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Println(string(output))\n\n\tcase \"yaml\":\n\t\toutput, err := yaml.Marshal(deployments)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Println(string(output))\n\n\tcase \"wide\":\n\t\ttable := uitable.New()\n\t\ttable.MaxColWidth = 200\n\t\ttable.Wrap = true\n\t\t// spaces after status widen column for better readability\n\t\ttable.AddRow(\"ID\", \"TASK\", \"USER\", \"REF\", \"TARGET\", \"COMMIT\", \"URL\", \"DESCRIPTION\")\n\n\t\tfor _, d := range reverse(*deployments) {\n\t\t\ttable.AddRow(d.GetID(), d.GetTask(), d.GetUser(), d.GetRef(), d.GetTarget(), d.GetCommit(), d.GetURL(), d.GetDescription())\n\t\t}\n\n\t\tfmt.Println(table)\n\n\tdefault:\n\t\ttable := uitable.New()\n\t\ttable.MaxColWidth = 50\n\t\ttable.Wrap = true\n\n\t\ttable.AddRow(\"ID\", \"TASK\", \"USER\", \"REF\", \"TARGET\")\n\n\t\tfor _, d := range reverse(*deployments) {\n\t\t\ttable.AddRow(d.GetID(), d.GetTask(), d.GetUser(), d.GetRef(), d.GetTarget())\n\t\t}\n\n\t\tfmt.Println(table)\n\t}\n\n\treturn nil\n}", "func (b *Build) Get() error {\n\tappendum := \"/builds('\" + b.BuildID + \"')\"\n\tbody, err := b.Connector.Get(appendum)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar jBuild jsonBuild\n\tjson.Unmarshal(body, &jBuild)\n\tb.RunState = jBuild.Build.RunState\n\tb.ResultState = jBuild.Build.ResultState\n\tb.Phase = jBuild.Build.Phase\n\tb.Entitytype = jBuild.Build.Entitytype\n\tb.Startedby = jBuild.Build.Startedby\n\tb.StartedAt = jBuild.Build.StartedAt\n\tb.FinishedAt = jBuild.Build.FinishedAt\n\treturn nil\n}", "func GetApplication(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *ApplicationState, opts ...pulumi.ResourceOption) (*Application, error) {\n\tvar resource Application\n\terr := ctx.ReadResource(\"google-native:appengine/v1beta:Application\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (c *Cluster) GetConfig(ctx context.Context, namespace string) (model.Configs, error) {\n\tresult := model.Configs{}\n\n\terr := c.Config()\n\n\tif err != nil {\n\t\treturn result, err\n\t}\n\n\tdata, err := c.ClientSet.AppsV1().Deployments(namespace).List(ctx, metav1.ListOptions{\n\t\tLabelSelector: fmt.Sprintf(\n\t\t\t\"%s=%s\",\n\t\t\t\"beetle.clivern.com/status\",\n\t\t\t\"enabled\",\n\t\t),\n\t})\n\n\tif err != nil {\n\t\treturn result, err\n\t}\n\n\tfor _, deployment := range data.Items {\n\t\tapplicationName := \"\"\n\t\timageFormat := \"\"\n\t\tapplicationID := \"\"\n\t\tstatus := \"disabled\"\n\n\t\tfor key, value := range deployment.ObjectMeta.Annotations {\n\t\t\tif 
key == \"beetle.clivern.com/application-name\" {\n\t\t\t\tapplicationName = value\n\t\t\t}\n\t\t\tif key == \"beetle.clivern.com/image-format\" {\n\t\t\t\timageFormat = value\n\t\t\t}\n\t\t}\n\t\tfor key, value := range deployment.ObjectMeta.Labels {\n\t\t\tif key == \"beetle.clivern.com/status\" {\n\t\t\t\tstatus = value\n\t\t\t}\n\t\t\tif key == \"beetle.clivern.com/application-id\" {\n\t\t\t\tapplicationID = value\n\t\t\t}\n\t\t}\n\n\t\tif status == \"enabled\" && applicationID != \"\" && imageFormat != \"\" {\n\t\t\tresult.Applications = append(result.Applications, model.App{\n\t\t\t\tID: applicationID,\n\t\t\t\tName: applicationName,\n\t\t\t\tImageFormat: imageFormat,\n\t\t\t})\n\t\t} else {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"application_id\": applicationID,\n\t\t\t}).Debug(`Application status disabled`)\n\t\t}\n\t}\n\n\tresult.Exists = true\n\n\treturn result, nil\n}", "func (s GetDeploymentOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (s GetDeploymentOutput) String() string {\n\treturn awsutil.Prettify(s)\n}", "func (c *Cache) GetWorkload(meta *api.ObjectMeta) *defs.VCHubWorkload {\n\t// checking for ok will prevent nil type conversion errors\n\tpObj, _ := c.pCache.Get(workloadKind, meta).(*defs.VCHubWorkload)\n\n\tvar smObj *ctkit.Workload\n\tif obj, err := c.stateMgr.Controller().FindObject(workloadKind, meta); err == nil {\n\t\tsmObj = obj.(*ctkit.Workload)\n\t}\n\tret, _ := mergeWorkload(pObj, smObj).(*defs.VCHubWorkload)\n\treturn ret\n}", "func (m *IndustryDataRunActivity) GetStatus()(*IndustryDataActivityStatus) {\n val, err := m.GetBackingStore().Get(\"status\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*IndustryDataActivityStatus)\n }\n return nil\n}", "func (t *task) getStage() remoteexecution.ExecutionStage_Value {\n\tif t.executeResponse != nil {\n\t\treturn remoteexecution.ExecutionStage_COMPLETED\n\t}\n\tif t.currentWorker != nil {\n\t\treturn remoteexecution.ExecutionStage_EXECUTING\n\t}\n\treturn remoteexecution.ExecutionStage_QUEUED\n}" ]
[ "0.60578436", "0.5990617", "0.5870273", "0.5749326", "0.5747082", "0.5648761", "0.5562212", "0.5540046", "0.54830396", "0.5407057", "0.5313466", "0.5292182", "0.5255391", "0.52393395", "0.52236474", "0.5207524", "0.51852775", "0.5173332", "0.5171748", "0.51617277", "0.51474947", "0.51406527", "0.5129676", "0.51245433", "0.5121962", "0.5076741", "0.5066071", "0.50643796", "0.50626326", "0.5061386", "0.50605893", "0.50355583", "0.5026525", "0.50200725", "0.5005195", "0.49979836", "0.49871242", "0.49835163", "0.49791524", "0.49744228", "0.4967578", "0.4959901", "0.49463847", "0.49382007", "0.4935134", "0.49329126", "0.49207607", "0.49167368", "0.49073318", "0.4900685", "0.49002483", "0.4892151", "0.48821512", "0.48804986", "0.48721308", "0.48623776", "0.48580998", "0.48573565", "0.4854207", "0.48375207", "0.4833572", "0.48298603", "0.48240292", "0.4813161", "0.48080823", "0.48043627", "0.48018497", "0.48018205", "0.4800483", "0.4796787", "0.47896817", "0.47823912", "0.4776813", "0.4776603", "0.4772689", "0.47689727", "0.47647822", "0.47623405", "0.4761696", "0.47608545", "0.47535023", "0.47508228", "0.47487488", "0.4746465", "0.4743892", "0.4739522", "0.47379905", "0.47370133", "0.47342393", "0.47335494", "0.47330007", "0.47286716", "0.47227752", "0.47211453", "0.47209066", "0.47179842", "0.47179842", "0.47094828", "0.4708518", "0.47016668" ]
0.7768766
0
Uses figlet to generate ascii art text
func Figlet(msg string) (string, error) {
	cmd := exec.Command(figletcmd, msg)
	b, err := cmd.Output()
	if err != nil {
		return msg, err
	}
	return string(b), nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Figlet(msg string) (string, error) {\n\t// Figlet4go also support using figlet fonts (/usr/share/figlet/*.flf).\n\t// These also supports lowercase characters.\n\t// Look at slant.flf and big.flf, for instance.\n\t// TODO: Use big.flf, if available\n\tar := figlet4go.NewAsciiRender()\n\treturn ar.Render(msg)\n}", "func ShowArt() {\r\n\tfmt.Println(asciiArt)\r\n}", "func (c *Canvas) EText(x, y float64, size float64, s string, color color.RGBA) {\n\tx, y = dimen(x, y, c.Width, c.Height)\n\tsize = pct(size, c.Width)\n\tAbsTextEnd(c.Container, int(x), int(y), s, int(size), color)\n}", "func (win *window) Text(pt image.Point, text string) {\n\tif len(text) == 0 {\n\t\treturn\n\t}\n\tif len(text) > 128 {\n\t\ttext = text[:128]\n\t}\n\ttx := utf16.Encode([]rune(text))\n\ttx16 := make([]xgb.Char2b, len(tx))\n\tfor i, v := range tx {\n\t\ttx16[i].Byte1 = byte(v >> 8)\n\t\ttx16[i].Byte2 = byte(v)\n\t}\n\tx16, y16 := int16(pt.X), int16(pt.Y+9)\n\txwin.ImageText16(win.id, win.gc, x16, y16, tx16)\n}", "func label(deck *generate.Deck, x, y float64, s, font string, size, ls float64, color string) {\n\tlines := strings.Split(s, \"\\n\")\n\tfor _, t := range lines {\n\t\tdeck.Text(x, y, t, font, size, color)\n\t\ty -= ls\n\t}\n}", "func DrawText(x, y int, str string) {\n\tRawEIPS(strconv.Itoa(x), strconv.Itoa(y), str)\n}", "func drawASCII(text string) {\n\tio.Copy(os.Stdout, strings.NewReader(text))\n}", "func PrintArt(letters []string, letnum int) {\n\tfor i := 0; i < 8; i++ {\n\t\tj := 8\n\t\tfmt.Print(letters[i])\n\t\tif len(letters) > 8 {\n\t\t\tfor k := 1; k < letnum; k++ {\n\t\t\t\tfmt.Print(letters[i+j])\n\t\t\t\tif k == letnum-1 {\n\t\t\t\t\tfmt.Print(\"\\n\")\n\t\t\t\t}\n\t\t\t\tj += 8\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Print(\"\\n\")\n\t\t}\n\t}\n}", "func (c *Canvas) Text(x, y float64, size float64, s string, color color.RGBA) {\n\tx, y = dimen(x, y, c.Width, c.Height)\n\tsize = pct(size, c.Width)\n\tAbsText(c.Container, int(x), int(y), s, int(size), color)\n}", "func (this *Text) Visualize() {\n\tgraphicContext := GetTheImageGraphicContext()\n\tgraphicContext.SetFontSize(this.FontSize)\n\tgraphicContext.SetFontData(this.FontData)\n\n\t_, top, _, bottom := graphicContext.GetStringBounds(this.Text)\n\tgraphicContext.Translate(0, bottom-top)\n\n\tif this.isFilled {\n\t\tgraphicContext.FillString(this.Text)\n\t} else {\n\t\tgraphicContext.StrokeString(this.Text)\n\t}\n}", "func Text(img, lang string) ([]byte, error) {\n\tif lang == \"\" {\n\t\tlang = \"chi_sim\"\n\t}\n\treturn exc.NewCMDf(\"tesseract %s stdout -l %s\", img, lang).Do()\n}", "func Text(x, y, width, height int, fg, bg termbox.Attribute, text string) {\n\tj := -1\n\tscanner := bufio.NewScanner(strings.NewReader(text))\n\tscanner.Split(bufio.ScanLines)\n\tfor scanner.Scan() {\n\t\tj++\n\t\tif height > 0 && j >= height {\n\t\t\tbreak\n\t\t}\n\t\tLabel(x, y+j, width, fg, bg, scanner.Text())\n\t}\n\t// Fill the rest of the height\n\tfor j = j + 1; height > 0 && j < height; j++ {\n\t\tLabel(x, y+j, width, fg, bg, \"\")\n\t}\n}", "func PrettyPrints(print int) {\r\n\t/*this section is going to define all my big pretty printing and follow with a simple switch that chooses the particular graphic by its integer number associated with it.\r\n\tthe images come out distorted without modifying them here piece by piece so they look weird here but they look great in use.*/\r\n\r\n\t//this graphic and a few other presented weird so adjustments were made to achieve correct appearance\r\n\tvar pridePic string = `\r\n\t _____ _____ _____ 
_____\r\n\t| | | __| _ | | __|___ _____ ___ ___\r\n\t| | | |__ | | | | | .'| | -_|_ -|\r\n\t|_|___|_____|__|__| |_____|__,|_|_|_|___|___|\r\n\t _____ _\r\n\t\t| _ |___ ___ ___ ___ ___| |_ ___\r\n\t\t| __| _| -_|_ -| -_| | _|_ -|\r\n\t\t|__| |_| |___|___|___|_|_|_| |___|\r\n`\r\n\tvar titlePic string = `\r\n\t _____ _ _\r\n\t| _ |___ ___ |_|___ ___| |_\r\n\t| __| _| . | | | -_| _| _|\r\n\t|__| |_| |___|_| |___|___|_|\r\n\t\t |___|\r\n\t \t _ _\r\n\t\t _| | |_ ___\r\n\t |_ _| |_ |\r\n\t\t |_ _| | _|\r\n\t\t |_|_| |___|\r\n`\r\n\r\n\tvar spellPic string = `\r\n /\\\r\n / \\\r\n | |\r\n --:'''':--\r\n :'_' :\r\n _:\"\":\\___\r\n ' ' ____.' ::: '._\r\n . *=====<<=) \\ :\r\n . ' '-'-'\\_ /'._.'\r\n \\====:_ \"\"\r\n .' \\\\\r\n : :\r\n / : \\\r\n : . '.\r\n ,. _ : : : :\r\n '-' ). :__:-:__.;--'\r\n ( ' ) '-' '-'\r\n ( - .00. - _\r\n( .' O ) )\r\n'- ()_.\\,\\, -\r\n`\r\n\t//\r\n\tvar swordPic string = `\r\n /\r\nO===[====================-\r\n \\\r\n`\r\n\r\n\tvar shieldPic string = `\r\n\t\\_ _/\r\n\t] --__________-- [\r\n\t| || |\r\n\t\\ || /\r\n\t [ || ]\r\n\t |______||______|\r\n\t |------..------|\r\n\t ] || [\r\n\t \\ || /\r\n\t [ || ]\r\n\t \\ || /\r\n\t [ || ]\r\n\t \\__||__/\r\n\t --\r\n\t`\r\n\tvar winPic string = `\r\n\t __ __ _ _ _ __\r\n\t| | |___ _ _ | | | |___ ___| |\r\n\t|_ _| . | | | | | | | . | |__|\r\n\t |_| |___|___| |_____|___|_|_|__|\r\n\t`\r\n\r\n\tvar losePic string = `\r\n\t __ __ ____ _ _\r\n\t| | |___ _ _ | \\|_|___ _| |\r\n\t|_ _| . | | | | | | | -_| . |\r\n\t |_| |___|___| |____/|_|___|___|\r\n\t`\r\n\tvar tiePic string = `\r\n\t _____ _\r\n\t|_ _|_|___\r\n\t | | | | -_|\r\n\t |_| |_|___|\r\n\t`\r\n\tvar sssPic string = `\r\n\t _____ _\r\n\t| __|_ _ _ ___ ___ _| |\r\n\t|__ | | | | . | _| . |\r\n\t|_____|_____|___|_| |___|\r\n\t _____ _ _ _ _\r\n\t| __| |_|_|___| |_| |\r\n\t|__ | | | -_| | . |\r\n\t|_____|_|_|_|___|_|___|\r\n\t _____ _ _\r\n\t| __|___ ___| | |\r\n\t|__ | . 
| -_| | |\r\n\t|_____| _|___|_|_|\r\n\t\t|_|\r\n\t`\r\n\tswitch print {\r\n\tcase 1:\r\n\t\tfmt.Printf(\"%s\\n\", pridePic)\r\n\tcase 2:\r\n\t\tfmt.Printf(\"%s\\n\", titlePic)\r\n\tcase 3:\r\n\t\tfmt.Printf(\"%s\\n\", spellPic)\r\n\tcase 4:\r\n\t\tfmt.Printf(\"%s\\n\", swordPic)\r\n\tcase 5:\r\n\t\tfmt.Printf(\"%s\\n\", shieldPic)\r\n\tcase 6:\r\n\t\tfmt.Printf(\"%s\\n\", winPic)\r\n\tcase 7:\r\n\t\tfmt.Printf(\"%s\\n\", losePic)\r\n\tcase 8:\r\n\t\tfmt.Printf(\"%s\\n\", sssPic)\r\n\tcase 9:\r\n\t\tfmt.Printf(\"%s\\n\", tiePic)\r\n\r\n\t}\r\n}", "func PrintItalicLogo() {\n\tfmt.Println(`\n// // //////////// ////////////\n // // // // ////// //////// // // // // //\n // // /// // // // // // //\n // // // // // // // // ////// // //\n // // // // // // // // // // // //\n // // // // //////// // ///////////// ///////////\n\n ////////// // \n // ////// // //////// // \n // // // //////// // // // /////// \n //////// ////////// // // /// // \n // // // // // // // \n // ////// // //////// // // \n\n // // // \n // // ////// ////// // // ////// // /////\n // // // // // // // //////// // /////// // // /// \n // // // // ////////// // // // /// // ////////// //\n //// //// // // // // // // // //\n // // ////// ///////// // // // ////// //\n `)\n}", "func ascii() {\n\tfmt.Printf(\"\\n\")\n\tcolor.Set(color.FgGreen, color.Bold)\n\tfmt.Printf(\"| _ _ _ .\\n\")\n\tfmt.Printf(\"|( (_| | (_| |\\n\")\n}", "func drawText(dst draw.Image, p image.Point, t string) {\n\tconst base = 0x21\n\tr := image.Rect(0, 0, 7, 13).Add(p)\n\tu := image.Uniform{C: image1bit.On}\n\tfor _, c := range t {\n\t\tif c >= base && int(c-base) < len(glyphs) {\n\t\t\tdraw.DrawMask(dst, r, &u, image.Point{}, &glyphs[c-base], image.Point{}, draw.Over)\n\t\t}\n\t\tr = r.Add(image.Point{7, 0})\n\t}\n}", "func main() {\n\tpdf := gopdf.GoPdf{}\n\tpdf.Start(gopdf.Config{Unit: gopdf.Unit_PT, PageSize: gopdf.Rect{W: 595.28, H: 841.89}}) //595.28, 841.89 = A4\n\tpdf.AddPage()\n\n\terr := pdf.AddTTFFont(\"DejaVuSerif\", \"../ttf/DejaVuSerif.ttf\")\n\tif err != nil {\n\t\tlog.Print(err.Error())\n\t\treturn\n\t}\n\terr = pdf.AddTTFFont(\"DejaVuSerif-Italic\", \"../ttf/DejaVuSerif-Italic.ttf\")\n\tif err != nil {\n\t\tlog.Print(err.Error())\n\t\treturn\n\t}\n\n\terr = pdf.SetFont(\"DejaVuSerif\", \"\", 14)\n\tif err != nil {\n\t\tlog.Print(err.Error())\n\t\treturn\n\t}\n\tpdf.Cell(nil, \"Hi! This is nomal.\")\n\tpdf.Br(20)\n\n\terr = pdf.SetFont(\"DejaVuSerif-Italic\", \"\", 14)\n\tif err != nil {\n\t\tlog.Print(err.Error())\n\t\treturn\n\t}\n\tpdf.Cell(nil, \"Hi! 
This is italic.\")\n\tpdf.WritePdf(\"italic.pdf\")\n}", "func (e *Escpos) PrintTextImage(text string, fontFilePath string, fontSize float64, lineSpacing float64, dpi float64, imageHeight int, useFullHinting bool, useWhiteBackground bool) error {\n\t// flag.Parse()\n\t// Read the font data.\n\tfontBytes, err := ioutil.ReadFile(fontFilePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tf, err := freetype.ParseFont(fontBytes)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Initialize the context.\n\tfg, bg := image.Black, image.White\n\truler := color.RGBA{0xdd, 0xdd, 0xdd, 0xff}\n\tif useWhiteBackground {\n\t\tfg, bg = image.White, image.Black\n\t\truler = color.RGBA{0x22, 0x22, 0x22, 0xff}\n\t}\n\trgba := image.NewRGBA(image.Rect(0, 0, 760, imageHeight))\n\tdraw.Draw(rgba, rgba.Bounds(), bg, image.Point{}, draw.Src)\n\tc := freetype.NewContext()\n\tc.SetDPI(dpi)\n\tc.SetFont(f)\n\tc.SetFontSize(fontSize)\n\tc.SetClip(rgba.Bounds())\n\tc.SetDst(rgba)\n\tc.SetSrc(fg)\n\tif useFullHinting {\n\t\tc.SetHinting(font.HintingFull)\n\t} else {\n\t\tc.SetHinting(font.HintingNone)\n\t}\n\n\t// Draw the guidelines.\n\tfor i := 0; i < 200; i++ {\n\t\trgba.Set(10, 10+i, ruler)\n\t\trgba.Set(10+i, 10, ruler)\n\t}\n\n\t// Draw the text.\n\tpt := freetype.Pt(10, 10+int(c.PointToFixed(fontSize)>>6))\n\t_, err = c.DrawString(text, pt)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpt.Y += c.PointToFixed(fontSize * lineSpacing)\n\n\tvar data []byte\n\tb := bufio.NewWriter(bytes.NewBuffer(data))\n\terr = png.Encode(b, rgba)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = b.Flush()\n\tif err != nil {\n\t\treturn err\n\t}\n\te.PrintImageFromBytes(data)\n\treturn nil\n}", "func Image2AsciiArt(img image.Image, chars []byte, width, height int) []byte {\n\timg = imaging.Resize(img, width, height, imaging.Lanczos)\n\trect := img.Bounds()\n\tbuf := new(bytes.Buffer)\n\tfor y := rect.Min.Y; y < rect.Max.Y; y++ {\n\t\tfor x := rect.Min.X; x < rect.Max.X; x++ {\n\t\t\tvar c uint8\n\t\t\tif len(chars) <= 256 {\n\t\t\t\tg := ColorToGray(img.At(x, y))\n\t\t\t\tc = GrayToChar(g, chars)\n\t\t\t} else {\n\t\t\t\tg := ColorToGray16(img.At(x, y))\n\t\t\t\tc = Gray16ToChar(g, chars)\n\t\t\t}\n\t\t\tbuf.WriteByte(c)\n\t\t}\n\t\tbuf.WriteByte('\\n')\n\t}\n\treturn buf.Bytes()\n}", "func (w *SVGWriter) Text(text string, x, y float64) {\n\tw.appendElement(&textag{x, y, text})\n}", "func AbsText(cont *fyne.Container, x, y int, s string, size int, color color.RGBA) {\n\tfx, fy, fsize := float32(x), float32(y), float32(size)\n\tt := &canvas.Text{Text: s, Color: color, TextSize: fsize}\n\tadj := fsize / 5\n\tp := fyne.Position{X: fx, Y: fy - (fsize + adj)}\n\tt.Move(p)\n\tcont.AddObject(t)\n}", "func (r renderer) TitleBlock(out *bytes.Buffer, text []byte) {}", "func render(ctx Config, com Component, gfx *dot.Graph) *dot.Node {\n\n\timg := iconPath(ctx, com)\n\n\tif fc := strings.TrimSpace(com.FontColor); len(fc) == 0 {\n\t\tcom.FontColor = \"#000000ff\"\n\t}\n\n\tif imp := strings.TrimSpace(com.Impl); len(imp) == 0 {\n\t\tcom.Impl = \"&nbsp;\"\n\t}\n\n\tvar sb strings.Builder\n\tsb.WriteString(`<table border=\"0\" cellborder=\"0\">`)\n\tif ctx.showImpl {\n\t\tfmt.Fprintf(&sb, `<tr><td><font point-size=\"8\">%s</font></td></tr>`, com.Impl)\n\t}\n\n\tsb.WriteString(\"<tr>\")\n\tfmt.Fprintf(&sb, `<td fixedsize=\"true\" width=\"50\" height=\"50\"><img src=\"%s\" /></td>`, img)\n\tsb.WriteString(\"</tr>\")\n\n\tlabel := \"&nbsp;\"\n\tif s := strings.TrimSpace(com.Label); len(s) > 0 {\n\t\tlabel = s\n\t}\n\tfmt.Fprintf(&sb, `<tr><td><font 
point-size=\"7\">%s</font></td></tr>`, label)\n\tsb.WriteString(\"</table>\")\n\n\treturn node.New(gfx, com.ID,\n\t\tnode.Label(sb.String(), true),\n\t\tnode.FillColor(\"transparent\"),\n\t\tnode.Shape(\"plain\"),\n\t)\n}", "func (el *Fill) Text() {}", "func Title(){\n fmt.Printf(\"\\n\\n\\n\\n\\n\")\n\n fmt.Printf(\" ████████╗██╗ ██╗███████╗ ███████╗███████╗██╗ ██╗███████╗██████╗\\n\")\n fmt.Printf(\" ╚══██╔══╝██║ ██║██╔════╝ ██╔════╝██╔════╝██║ ██║██╔════╝██╔══██╗\\n\")\n fmt.Printf(\" ██║ ███████║█████╗ ███████╗█████╗ ██║ █╗ ██║█████╗ ██████╔╝\\n\")\n fmt.Printf(\" ██║ ██╔══██║██╔══╝ ╚════██║██╔══╝ ██║███╗██║██╔══╝ ██╔══██╗\\n\")\n fmt.Printf(\" ██║ ██║ ██║███████╗ ███████║███████╗╚███╔███╔╝███████╗██║ ██║\\n\")\n fmt.Printf(\" ╚═╝ ╚═╝ ╚═╝╚══════╝ ╚══════╝╚══════╝ ╚══╝╚══╝ ╚══════╝╚═╝ ╚═╝\\n\")\n\n\n}", "func (t *TextRenderer) Print(text string, x, y float32, scale float32) error {\n\tindices := []rune(text)\n\tif len(indices) == 0 {\n\t\treturn nil\n\t}\n\tt.shader.Use()\n\n\tlowChar := rune(32)\n\n\tgl.ActiveTexture(gl.TEXTURE0)\n\tgl.BindVertexArray(t.vao)\n\n\tfor i := range indices {\n\t\truneIndex := indices[i]\n\n\t\tif int(runeIndex)-int(lowChar) > len(t.fontChar) || runeIndex < lowChar {\n\t\t\tcontinue\n\t\t}\n\n\t\tch := t.fontChar[runeIndex-lowChar]\n\n\t\txpos := x + float32(ch.bearingH)*scale\n\t\typos := y - float32(ch.height-ch.bearingV)*scale\n\t\tw := float32(ch.width) * scale\n\t\th := float32(ch.height) * scale\n\n\t\tvar vertices = []float32{\n\t\t\txpos, ypos + h, 0.0, 1.0,\n\t\t\txpos + w, ypos, 1.0, 0.0,\n\t\t\txpos, ypos, 0.0, 0.0,\n\t\t\txpos, ypos + h, 0.0, 1.0,\n\t\t\txpos + w, ypos + h, 1.0, 1.0,\n\t\t\txpos + w, ypos, 1.0, 0.0,\n\t\t}\n\n\t\tgl.BindTexture(gl.TEXTURE_2D, ch.textureID)\n\t\tgl.BindBuffer(gl.ARRAY_BUFFER, t.vbo)\n\t\tgl.BufferSubData(gl.ARRAY_BUFFER, 0, len(vertices)*4, gl.Ptr(vertices))\n\n\t\tgl.BindBuffer(gl.ARRAY_BUFFER, 0)\n\t\tgl.DrawArrays(gl.TRIANGLES, 0, 6)\n\n\t\tx += float32(ch.advance>>6) * scale\n\t}\n\tgl.BindVertexArray(0)\n\tgl.BindTexture(gl.TEXTURE_2D, 0)\n\tgl.UseProgram(0)\n\treturn nil\n}", "func (g *Gopher) String() string { return \"`( ◔ ౪◔)´\" }", "func (vr *vectorRenderer) Text(body string, x, y int) {\n\tstyle := vr.s.SVGText()\n\tvr.c.Text(x, y, body, &style)\n}", "func (d *Draw) Text(text string, p Point, color sdl.Color, size int) {\n\tsurf, err := font.Size(size).RenderUTF8Blended(text, color)\n\tif err != nil {\n\t\td.err(err, \"error on render text\")\n\t\treturn\n\t}\n\tdefer surf.Free()\n\ttextTexture, err := d.renderer.CreateTextureFromSurface(surf)\n\tif err != nil {\n\t\td.err(err, \"error on create texture\")\n\t\treturn\n\t}\n\tdefer textTexture.Destroy()\n\terr = d.renderer.Copy(textTexture, nil, &sdl.Rect{X: p.X, Y: p.Y, W: surf.W, H: surf.H})\n\tif err != nil {\n\t\td.err(err, \"error on copy texture to surface\")\n\t\treturn\n\t}\n}", "func (m Cuepoint) Text() string {\n\treturn string(m)\n}", "func RenderTextToImage(text []string, w, h int, fntSize float64, fnt *truetype.Font, textColor color.Color) (*ebiten.Image, error) {\n\n\tdrawImage, err := ebiten.NewImage(w, h, ebiten.FilterNearest)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t//w, h := t.GetSize()\n\tdst := image.NewRGBA(image.Rect(0, 0, w, h))\n\t//const size = 24\n\tconst dpi = 72\n\td := &font.Drawer{\n\t\tDst: dst,\n\t\tSrc: image.NewUniform(textColor), //image.White,\n\t\tFace: truetype.NewFace(fnt, &truetype.Options{\n\t\t\tSize: fntSize,\n\t\t\tDPI: dpi,\n\t\t\tHinting: font.HintingNone,\n\t\t}),\n\t}\n\t/*highlight := 
color.White\n\tif t.textColor != color.Black {\n\t\thighlight = color.Black\n\t}\n\td2 := &font.Drawer{\n\t\tDst: dst,\n\t\tSrc: image.NewUniform(highlight),\n\t\tFace: truetype.NewFace(t.font, &truetype.Options{\n\t\t\tSize: t.fntSize,\n\t\t\tDPI: t.fntDpi,\n\t\t\tHinting: font.HintingFull,\n\t\t}),\n\t}*/\n\ty := fntSize\n\tfor _, s := range text {\n\t\t//d2.Dot = fixed.P(+1, int(y+1))\n\t\t//d2.DrawString(s)\n\t\td.Dot = fixed.P(0, int(y))\n\t\td.DrawString(s)\n\t\ty += fntSize\n\t}\n\n\terr = drawImage.ReplacePixels(dst.Pix)\n\treturn drawImage, err\n\n}", "func encodeText(text string) (string, error) {\n\tif len(text) > 26 {\n\t\treturn \"\", fmt.Errorf(\"Maximum text encoding length is 25 characters.\")\n\t}\n\tcoords := make(Coords, 0)\n\tfontSize := 16.0\n\tkerning := 16.0\n\tlineHeight := 1.25\n\tfor n, c := range text {\n\t\tax := fontSize * float64((n % 13) + 1) + (kerning * float64(n % 13))\n\t\tvar ay float64\n\t\tif len(text) <= 13 {\n\t\t\tay = IMG_HEIGHT / 2.0\n\t\t} else {\n\t\t\tay = IMG_HEIGHT / 3.0 + IMG_HEIGHT / 3.0 * float64(n / 13) * lineHeight\n\t\t}\n\t\tsegments, err := CharacterSegments(c)\n\t\tfor i := 0; i < len(segments); i += 1 {\n\t\t\tif segments[i] == DIVIDER {\n\t\t\t\tcoords = append(coords, EMPTY_POINT)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tx := math.Round(scale(int(ax + segments[i].x * fontSize), IMG_WIDTH, HRM_MAX))\n\t\t\ty := math.Round(scale(int(ay - segments[i].y * fontSize), IMG_HEIGHT, HRM_MAX))\n\t\t\tcoords = append(coords, [2]float64{x, y})\n\t\t}\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif coords[len(coords) - 1] != EMPTY_POINT {\n\t\t\tcoords = append(coords, EMPTY_POINT)\n\t\t}\n\t}\n\treturn encodeComment(coords)\n}", "func (a ASCIIArt) String() string {\n\t// TODO: could use Builder here?\n\tresult := \"\"\n\n\tfor _, x := range a {\n\t\tresult += strings.Join(x, \"\") + \"\\n\"\n\t}\n\n\treturn result\n}", "func drawText(ctx *gg.Context, text string, x float64, y float64, ax float64, ay float64, divisor float64) {\n\ttext = strings.ToUpper(text)\n\twidth := float64(ctx.Width()) - (IMAGE_MARGIN * 2)\n\theight := float64(ctx.Height()) / divisor\n\tcalculateFontSize(ctx, text, width, height)\n\n\t// Draw the text border.\n\tctx.SetHexColor(\"#000\")\n\tfor angle := 0.0; angle < (2 * math.Pi); angle += 0.35 {\n\t\tbx := x + (math.Sin(angle) * FONT_BORDER_RADIUS)\n\t\tby := y + (math.Cos(angle) * FONT_BORDER_RADIUS)\n\t\tctx.DrawStringWrapped(text, bx, by, ax, ay, width, FONT_LEADING, gg.AlignCenter)\n\t}\n\n\t// Draw the text itself.\n\tctx.SetHexColor(\"#FFF\")\n\tctx.DrawStringWrapped(text, x, y, ax, ay, width, FONT_LEADING, gg.AlignCenter)\n}", "func GetText(data []string) string {\n\tpoints := parseData(data)\n\tbounds := boundaries{math.MaxInt64 / 4, math.MinInt64 / 4, math.MaxInt64 / 4, math.MinInt64 / 4}\n\tdidGrow := false\n\tfor !didGrow {\n\t\tpoints, bounds, didGrow = simulate(points, bounds)\n\t}\n\tprintState(points, bounds)\n\treturn \"\"\n}", "func letters(abc *st.Art) {\n\t// count text blocks\n\tl := chk.Lines(abc)\n\tabc.Output.Final = make([]string, l)\n\tabc.Output.Index = 8\n\t// if method coloring all words, add color to the begining of the line\n\tif abc.Flag.Color.MethodColoring == \"all\" {\n\t\tcl.AddStartColor(abc)\n\t}\n\n\tindex := 0\n\tfor _, symbol := range abc.Text.Rune {\n\t\tif symbol == '\\n' {\n\t\t\tif abc.Alphabet.Letter != 0 && abc.Alphabet.Letter != '\\n' {\n\t\t\t\tabc.Output.Index += 9 // 8 lines text + 1 newline\n\t\t\t} else {\n\t\t\t\tabc.Output.Index += 
1\n\t\t\t}\n\t\t\tabc.Alphabet.Letter = symbol\n\t\t\tcontinue\n\t\t}\n\t\t// remember current letter\n\t\tabc.Alphabet.Letter = symbol\n\t\t// by default coloring is false\n\t\tabc.Alphabet.Coloring = false\n\t\t// coloring for letter by letter\n\t\tif symbol != ' ' && symbol != '\\t' {\n\t\t\tif abc.Flag.Color.MethodColoring == \"or\" {\n\t\t\t\tif abc.Flag.Color.MethodBy == \"bySymbol\" {\n\t\t\t\t\tif findOrSymbol(abc, symbol) {\n\t\t\t\t\t\t// coloring in range between one letter\n\t\t\t\t\t\tabc.Flag.Color.BySymbol.Range1, abc.Flag.Color.BySymbol.Range2 = symbol, symbol\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// clear range for the next one\n\t\t\t\t\t\tabc.Flag.Color.BySymbol.Range1, abc.Flag.Color.BySymbol.Range2 = 0, 0\n\t\t\t\t\t}\n\t\t\t\t} else if abc.Flag.Color.MethodBy == \"byIndex\" {\n\t\t\t\t\tif findOrIndex(abc, index) {\n\t\t\t\t\t\t// coloring in range between one letter\n\t\t\t\t\t\tabc.Flag.Color.ByIndex.Range1, abc.Flag.Color.ByIndex.Range2 = index, index\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// clear range for the next one\n\t\t\t\t\t\tabc.Flag.Color.ByIndex.Range1, abc.Flag.Color.ByIndex.Range2 = -1, -1\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif abc.Flag.Color.MethodBy == \"bySymbol\" {\n\t\t\t\t// if choosen letter in range - make color\n\t\t\t\tif symbol >= abc.Flag.Color.BySymbol.Range1 && symbol <= abc.Flag.Color.BySymbol.Range2 {\n\t\t\t\t\tabc.Alphabet.Coloring = true\n\t\t\t\t}\n\t\t\t} else if abc.Flag.Color.MethodBy == \"byIndex\" {\n\t\t\t\t// if choosen letter in range - make color\n\t\t\t\tif index >= abc.Flag.Color.ByIndex.Range1 && index <= abc.Flag.Color.ByIndex.Range2 {\n\t\t\t\t\tabc.Alphabet.Coloring = true\n\t\t\t\t}\n\t\t\t}\n\t\t\tindex++\n\t\t}\n\t\tmakeOutputByMethod(abc)\n\t}\n}", "func PNG(filename string, l Level, text string) error {\n\treturn NewPNGWriter(l).QRFile(filename, text)\n}", "func (complexShaperThai) preprocessText(plan *otShapePlan, buffer *Buffer, font *Font) {\n\t/* The following is NOT specified in the MS OT Thai spec, however, it seems\n\t* to be what Uniscribe and other engines implement. According to Eric Muller:\n\t*\n\t* When you have a SARA AM, decompose it in NIKHAHIT + SARA AA, *and* move the\n\t* NIKHAHIT backwards over any tone mark (0E48-0E4B).\n\t*\n\t* <0E14, 0E4B, 0E33> . <0E14, 0E4D, 0E4B, 0E32>\n\t*\n\t* This reordering is legit only when the NIKHAHIT comes from a SARA AM, not\n\t* when it's there to start with. The string <0E14, 0E4B, 0E4D> is probably\n\t* not what a user wanted, but the rendering is nevertheless nikhahit above\n\t* chattawa.\n\t*\n\t* Same for Lao.\n\t*\n\t* Note:\n\t*\n\t* Uniscribe also does some below-marks reordering. Namely, it positions U+0E3A\n\t* after U+0E38 and U+0E39. We do that by modifying the ccc for U+0E3A.\n\t* See unicode.modified_combining_class (). Lao does NOT have a U+0E3A\n\t* equivalent.\n\t */\n\n\t/*\n\t* Here are the characters of significance:\n\t*\n\t*\t\t\tThai\tLao\n\t* SARA AM:\t\tU+0E33\tU+0EB3\n\t* SARA AA:\t\tU+0E32\tU+0EB2\n\t* Nikhahit:\t\tU+0E4D\tU+0ECD\n\t*\n\t* Testing shows that Uniscribe reorder the following marks:\n\t* Thai:\t<0E31,0E34..0E37,0E47..0E4E>\n\t* Lao:\t<0EB1,0EB4..0EB7,0EC7..0ECE>\n\t*\n\t* Note how the Lao versions are the same as Thai + 0x80.\n\t */\n\n\tbuffer.clearOutput()\n\tcount := len(buffer.Info)\n\tfor buffer.idx = 0; buffer.idx < count; {\n\t\tu := buffer.cur(0).codepoint\n\t\tif !isSaraAm(u) {\n\t\t\tbuffer.nextGlyph()\n\t\t\tcontinue\n\t\t}\n\n\t\t/* Is SARA AM. Decompose and reorder. 
*/\n\t\tbuffer.outputRune(nikhahitFromSaraAm(u))\n\t\tbuffer.prev().setContinuation()\n\t\tbuffer.replaceGlyph(saraAaFromSaraAm(u))\n\n\t\t/* Make Nikhahit be recognized as a ccc=0 mark when zeroing widths. */\n\t\tend := len(buffer.outInfo)\n\t\tbuffer.outInfo[end-2].setGeneralCategory(nonSpacingMark)\n\n\t\t/* Ok, let's see... */\n\t\tstart := end - 2\n\t\tfor start > 0 && isToneMark(buffer.outInfo[start-1].codepoint) {\n\t\t\tstart--\n\t\t}\n\n\t\tif start+2 < end {\n\t\t\t/* Move Nikhahit (end-2) to the beginning */\n\t\t\tbuffer.mergeOutClusters(start, end)\n\t\t\tt := buffer.outInfo[end-2]\n\t\t\tcopy(buffer.outInfo[start+1:], buffer.outInfo[start:end-2])\n\t\t\tbuffer.outInfo[start] = t\n\t\t} else {\n\t\t\t/* Since we decomposed, and NIKHAHIT is combining, merge clusters with the\n\t\t\t* previous cluster. */\n\t\t\tif start != 0 && buffer.ClusterLevel == MonotoneGraphemes {\n\t\t\t\tbuffer.mergeOutClusters(start-1, end)\n\t\t\t}\n\t\t}\n\t}\n\tbuffer.swapBuffers()\n\n\t/* If font has Thai GSUB, we are done. */\n\tif plan.props.Script == language.Thai && !plan.map_.foundScript[0] {\n\t\tdoThaiPuaShaping(buffer, font)\n\t}\n}", "func (r renderer) Footnotes(out *bytes.Buffer, text func() bool) {}", "func Label(x, y, width int, fg, bg termbox.Attribute, text string) {\n\t// We cannot rely on range index because it shows byte position\n\t// instead of rune position\n\ti := -1\n\tfor _, r := range text {\n\t\ti++\n\t\tif width > 0 && i >= width {\n\t\t\tbreak\n\t\t}\n\t\ttermbox.SetCell(x+i, y, r, fg, bg)\n\t}\n\t// Fill the rest of the width with spaces\n\tfor i = i + 1; width > 0 && i < width; i++ {\n\t\ttermbox.SetCell(x+i, y, ' ', fg, bg)\n\t}\n}", "func (s Shaper) Shape(text string, ppem uint16, direction Direction, script Script, language string, features string, variations string) []Glyph {\n\tglyphs := make([]Glyph, len([]rune(text)))\n\ti := 0\n\tvar prevIndex uint16\n\tfor cluster, r := range text {\n\t\tindex := s.sfnt.GlyphIndex(r)\n\t\tglyphs[i].Text = string(r)\n\t\tglyphs[i].ID = index\n\t\tglyphs[i].Cluster = uint32(cluster)\n\t\tglyphs[i].XAdvance = int32(s.sfnt.GlyphAdvance(index))\n\t\tif 0 < i {\n\t\t\tglyphs[i-1].XAdvance += int32(s.sfnt.Kerning(prevIndex, index))\n\t\t}\n\t\tprevIndex = index\n\t\ti++\n\t}\n\treturn glyphs\n}", "func SaveArt(letters []string, letnum int, f *os.File) {\n\tfor i := 0; i < 8; i++ {\n\t\tj := 8\n\t\t_, err := f.WriteString(letters[i])\n\t\tEscape(err)\n\t\tif len(letters) > 8 {\n\t\t\tfor k := 1; k < letnum; k++ {\n\t\t\t\t_, err := f.WriteString(letters[i+j])\n\t\t\t\tEscape(err)\n\t\t\t\tif k == letnum-1 {\n\t\t\t\t\t_, err := f.WriteString(\"\\n\")\n\t\t\t\t\tEscape(err)\n\t\t\t\t}\n\t\t\t\tj += 8\n\t\t\t}\n\t\t} else {\n\t\t\t_, err := f.WriteString(\"\\n\")\n\t\t\tEscape(err)\n\t\t}\n\t}\n}", "func (d Decomposition) LaTeX() string {\n\treturn d.string(\"\\times\", \"{\", \"}\")\n}", "func MakeArt(abc *st.Art) {\n\t// choose color start / end (reset)\n\tcl.ChooseColor(abc)\n\tif abc.Flag.Color.MethodBy == \"bySymbol\" {\n\t\tif abc.Flag.Color.MethodColoring == \"and\" {\n\t\t\t// coloring in range between letter1 and letter2\n\t\t\tabc.Flag.Color.BySymbol.Range1, abc.Flag.Color.BySymbol.Range2 = findAndSymbol(abc)\n\t\t}\n\t} else if abc.Flag.Color.MethodBy == \"byIndex\" {\n\t\t// add max number for range if parameter was [5:]\n\t\tif abc.Flag.Color.ByIndex.MaxIndex {\n\t\t\tlRune := len(abc.Text.Rune)\n\t\t\tabc.Flag.Color.ByIndex.Range = append(abc.Flag.Color.ByIndex.Range, lRune)\n\t\t\tabc.Flag.Color.MethodColoring = 
\"and\"\n\t\t}\n\t\tif abc.Flag.Color.MethodColoring == \"and\" {\n\t\t\t// coloring in range between letter1 and letter2\n\t\t\tabc.Flag.Color.ByIndex.Range1, abc.Flag.Color.ByIndex.Range2 = findAndIndex(abc)\n\t\t}\n\t}\n\t// read letters from argument\n\tletters(abc)\n\t// add last newline if not exist\n\t// LastNewLine(abc)\n}", "func Shape(input Input) (Output, error) {\n\t// Prepare to shape the text.\n\t// TODO: maybe reuse these buffers for performance?\n\tbuf := harfbuzz.NewBuffer()\n\trunes, start, end := input.Text, input.RunStart, input.RunEnd\n\tif end < start {\n\t\treturn Output{}, InvalidRunError{RunStart: start, RunEnd: end, TextLength: len(input.Text)}\n\t}\n\tbuf.AddRunes(runes, start, end-start)\n\t// TODO: handle vertical text?\n\tswitch input.Direction {\n\tcase di.DirectionLTR:\n\t\tbuf.Props.Direction = harfbuzz.LeftToRight\n\tcase di.DirectionRTL:\n\t\tbuf.Props.Direction = harfbuzz.RightToLeft\n\tdefault:\n\t\treturn Output{}, UnimplementedDirectionError{\n\t\t\tDirection: input.Direction,\n\t\t}\n\t}\n\tbuf.Props.Language = input.Language\n\tbuf.Props.Script = input.Script\n\t// TODO: figure out what (if anything) to do if this type assertion fails.\n\tfont := harfbuzz.NewFont(input.Face.(harfbuzz.Face))\n\tfont.XScale = int32(input.Size.Ceil()) << scaleShift\n\tfont.YScale = font.XScale\n\n\t// Actually use harfbuzz to shape the text.\n\tbuf.Shape(font, nil)\n\n\t// Convert the shaped text into an Output.\n\tglyphs := make([]Glyph, len(buf.Info))\n\tfor i := range glyphs {\n\t\tg := buf.Info[i].Glyph\n\t\textents, ok := font.GlyphExtents(g)\n\t\tif !ok {\n\t\t\t// TODO: can this error happen? Will harfbuzz return a\n\t\t\t// GID for a glyph that isn't in the font?\n\t\t\treturn Output{}, MissingGlyphError{GID: g}\n\t\t}\n\t\tglyphs[i] = Glyph{\n\t\t\tWidth: fixed.I(int(extents.Width)) >> scaleShift,\n\t\t\tHeight: fixed.I(int(extents.Height)) >> scaleShift,\n\t\t\tXBearing: fixed.I(int(extents.XBearing)) >> scaleShift,\n\t\t\tYBearing: fixed.I(int(extents.YBearing)) >> scaleShift,\n\t\t\tXAdvance: fixed.I(int(buf.Pos[i].XAdvance)) >> scaleShift,\n\t\t\tYAdvance: fixed.I(int(buf.Pos[i].YAdvance)) >> scaleShift,\n\t\t\tXOffset: fixed.I(int(buf.Pos[i].XOffset)) >> scaleShift,\n\t\t\tYOffset: fixed.I(int(buf.Pos[i].YOffset)) >> scaleShift,\n\t\t\tClusterIndex: buf.Info[i].Cluster,\n\t\t\tGlyphID: g,\n\t\t\tMask: buf.Info[i].Mask,\n\t\t}\n\t}\n\tcountClusters(glyphs, input.RunEnd-input.RunStart, input.Direction)\n\tout := Output{\n\t\tGlyphs: glyphs,\n\t\tDirection: input.Direction,\n\t}\n\tfontExtents := font.ExtentsForDirection(buf.Props.Direction)\n\tout.LineBounds = Bounds{\n\t\tAscent: fixed.I(int(fontExtents.Ascender)) >> scaleShift,\n\t\tDescent: fixed.I(int(fontExtents.Descender)) >> scaleShift,\n\t\tGap: fixed.I(int(fontExtents.LineGap)) >> scaleShift,\n\t}\n\treturn out, out.RecalculateAll()\n}", "func text2matrix(set noteMap, figures map[string]figure, txt []string) (channels, notes matrix, figuremap map[int][]midiFigure) {\n\tfiguremap = make(map[int][]midiFigure)\n\tfor _, line := range txt {\n\t\tvar chanV, noteV []int\n\t\tlane := strings.Split(strings.TrimSpace(line), laneSplitStr)\n\t\t// use our own index because we need to be able to decrease it\n\t\t// if we encounter unwanted (uncounted) characters\n\t\tvar i = -1\n\t\tfor _, elem := range lane {\n\t\t\ti++\n\t\t\tif elem == \"\" {\n\t\t\t\ti--\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfirst, rest := splitFirstRune(elem)\n\t\t\tif first == figurePrefix {\n\t\t\t\t// generate a micro 
pattern\n\t\t\t\tfiguremap[i] = append(figuremap[i], translateFigure(set, figures, rest))\n\t\t\t\t// also generate an empty single note\n\t\t\t\tchanV = append(chanV, 0)\n\t\t\t\tnoteV = append(noteV, 0)\n\t\t\t} else {\n\t\t\t\t// generate a normal note\n\t\t\t\tc, n := translateKit(set, elem)\n\t\t\t\tchanV = append(chanV, c)\n\t\t\t\tnoteV = append(noteV, n)\n\t\t\t}\n\t\t}\n\t\tchannels = append(channels, row(chanV))\n\t\tnotes = append(notes, row(noteV))\n\t}\n\tdebugf(\"text2matrix(): figuremap: %v\", figuremap)\n\treturn\n}", "func FigletizeText(ctx workflow.Context, inputText string) (string, error) {\n\tao := workflow.ActivityOptions{\n\t\tTaskList: TaskList,\n\t\tScheduleToCloseTimeout: time.Second * 60,\n\t\tScheduleToStartTimeout: time.Second * 60,\n\t\tStartToCloseTimeout: time.Second * 60,\n\t\tHeartbeatTimeout: time.Second * 10,\n\t\tWaitForCancellation: false,\n\t}\n\n\tctx = workflow.WithActivityOptions(ctx, ao)\n\tfuture := workflow.ExecuteActivity(ctx, activity.FigletizeText, inputText)\n\tvar figletedText string\n\tif err := future.Get(ctx, &figletedText); err != nil {\n\n\t\tworkflow.GetLogger(ctx).Error(\"Executing FigletizeText activity\", zap.Error(err))\n\t\treturn \"\", err\n\t}\n\n\tworkflow.GetLogger(ctx).Info(\"FigletizeText workflow done\")\n\treturn figletedText, nil\n}", "func makeOutput(abc *st.Art, coloring bool) {\n\tindex := 0\n\tfor i := abc.Output.Index - 8; i < abc.Output.Index; i++ {\n\t\tline := abc.Alphabet.Rune[abc.Alphabet.Letter][index]\n\t\tif coloring {\n\t\t\tabc.Output.Final[i] += abc.Flag.Color.Case1 + line + abc.Flag.Color.Case2\n\t\t} else {\n\t\t\tabc.Output.Final[i] += line\n\t\t}\n\t\tindex++\n\t}\n}", "func (c *canvasRenderer) FillText(text string, position sprec.Vec2, typography Typography) {\n\tcurrentLayer := c.currentLayer\n\ttransformMatrix := currentLayer.Transform\n\tclipMatrix := currentLayer.ClipTransform\n\n\tfont := typography.Font\n\tfontSize := typography.Size\n\tcolor := uiColorToVec(typography.Color)\n\n\tvertexOffset := c.textMesh.Offset()\n\toffset := position\n\tlastGlyph := (*fontGlyph)(nil)\n\n\tfor _, ch := range text {\n\t\tlineHeight := font.lineHeight * fontSize\n\t\tlineAscent := font.lineAscent * fontSize\n\t\tif ch == '\\r' {\n\t\t\toffset.X = position.X\n\t\t\tlastGlyph = nil\n\t\t\tcontinue\n\t\t}\n\t\tif ch == '\\n' {\n\t\t\toffset.X = position.X\n\t\t\toffset.Y += lineHeight\n\t\t\tlastGlyph = nil\n\t\t\tcontinue\n\t\t}\n\n\t\tif glyph, ok := font.glyphs[ch]; ok {\n\t\t\tadvance := glyph.advance * fontSize\n\t\t\tleftBearing := glyph.leftBearing * fontSize\n\t\t\trightBearing := glyph.rightBearing * fontSize\n\t\t\tascent := glyph.ascent * fontSize\n\t\t\tdescent := glyph.descent * fontSize\n\n\t\t\tvertTopLeft := textVertex{\n\t\t\t\tposition: sprec.Vec2Sum(\n\t\t\t\t\tsprec.NewVec2(\n\t\t\t\t\t\tleftBearing,\n\t\t\t\t\t\tlineAscent-ascent,\n\t\t\t\t\t),\n\t\t\t\t\toffset,\n\t\t\t\t),\n\t\t\t\ttexCoord: sprec.NewVec2(glyph.leftU, glyph.topV),\n\t\t\t}\n\t\t\tvertTopRight := textVertex{\n\t\t\t\tposition: sprec.Vec2Sum(\n\t\t\t\t\tsprec.NewVec2(\n\t\t\t\t\t\tadvance-rightBearing,\n\t\t\t\t\t\tlineAscent-ascent,\n\t\t\t\t\t),\n\t\t\t\t\toffset,\n\t\t\t\t),\n\t\t\t\ttexCoord: sprec.NewVec2(glyph.rightU, glyph.topV),\n\t\t\t}\n\t\t\tvertBottomLeft := textVertex{\n\t\t\t\tposition: sprec.Vec2Sum(\n\t\t\t\t\tsprec.NewVec2(\n\t\t\t\t\t\tleftBearing,\n\t\t\t\t\t\tlineAscent+descent,\n\t\t\t\t\t),\n\t\t\t\t\toffset,\n\t\t\t\t),\n\t\t\t\ttexCoord: sprec.NewVec2(glyph.leftU, 
glyph.bottomV),\n\t\t\t}\n\t\t\tvertBottomRight := textVertex{\n\t\t\t\tposition: sprec.Vec2Sum(\n\t\t\t\t\tsprec.NewVec2(\n\t\t\t\t\t\tadvance-rightBearing,\n\t\t\t\t\t\tlineAscent+descent,\n\t\t\t\t\t),\n\t\t\t\t\toffset,\n\t\t\t\t),\n\t\t\t\ttexCoord: sprec.NewVec2(glyph.rightU, glyph.bottomV),\n\t\t\t}\n\n\t\t\tc.textMesh.Append(vertTopLeft)\n\t\t\tc.textMesh.Append(vertBottomLeft)\n\t\t\tc.textMesh.Append(vertBottomRight)\n\n\t\t\tc.textMesh.Append(vertTopLeft)\n\t\t\tc.textMesh.Append(vertBottomRight)\n\t\t\tc.textMesh.Append(vertTopRight)\n\n\t\t\toffset.X += advance\n\t\t\tif lastGlyph != nil {\n\t\t\t\toffset.X += lastGlyph.kerns[ch] * fontSize\n\t\t\t}\n\t\t\tlastGlyph = glyph\n\t\t}\n\t}\n\tvertexCount := c.textMesh.Offset() - vertexOffset\n\n\tif vertexCount == 0 {\n\t\treturn\n\t}\n\n\tc.commandQueue.BindPipeline(c.textPipeline)\n\tc.commandQueue.Uniform4f(c.textMaterial.colorLocation, color.Array())\n\tc.commandQueue.UniformMatrix4f(c.textMaterial.projectionMatrixLocation, c.projectionMatrix.ColumnMajorArray())\n\tc.commandQueue.UniformMatrix4f(c.textMaterial.transformMatrixLocation, transformMatrix.ColumnMajorArray())\n\tc.commandQueue.UniformMatrix4f(c.textMaterial.clipMatrixLocation, clipMatrix.ColumnMajorArray())\n\tc.commandQueue.TextureUnit(0, font.texture)\n\tc.commandQueue.Uniform1i(c.textMaterial.textureLocation, 0)\n\tc.commandQueue.Draw(vertexOffset, vertexCount, 1)\n}", "func ParseText(d *drawing.Drawing, data [][2]string) (entity.Entity, error) {\n\tt := entity.NewText()\n\tvar err error\n\tfor _, dt := range data {\n\t\tswitch dt[0] {\n\t\tdefault:\n\t\t\tcontinue\n\t\tcase \"8\":\n\t\t\tlayer, err := d.Layer(dt[1], false)\n\t\t\tif err == nil {\n\t\t\t\tt.SetLayer(layer)\n\t\t\t}\n\t\tcase \"48\":\n\t\t\terr = setFloat(dt, func(val float64) { t.SetLtscale(val) })\n\t\tcase \"10\":\n\t\t\terr = setFloat(dt, func(val float64) { t.Coord1[0] = val })\n\t\tcase \"20\":\n\t\t\terr = setFloat(dt, func(val float64) { t.Coord1[1] = val })\n\t\tcase \"30\":\n\t\t\terr = setFloat(dt, func(val float64) { t.Coord1[2] = val })\n\t\tcase \"11\":\n\t\t\terr = setFloat(dt, func(val float64) { t.Coord2[0] = val })\n\t\tcase \"21\":\n\t\t\terr = setFloat(dt, func(val float64) { t.Coord2[1] = val })\n\t\tcase \"31\":\n\t\t\terr = setFloat(dt, func(val float64) { t.Coord2[2] = val })\n\t\tcase \"40\":\n\t\t\terr = setFloat(dt, func(val float64) { t.Height = val })\n\t\tcase \"50\":\n\t\t\terr = setFloat(dt, func(val float64) { t.Rotation = val })\n\t\tcase \"1\":\n\t\t\tt.Value = dt[1]\n\t\tcase \"7\":\n\t\t\tif s, ok := d.Styles[dt[1]]; ok {\n\t\t\t\tt.Style = s\n\t\t\t}\n\t\tcase \"71\":\n\t\t\terr = setInt(dt, func(val int) { t.GenFlag = val })\n\t\tcase \"72\":\n\t\t\terr = setInt(dt, func(val int) { t.HorizontalFlag = val })\n\t\tcase \"73\":\n\t\t\terr = setInt(dt, func(val int) { t.VerticalFlag = val })\n\t\t}\n\t\tif err != nil {\n\t\t\treturn t, err\n\t\t}\n\t}\n\treturn t, nil\n}", "func textwrap(x, y, w openvg.VGfloat, s string, font string, fs, leading, factor openvg.VGfloat) {\n\tsize := int(fs)\n\tif font == \"mono\" {\n\t\tfactor = 1.0\n\t}\n\twordspacing := openvg.TextWidth(\"m\", font, size)\n\twords := strings.FieldsFunc(s, whitespace)\n\txp := x\n\typ := y\n\tedge := x + w\n\tfor _, s := range words {\n\t\ttw := openvg.TextWidth(s, font, size)\n\t\topenvg.Text(xp, yp, s, font, size)\n\t\txp += tw + (wordspacing * factor)\n\t\tif xp > edge {\n\t\t\txp = x\n\t\t\typ -= leading\n\t\t}\n\t}\n}", "func diagrama() {\n\tlaberinto[1][1] = \"E\"\n\tlaberinto[12][13] = 
\"S\"\n\tlaberinto[1][2] = \" \"\n\tlaberinto[1][3] = \" \"\n\tlaberinto[1][4] = \" \"\n\tlaberinto[1][6] = \" \"\n\tlaberinto[1][7] = \" \"\n\tlaberinto[1][8] = \" \"\n\tlaberinto[1][10] = \" \"\n\tlaberinto[1][11] = \" \"\n\tlaberinto[1][12] = \" \"\n\n\tlaberinto[2][4] = \" \"\n\tlaberinto[2][6] = \" \"\n\tlaberinto[2][8] = \" \"\n\tlaberinto[2][10] = \" \"\n\tlaberinto[2][12] = \" \"\n\tlaberinto[3][1] = \" \"\n\tlaberinto[3][2] = \" \"\n\tlaberinto[3][4] = \" \"\n\tlaberinto[3][6] = \" \"\n\tlaberinto[3][8] = \" \"\n\tlaberinto[3][10] = \" \"\n\tlaberinto[3][12] = \" \"\n\n\tlaberinto[4][1] = \" \"\n\tlaberinto[4][4] = \" \"\n\tlaberinto[4][6] = \" \"\n\tlaberinto[4][8] = \" \"\n\tlaberinto[4][10] = \" \"\n\tlaberinto[4][12] = \" \"\n\n\tlaberinto[5][1] = \" \"\n\tlaberinto[5][4] = \" \"\n\tlaberinto[5][6] = \" \"\n\tlaberinto[5][8] = \" \"\n\tlaberinto[5][10] = \" \"\n\tlaberinto[5][12] = \" \"\n\n\tlaberinto[6][1] = \" \"\n\tlaberinto[6][2] = \" \"\n\tlaberinto[6][3] = \" \"\n\tlaberinto[6][4] = \" \"\n\tlaberinto[6][6] = \" \"\n\tlaberinto[6][8] = \" \"\n\tlaberinto[6][10] = \" \"\n\tlaberinto[6][12] = \" \"\n\n\tlaberinto[7][2] = \" \"\n\tlaberinto[7][6] = \" \"\n\tlaberinto[7][8] = \" \"\n\tlaberinto[7][10] = \" \"\n\tlaberinto[7][12] = \" \"\n\n\tlaberinto[8][2] = \" \"\n\tlaberinto[8][3] = \" \"\n\tlaberinto[8][4] = \" \"\n\tlaberinto[8][5] = \" \"\n\tlaberinto[8][6] = \" \"\n\tlaberinto[8][8] = \" \"\n\tlaberinto[8][10] = \" \"\n\tlaberinto[8][12] = \" \"\n\n\tlaberinto[9][5] = \" \"\n\tlaberinto[9][8] = \" \"\n\tlaberinto[9][10] = \" \"\n\tlaberinto[9][12] = \" \"\n\n\tlaberinto[10][5] = \" \"\n\tlaberinto[10][8] = \" \"\n\tlaberinto[10][10] = \" \"\n\tlaberinto[10][12] = \" \"\n\tlaberinto[10][13] = \" \"\n\n\tlaberinto[11][5] = \" \"\n\tlaberinto[11][8] = \" \"\n\tlaberinto[11][9] = \" \"\n\tlaberinto[11][10] = \" \"\n\tlaberinto[11][13] = \" \"\n\n\tlaberinto[12][5] = \" \"\n\tlaberinto[12][11] = \" \"\n\tlaberinto[12][12] = \" \"\n\n\tlaberinto[13][5] = \" \"\n\tlaberinto[13][6] = \" \"\n\tlaberinto[13][7] = \" \"\n\tlaberinto[13][8] = \" \"\n\tlaberinto[13][9] = \" \"\n\tlaberinto[13][10] = \" \"\n\tlaberinto[13][11] = \" \"\n\n}", "func Figure(attrs []htmlgo.Attribute, children ...HTML) HTML {\n\treturn &htmlgo.Tree{Tag: \"figure\", Attributes: attrs, Children: children}\n}", "func TestSparkling_Render(t *testing.T) {\n\tvar buf bytes.Buffer\n\tsp := New(&buf)\n\tsp.AddSeries([]float64{0, 30, 55, 80, 33, 150}, \"Awesome\")\n\tsp.Render()\n\n\twant := \"Awesome ▁▂▃▄▂█\\n\"\n\n\ts := buf.String()\n\tgot := s[len(s)-len(want):]\n\n\tif got != want {\n\t\tt.Errorf(\"sparkling.Render() = %s, want: %s\", got, want)\n\t}\n}", "func Text(v interface{}) HTML {\n return HTML(\"\\n\" + html.EscapeString(fmt.Sprint(v)))\n}", "func (f *Font) Print(x, y float32, text string) error {\n\n\tx = float32(math.Round(float64(x)))\n\ty = float32(math.Round(float64(y)))\n\n\tindices := []rune(text)\n\n\tif len(indices) == 0 {\n\t\treturn nil\n\t}\n\n\t//setup blending mode\n\tgl.Enable(gl.BLEND)\n\tgl.BlendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA)\n\n\t// Activate corresponding render state\n\tgl.UseProgram(f.program)\n\t//set text color\n\tgl.Uniform4f(gl.GetUniformLocation(f.program, gl.Str(\"textColor\\x00\")), f.color.r, f.color.g, f.color.b, f.color.a)\n\t//set screen resolution\n\t//resUniform := gl.GetUniformLocation(f.program, gl.Str(\"resolution\\x00\"))\n\t//gl.Uniform2f(resUniform, float32(2560), 
float32(1440))\n\n\tgl.ActiveTexture(gl.TEXTURE0)\n\tgl.BindVertexArray(f.vao)\n\n\t// Iterate through all characters in string\n\tfor i := range indices {\n\n\t\t//get rune\n\t\truneIndex := indices[i]\n\n\t\t//find rune in fontChar list\n\t\tch, err := f.GetRune(runeIndex)\n\t\tif err != nil {\n\t\t\treturn err // @todo ignore errors?\n\t\t}\n\n\t\t//calculate position and size for current rune\n\t\txpos := x + float32(ch.bearingH)\n\t\typos := y - float32(+ch.height-ch.bearingV)\n\t\tw := float32(ch.width)\n\t\th := float32(ch.height)\n\n\t\t//set quad positions\n\t\tvar x1 = xpos\n\t\tvar x2 = xpos + w\n\t\tvar y1 = ypos\n\t\tvar y2 = ypos + h\n\n\t\t//setup quad array\n\t\tvar vertices = []float32{\n\t\t\t// X, Y, Z, U, V\n\t\t\t// Front\n\t\t\tx1, y1, 0.0, 0.0,\n\t\t\tx2, y1, 1.0, 0.0,\n\t\t\tx1, y2, 0.0, 1.0,\n\t\t\tx1, y2, 0.0, 1.0,\n\t\t\tx2, y1, 1.0, 0.0,\n\t\t\tx2, y2, 1.0, 1.0}\n\n\t\t// Render glyph texture over quad\n\t\tgl.BindTexture(gl.TEXTURE_2D, ch.textureID)\n\t\t// Update content of VBO memory\n\t\tgl.BindBuffer(gl.ARRAY_BUFFER, f.vbo)\n\n\t\t//BufferSubData(target Enum, offset int, data []byte)\n\t\tgl.BufferSubData(gl.ARRAY_BUFFER, 0, len(vertices)*4, gl.Ptr(vertices)) // Be sure to use glBufferSubData and not glBufferData\n\t\t// Render quad\n\t\tgl.DrawArrays(gl.TRIANGLES, 0, 24)\n\n\t\tgl.BindBuffer(gl.ARRAY_BUFFER, 0)\n\t\t// Now advance cursors for next glyph (note that advance is number of 1/64 pixels)\n\t\tx += float32((ch.advance >> 6)) // Bitshift by 6 to get value in pixels (2^6 = 64 (divide amount of 1/64th pixels by 64 to get amount of pixels))\n\n\t}\n\n\t//clear opengl textures and programs\n\tgl.BindVertexArray(0)\n\tgl.BindTexture(gl.TEXTURE_2D, 0)\n\tgl.UseProgram(0)\n\tgl.Disable(gl.BLEND)\n\n\treturn nil\n}", "func (d *Diagram) String() string { return toString(d) }", "func Figcaption_(children ...HTML) HTML {\n return Figcaption(nil, children...)\n}", "func textwrap(x, y, w, fs, leading float64, s, font, align, color string, opacity float64, canvas *gensvg.SVG) {\n\tcanvas.Gstyle(fmt.Sprintf(twrapfmt, align, opacity, color, font, fs))\n\twords := strings.FieldsFunc(s, whitespace)\n\txp := x\n\typ := y\n\tvar line string\n\tfor _, s := range words {\n\t\tline += s + \" \"\n\t\tif float64(len(line)) > w {\n\t\t\tcanvas.Text(xp, yp, line)\n\t\t\typ += leading\n\t\t\tline = \"\"\n\t\t}\n\t}\n\tif len(line) > 0 {\n\t\tcanvas.Text(xp, yp, line)\n\t}\n\tcanvas.Gend()\n}", "func Generate(name string) string {\n\tname = strings.ToLower(name)\n\tvar (\n\t\tr, g, b int\n\t)\n\tif len(name) > 0 {\n\t\tr = (int(name[0]) - 97) * 155 / 26\n\t}\n\tif len(name) > 1 {\n\t\tg = (int(name[1]) - 97) * 155 / 26\n\t}\n\tif len(name) > 2 {\n\t\tb = (int(name[2]) - 97) * 155 / 26\n\t}\n\ttext := initials(name)\n\tfontSize := int(500 / math.Pow(float64(len(text)), 0.7))\n\n\tdata := fmt.Sprintf(`<?xml version=\"1.0\"?>\n<svg width=\"1000\" height=\"1000\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n\t<circle cx=\"500\" cy=\"500\" r=\"500\" style=\"fill:rgb(%d,%d,%d)\" />\n\t<circle cx=\"500\" cy=\"500\" r=\"480\" style=\"fill:rgb(%d,%d,%d)\" />\n\t<text x=\"500\" y=\"500\" style=\"text-anchor:middle;font-size:%dpx;alignment-baseline:middle;font-family:Arial,Helvetica;fill:rgb(%d,%d,%d)\">%s</text>\n</svg>\n`, r, g, b, r+100, g+100, b+100, fontSize, r, g, b, text)\n\treturn data\n}", "func wordstack(x, y, fs float64, s []string, style string, canvas *gensvg.SVG) {\n\tls := fs + (fs / 2)\n\ty -= ls\n\tfor i := len(s); i > 0; i-- 
{\n\t\tcanvas.Text(x, y, s[i-1], style)\n\t\ty -= ls\n\t}\n}", "func story(Memories int) string {\n\tGamePlot := \"story.txt\"\n\tif len(plot) <= 0 {\n\t\tstory, err := os.Open(GamePlot)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"either incorrect format or associated with %s please rewrite.\\n\", err)\n\t\t} else {\n\t\t\tStoryReader := bio.NewScanner(story)\n\t\t\tStoryReader.Split(bio.ScanLines)\n\t\t\tdefer story.Close()\n\t\t\tfor StoryReader.Scan() {\n\t\t\t\tplot = append(plot, StoryReader.Text())\n\t\t\t}\n\t\t}\n\t}\n\treturn plot[Memories]\n}", "func (t Text) String() string {\n\tbuf := make([]byte, 0, 64)\n\tfor _, segment := range t {\n\t\tstyleBuf := make([]string, 0, 8)\n\n\t\tif segment.Bold {\n\t\t\tstyleBuf = append(styleBuf, \"1\")\n\t\t}\n\t\tif segment.Dim {\n\t\t\tstyleBuf = append(styleBuf, \"2\")\n\t\t}\n\t\tif segment.Italic {\n\t\t\tstyleBuf = append(styleBuf, \"3\")\n\t\t}\n\t\tif segment.Underlined {\n\t\t\tstyleBuf = append(styleBuf, \"4\")\n\t\t}\n\t\tif segment.Blink {\n\t\t\tstyleBuf = append(styleBuf, \"5\")\n\t\t}\n\t\tif segment.Inverse {\n\t\t\tstyleBuf = append(styleBuf, \"7\")\n\t\t}\n\n\t\tif segment.Foreground != \"\" {\n\t\t\tif col, ok := colorTranslationTable[segment.Foreground]; ok {\n\t\t\t\tstyleBuf = append(styleBuf, col)\n\t\t\t}\n\t\t}\n\t\tif segment.Background != \"\" {\n\t\t\tif col, ok := colorTranslationTable[\"bg-\"+segment.Background]; ok {\n\t\t\t\tstyleBuf = append(styleBuf, col)\n\t\t\t}\n\t\t}\n\n\t\tif len(styleBuf) > 0 {\n\t\t\tbuf = append(buf, \"\\033[\"...)\n\t\t\tbuf = append(buf, strings.Join(styleBuf, \";\")...)\n\t\t\tbuf = append(buf, 'm')\n\t\t\tbuf = append(buf, segment.Text...)\n\t\t\tbuf = append(buf, \"\\033[m\"...)\n\t\t} else {\n\t\t\tbuf = append(buf, segment.Text...)\n\t\t}\n\t}\n\treturn string(buf)\n}", "func newTex(w, h int) []byte {\n\treturn make([]byte, 3*w*h)\n}", "func (self *GameObjectCreator) Text(x int, y int, text string, style interface{}) *Text{\n return &Text{self.Object.Call(\"text\", x, y, text, style)}\n}", "func (hdlr Latex) Draw(c vg.Canvas, txt string, sty Style, pt vg.Point) {\n\tcnv := drawtex.New()\n\tface := hdlr.Fonts.Lookup(sty.Font, sty.Font.Size)\n\tfnts := hdlr.fontsFor(sty.Font)\n\tbox, err := mtex.Parse(txt, face.Font.Size.Points(), latexDPI, ttf.NewFrom(cnv, fnts))\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"could not parse math expression: %w\", err))\n\t}\n\n\tvar sh tex.Ship\n\tsh.Call(0, 0, box.(tex.Tree))\n\n\tw := box.Width()\n\th := box.Height()\n\td := box.Depth()\n\n\tdpi := hdlr.dpi() / latexDPI\n\to := latex{\n\t\tcnv: c,\n\t\tfonts: hdlr.Fonts,\n\t\tsty: sty,\n\t\tpt: pt,\n\t\tw: vg.Length(w * dpi),\n\t\th: vg.Length((h + d) * dpi),\n\t\tcos: 1,\n\t\tsin: 0,\n\t}\n\te := face.Extents()\n\to.xoff = vg.Length(sty.XAlign) * o.w\n\to.yoff = o.h + o.h*vg.Length(sty.YAlign) - (e.Height - e.Ascent)\n\n\tif sty.Rotation != 0 {\n\t\tsin64, cos64 := math.Sincos(sty.Rotation)\n\t\to.cos = vg.Length(cos64)\n\t\to.sin = vg.Length(sin64)\n\n\t\to.cnv.Push()\n\t\tdefer o.cnv.Pop()\n\t\to.cnv.Rotate(sty.Rotation)\n\t}\n\n\terr = o.Render(w/latexDPI, (h+d)/latexDPI, dpi, cnv)\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"could not render math expression: %w\", err))\n\t}\n}", "func (this *SemanticsBase) RHSText(i int,j int) string {\n\treturn this.CurrentRule.RHSText(i,j);\n}", "func NewAtlas(font *Font, first, last rune) *Atlas {\n\n\ta := new(Atlas)\n\ta.Chars = make([]CharInfo, last+1)\n\n\t// Get font metrics\n\tmetrics := font.Metrics()\n\ta.Height = int(metrics.Height >> 6)\n\ta.Ascent 
= int(metrics.Ascent >> 6)\n\ta.Descent = int(metrics.Descent >> 6)\n\n\tconst cols = 16\n\tcol := 0\n\tencoded := make([]byte, 4)\n\tline := []byte{}\n\tlines := \"\"\n\tmaxWidth := 0\n\tlastX := 0\n\tlastY := a.Descent\n\tnlines := 0\n\tfor code := first; code <= last; code++ {\n\t\t// Encodes rune into UTF8 and appends to current line\n\t\tcount := utf8.EncodeRune(encoded, code)\n\t\tline = append(line, encoded[:count]...)\n\n\t\t// Measure current line\n\t\twidth, _ := font.MeasureText(string(line))\n\n\t\t// Sets current code fields\n\t\tcinfo := &a.Chars[code]\n\t\tcinfo.X = lastX\n\t\tcinfo.Y = lastY\n\t\tcinfo.Width = width - lastX - 1\n\t\tcinfo.Height += a.Height\n\t\tlastX = width\n\t\tfmt.Printf(\"%c: cinfo:%+v\\n\", code, cinfo)\n\n\t\t// Checks end of the current line\n\t\tcol++\n\t\tif col >= cols || code == last {\n\t\t\tnlines++\n\t\t\tlines += string(line) + \"\\n\"\n\t\t\tline = []byte{}\n\t\t\t// Checks max width\n\t\t\tif width > maxWidth {\n\t\t\t\tmaxWidth = width\n\t\t\t}\n\t\t\tif code == last {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tcol = 0\n\t\t\tlastX = 0\n\t\t\tlastY += a.Height\n\t\t}\n\t}\n\theight := (nlines * a.Height) + a.Descent\n\n\t// Draw atlas image\n\tcanvas := NewCanvas(maxWidth, height, &math32.Color4{1, 1, 1, 1})\n\tcanvas.DrawText(0, 0, lines, font)\n\ta.Image = canvas.RGBA\n\n\t// Calculate normalized char positions in the image\n\tfWidth := float32(maxWidth)\n\tfHeight := float32(height)\n\tfor i := 0; i < len(a.Chars); i++ {\n\t\tchar := &a.Chars[i]\n\t\tchar.OffsetX = float32(char.X) / fWidth\n\t\tchar.OffsetY = float32(char.Y) / fHeight\n\t\tchar.RepeatX = float32(char.Width) / fWidth\n\t\tchar.RepeatY = float32(char.Height) / fHeight\n\t}\n\n\ta.SavePNG(\"atlas.png\")\n\treturn a\n}", "func (i Img) generate() (*bytes.Buffer, error) {\n\t// If there are dimensions and there are no requirements for the Text, we will build the default Text.\n\tif ((i.Width > 0 || i.Height > 0) && i.Label.Text == \"\") || i.Label.Text == \"\" {\n\t\ti.Label.Text = fmt.Sprintf(\"%d x %d\", i.Width, i.Height)\n\t}\n\t// If there are no parameters for the font size, we will construct it based on the sizes of the image.\n\tif i.Label.FontSize == 0 {\n\t\ti.Label.FontSize = i.Width / 10\n\t\tif i.Height < i.Width {\n\t\t\ti.Label.FontSize = i.Height / 5\n\t\t}\n\t}\n\t// Convert the color from string to color.RGBA.\n\tclr, err := ToRGBA(i.Color)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Create an in-memory image with the desired size.\n\tm := image.NewRGBA(image.Rect(0, 0, i.Width, i.Height))\n\t//Draw a picture:\n\t// - in the sizes (Bounds)\n\t// - with color (Uniform - wrapper above color.Color with Image functions)\n\t// - based on the point (Point) as the base image\n\t// - fill with color Uniform (draw.Src)\n\tdraw.Draw(m, m.Bounds(), image.NewUniform(clr), image.Point{}, draw.Src)\n\t// add a text in the picture.\n\tif err = i.drawLabel(m); err != nil {\n\t\treturn nil, err\n\t}\n\tvar im image.Image = m\n\t// Allocate memory for our data (the bytes of the image)\n\tbuffer := &bytes.Buffer{}\n\t// Let's encode the image into our allocated memory.\n\terr = jpeg.Encode(buffer, im, nil)\n\n\treturn buffer, err\n}", "func Draw(d Drawable, l rune, x, y int, r, g, b byte) error {\n\n\tfontLine := func(s string, x, y int) {\n\t\tfor _, l := range s {\n\t\t\tif l == '*' {\n\t\t\t\td.Set(x, y, color.NRGBA{r, g, b, 255})\n\t\t\t} else if l == '-' {\n\t\t\t\td.Set(x, y, color.NRGBA{r, g, b, 64})\n\t\t\t}\n\t\t\tx++\n\t\t}\n\t\treturn\n\t}\n\n\tswitch l {\n\tcase 
'a':\n\t\tfontLine(\"***-\", x+1, y+1)\n\t\tfontLine(\"--**\", x+1, y+2)\n\t\tfontLine(\"-****\", x, y+3)\n\t\tfontLine(\"**-**-\", x, y+4)\n\t\tfontLine(\"-**-**\", x, y+5)\n\tcase 'A':\n\t\tfontLine(\"-**-\", x+1, y)\n\t\tfontLine(\"-****-\", x, y+1)\n\t\tfontLine(\"**--**\", x, y+2)\n\t\tfontLine(\"******\", x, y+3)\n\t\tfontLine(\"**--**\", x, y+4)\n\t\tfontLine(\"** **\", x, y+5)\n\tcase 'b':\n\t\tfontLine(\"***\", x, y)\n\t\tfontLine(\"-**-\", x, y+1)\n\t\tfontLine(\"****-\", x+1, y+2)\n\t\tfontLine(\"** **\", x+1, y+3)\n\t\tfontLine(\"-**-**\", x, y+4)\n\t\tfontLine(\"**-**-\", x, y+5)\n\tcase 'B':\n\t\tfontLine(\"*****-\", x, y)\n\t\tfontLine(\"-**-**\", x, y+1)\n\t\tfontLine(\"****-\", x+1, y+2)\n\t\tfontLine(\"** **\", x+1, y+3)\n\t\tfontLine(\"-** **\", x, y+4)\n\t\tfontLine(\"*****-\", x, y+5)\n\tcase 'c':\n\t\tfontLine(\"-****-\", x, y+1)\n\t\tfontLine(\"**--**\", x, y+2)\n\t\tfontLine(\"** ---\", x, y+3)\n\t\tfontLine(\"**--**\", x, y+4)\n\t\tfontLine(\"-****-\", x, y+5)\n\tcase 'C':\n\t\tfontLine(\"-****-\", x, y)\n\t\tfontLine(\"**- -*\", x, y+1)\n\t\tfontLine(\"**\", x, y+2)\n\t\tfontLine(\"**\", x, y+3)\n\t\tfontLine(\"**- -*\", x, y+4)\n\t\tfontLine(\"-****-\", x, y+5)\n\tcase 'd':\n\t\tfontLine(\"***\", x+3, y)\n\t\tfontLine(\"-**\", x+3, y+1)\n\t\tfontLine(\"-*****\", x, y+2)\n\t\tfontLine(\"**--**\", x, y+3)\n\t\tfontLine(\"** **\", x, y+4)\n\t\tfontLine(\"-***-*\", x, y+5)\n\tcase 'D':\n\t\tfontLine(\"****-\", x, y)\n\t\tfontLine(\"**-**-\", x, y+1)\n\t\tfontLine(\"** -**\", x, y+2)\n\t\tfontLine(\"** -**\", x, y+3)\n\t\tfontLine(\"**-**-\", x, y+4)\n\t\tfontLine(\"****-\", x, y+5)\n\tcase 'e':\n\t\tfontLine(\"-****-\", x, y+1)\n\t\tfontLine(\"** -**\", x, y+2)\n\t\tfontLine(\"******\", x, y+3)\n\t\tfontLine(\"**-\", x, y+4)\n\t\tfontLine(\"-****-\", x, y+5)\n\tcase 'E':\n\t\tfontLine(\"-*****\", x, y)\n\t\tfontLine(\"**---*\", x, y+1)\n\t\tfontLine(\"****-\", x, y+2)\n\t\tfontLine(\"**--\", x, y+3)\n\t\tfontLine(\"**- **\", x, y+4)\n\t\tfontLine(\"*****-\", x, y+5)\n\tcase 'f':\n\t\tfontLine(\"-***-\", x+1, y)\n\t\tfontLine(\"-**--*\", x, y+1)\n\t\tfontLine(\"****\", x, y+2)\n\t\tfontLine(\"-**-\", x, y+3)\n\t\tfontLine(\"**\", x+1, y+4)\n\t\tfontLine(\"****\", x, y+5)\n\tcase 'F':\n\t\tfontLine(\"*****-\", x, y)\n\t\tfontLine(\"**--**\", x, y+1)\n\t\tfontLine(\"**-\", x, y+2)\n\t\tfontLine(\"****\", x, y+3)\n\t\tfontLine(\"**-\", x, y+4)\n\t\tfontLine(\"*-\", x, y+5)\n\tcase 'g':\n\t\tfontLine(\"-**-**\", x, y+1)\n\t\tfontLine(\"** **-\", x, y+2)\n\t\tfontLine(\"-****\", x, y+3)\n\t\tfontLine(\"-**\", x+2, y+4)\n\t\tfontLine(\"****-\", x, y+5)\n\tcase 'G':\n\t\tfontLine(\"-****-\", x, y)\n\t\tfontLine(\"**--**\", x, y+1)\n\t\tfontLine(\"**\", x, y+2)\n\t\tfontLine(\"** ***\", x, y+3)\n\t\tfontLine(\"**--**\", x, y+4)\n\t\tfontLine(\"-***-*\", x, y+5)\n\tcase 'h':\n\t\tfontLine(\"***\", x, y)\n\t\tfontLine(\"-**-\", x, y+1)\n\t\tfontLine(\"****-\", x+1, y+2)\n\t\tfontLine(\"**-**\", x+1, y+3)\n\t\tfontLine(\"-** **\", x, y+4)\n\t\tfontLine(\"*** **\", x, y+5)\n\tcase 'H':\n\t\tfontLine(\"** **\", x, y)\n\t\tfontLine(\"**--**\", x, y+1)\n\t\tfontLine(\"******\", x, y+2)\n\t\tfontLine(\"**--**\", x, y+3)\n\t\tfontLine(\"** **\", x, y+4)\n\t\tfontLine(\"** **\", x, y+5)\n\tcase 'i':\n\t\tfontLine(\"**\", x+2, y)\n\t\tfontLine(\"--\", x+2, y+1)\n\t\tfontLine(\"***\", x+1, y+2)\n\t\tfontLine(\"-**\", x+1, y+3)\n\t\tfontLine(\"-**-\", x+1, y+4)\n\t\tfontLine(\"****\", x+1, y+5)\n\tcase 'I':\n\t\tfontLine(\"****\", x+1, y)\n\t\tfontLine(\"-**-\", x+1, 
y+1)\n\t\tfontLine(\"**\", x+2, y+2)\n\t\tfontLine(\"**\", x+2, y+3)\n\t\tfontLine(\"-**-\", x+1, y+4)\n\t\tfontLine(\"****\", x+1, y+5)\n\tcase 'j':\n\t\tfontLine(\"**\", x+3, y)\n\t\tfontLine(\"--\", x+3, y+1)\n\t\tfontLine(\"***\", x+2, y+2)\n\t\tfontLine(\"-**\", x+2, y+3)\n\t\tfontLine(\"**-**\", x, y+4)\n\t\tfontLine(\"-***-\", x, y+5)\n\tcase 'J':\n\t\tfontLine(\"****\", x+2, y)\n\t\tfontLine(\"-**-\", x+2, y+1)\n\t\tfontLine(\"**\", x+3, y+2)\n\t\tfontLine(\"** **\", x, y+3)\n\t\tfontLine(\"**-**\", x, y+4)\n\t\tfontLine(\"-***-\", x, y+5)\n\tcase 'k':\n\t\tfontLine(\"***\", x, y)\n\t\tfontLine(\"-**\", x, y+1)\n\t\tfontLine(\"**-**\", x+1, y+2)\n\t\tfontLine(\"****-\", x+1, y+3)\n\t\tfontLine(\"-**-**\", x, y+4)\n\t\tfontLine(\"*** **\", x, y+5)\n\tcase 'K':\n\t\tfontLine(\"***-**\", x, y)\n\t\tfontLine(\"-****-\", x, y+1)\n\t\tfontLine(\"***-\", x+1, y+2)\n\t\tfontLine(\"****-\", x+1, y+3)\n\t\tfontLine(\"-**-**\", x, y+4)\n\t\tfontLine(\"*** **\", x, y+5)\n\tcase 'l':\n\t\tfontLine(\"***\", x+1, y)\n\t\tfontLine(\"-**\", x+1, y+1)\n\t\tfontLine(\"**\", x+2, y+2)\n\t\tfontLine(\"**\", x+2, y+3)\n\t\tfontLine(\"-**-\", x+1, y+4)\n\t\tfontLine(\"****\", x+1, y+5)\n\tcase 'L':\n\t\tfontLine(\"****\", x, y)\n\t\tfontLine(\"-**-\", x, y+1)\n\t\tfontLine(\"**\", x+1, y+2)\n\t\tfontLine(\"**\", x+1, y+3)\n\t\tfontLine(\"-**--*\", x, y+4)\n\t\tfontLine(\"******\", x, y+5)\n\tcase 'm':\n\t\tfontLine(\"**-**-\", x, y+1)\n\t\tfontLine(\"-*****\", x, y+2)\n\t\tfontLine(\"*-*-*\", x+1, y+3)\n\t\tfontLine(\"*-*-*\", x+1, y+4)\n\t\tfontLine(\"*-*-*\", x+1, y+5)\n\tcase 'M':\n\t\tfontLine(\"**-**\", x+1, y)\n\t\tfontLine(\"**-**\", x+1, y+1)\n\t\tfontLine(\"*-*-*\", x+1, y+2)\n\t\tfontLine(\"*- -*\", x+1, y+3)\n\t\tfontLine(\"*- -*\", x+1, y+4)\n\t\tfontLine(\"* *\", x+1, y+5)\n\tcase 'n':\n\t\tfontLine(\"**-**-\", x, y+1)\n\t\tfontLine(\"-*****\", x, y+2)\n\t\tfontLine(\"**-**\", x+1, y+3)\n\t\tfontLine(\"** **\", x+1, y+4)\n\t\tfontLine(\"** **\", x+1, y+5)\n\tcase 'N':\n\t\tfontLine(\"** **\", x, y)\n\t\tfontLine(\"**- **\", x, y+1)\n\t\tfontLine(\"***-**\", x, y+2)\n\t\tfontLine(\"**-***\", x, y+3)\n\t\tfontLine(\"** -**\", x, y+4)\n\t\tfontLine(\"** **\", x, y+5)\n\tcase 'o':\n\t\tfontLine(\"-****-\", x, y+1)\n\t\tfontLine(\"**--**\", x, y+2)\n\t\tfontLine(\"** **\", x, y+3)\n\t\tfontLine(\"**--**\", x, y+4)\n\t\tfontLine(\"-****-\", x, y+5)\n\tcase 'O':\n\t\tfontLine(\"-****-\", x, y)\n\t\tfontLine(\"**--**\", x, y+1)\n\t\tfontLine(\"** **\", x, y+2)\n\t\tfontLine(\"** **\", x, y+3)\n\t\tfontLine(\"**--**\", x, y+4)\n\t\tfontLine(\"-****-\", x, y+5)\n\tcase 'p':\n\t\tfontLine(\"**-**-\", x, y+1)\n\t\tfontLine(\"-**--*\", x, y+2)\n\t\tfontLine(\"****\", x+1, y+3)\n\t\tfontLine(\"-**-\", x, y+4)\n\t\tfontLine(\"****\", x, y+5)\n\tcase 'P':\n\t\tfontLine(\"*****-\", x, y)\n\t\tfontLine(\"-**-**\", x, y+1)\n\t\tfontLine(\"****-\", x+1, y+2)\n\t\tfontLine(\"**-\", x+1, y+3)\n\t\tfontLine(\"-**-\", x, y+4)\n\t\tfontLine(\"****\", x, y+5)\n\tcase 'q':\n\t\tfontLine(\"-**-**\", x, y+1)\n\t\tfontLine(\"*--**-\", x, y+2)\n\t\tfontLine(\"-****\", x, y+3)\n\t\tfontLine(\"-**-\", x+2, y+4)\n\t\tfontLine(\"****\", x+2, y+5)\n\tcase 'Q':\n\t\tfontLine(\"-****-\", x, y)\n\t\tfontLine(\"**--**\", x, y+1)\n\t\tfontLine(\"** -**\", x, y+2)\n\t\tfontLine(\"**-***\", x, y+3)\n\t\tfontLine(\"-****-\", x, y+4)\n\t\tfontLine(\"-**\", x+3, y+5)\n\tcase 'r':\n\t\tfontLine(\"**-**-\", x, y+1)\n\t\tfontLine(\"-*****\", x, y+2)\n\t\tfontLine(\"**--*\", x+1, y+3)\n\t\tfontLine(\"-**-\", x, y+4)\n\t\tfontLine(\"****\", 
x, y+5)\n\tcase 'R':\n\t\tfontLine(\"****-\", x, y)\n\t\tfontLine(\"-*--*-\", x, y+1)\n\t\tfontLine(\"*--*-\", x+1, y+2)\n\t\tfontLine(\"***-\", x+1, y+3)\n\t\tfontLine(\"-*-**-\", x, y+4)\n\t\tfontLine(\"**--**\", x, y+5)\n\tcase 's':\n\t\tfontLine(\"-*****\", x, y+1)\n\t\tfontLine(\"**-\", x, y+2)\n\t\tfontLine(\"-****-\", x, y+3)\n\t\tfontLine(\"-**\", x+3, y+4)\n\t\tfontLine(\"*****-\", x, y+5)\n\tcase 'S':\n\t\tfontLine(\"-****-\", x, y)\n\t\tfontLine(\"**--**\", x, y+1)\n\t\tfontLine(\"-***-\", x, y+2)\n\t\tfontLine(\"-**-\", x+2, y+3)\n\t\tfontLine(\"**--**\", x, y+4)\n\t\tfontLine(\"-****-\", x, y+5)\n\tcase 't':\n\t\tfontLine(\"-*\", x+1, y)\n\t\tfontLine(\"-**-\", x, y+1)\n\t\tfontLine(\"****\", x, y+2)\n\t\tfontLine(\"-**-\", x, y+3)\n\t\tfontLine(\"**-*\", x+1, y+4)\n\t\tfontLine(\"-**-\", x+1, y+5)\n\tcase 'T':\n\t\tfontLine(\"******\", x, y)\n\t\tfontLine(\"*-**-*\", x, y+1)\n\t\tfontLine(\"**\", x+2, y+2)\n\t\tfontLine(\"**\", x+2, y+3)\n\t\tfontLine(\"-**-\", x+1, y+4)\n\t\tfontLine(\"****\", x+1, y+5)\n\tcase 'u':\n\t\tfontLine(\"** **\", x, y+1)\n\t\tfontLine(\"** **\", x, y+2)\n\t\tfontLine(\"** **\", x, y+3)\n\t\tfontLine(\"**-**-\", x, y+4)\n\t\tfontLine(\"-**-**\", x, y+5)\n\tcase 'U':\n\t\tfontLine(\"** **\", x, y)\n\t\tfontLine(\"** **\", x, y+1)\n\t\tfontLine(\"** **\", x, y+2)\n\t\tfontLine(\"** **\", x, y+3)\n\t\tfontLine(\"**--**\", x, y+4)\n\t\tfontLine(\"-****-\", x, y+5)\n\tcase 'v':\n\t\tfontLine(\"** **\", x, y+1)\n\t\tfontLine(\"** **\", x, y+2)\n\t\tfontLine(\"**--**\", x, y+3)\n\t\tfontLine(\"-****-\", x, y+4)\n\t\tfontLine(\"-**-\", x+1, y+5)\n\tcase 'V':\n\t\tfontLine(\"** **\", x, y)\n\t\tfontLine(\"** **\", x, y+1)\n\t\tfontLine(\"** **\", x, y+2)\n\t\tfontLine(\"**--**\", x, y+3)\n\t\tfontLine(\"-****-\", x, y+4)\n\t\tfontLine(\"-**-\", x+1, y+5)\n\tcase 'w':\n\t\tfontLine(\"* *\", x+1, y+1)\n\t\tfontLine(\"*- -*\", x+1, y+2)\n\t\tfontLine(\"*-*-*\", x+1, y+3)\n\t\tfontLine(\"*-*-*\", x+1, y+4)\n\t\tfontLine(\"-*-*-\", x+1, y+5)\n\tcase 'W':\n\t\tfontLine(\"* *\", x+1, y)\n\t\tfontLine(\"*- -*\", x+1, y+1)\n\t\tfontLine(\"*- -*\", x+1, y+2)\n\t\tfontLine(\"*-*-*\", x+1, y+3)\n\t\tfontLine(\"**-**\", x+1, y+4)\n\t\tfontLine(\"-*-*-\", x+1, y+5)\n\tcase 'x':\n\t\tfontLine(\"**--**\", x, y+1)\n\t\tfontLine(\"-****-\", x, y+2)\n\t\tfontLine(\"-**-\", x+1, y+3)\n\t\tfontLine(\"-****-\", x, y+4)\n\t\tfontLine(\"**--**\", x, y+5)\n\tcase 'X':\n\t\tfontLine(\"**--**\", x, y)\n\t\tfontLine(\"-****-\", x, y+1)\n\t\tfontLine(\"-**-\", x+1, y+2)\n\t\tfontLine(\"-**-\", x+1, y+3)\n\t\tfontLine(\"-****-\", x, y+4)\n\t\tfontLine(\"**--**\", x, y+5)\n\tcase 'y':\n\t\tfontLine(\"** **\", x, y+1)\n\t\tfontLine(\"**--**\", x, y+2)\n\t\tfontLine(\"-****-\", x, y+3)\n\t\tfontLine(\"-*\", x+4, y+4)\n\t\tfontLine(\"*****-\", x, y+5)\n\tcase 'Y':\n\t\tfontLine(\"** **\", x, y)\n\t\tfontLine(\"**--**\", x, y+1)\n\t\tfontLine(\"-****-\", x, y+2)\n\t\tfontLine(\"-**-\", x+1, y+3)\n\t\tfontLine(\"-**-\", x+1, y+4)\n\t\tfontLine(\"****\", x+1, y+5)\n\tcase 'z':\n\t\tfontLine(\"******\", x, y+1)\n\t\tfontLine(\"*--**-\", x, y+2)\n\t\tfontLine(\"-**-\", x+1, y+3)\n\t\tfontLine(\"-**--*\", x, y+4)\n\t\tfontLine(\"******\", x, y+5)\n\tcase 'Z':\n\t\tfontLine(\"******\", x, y)\n\t\tfontLine(\"*- -**\", x, y+1)\n\t\tfontLine(\"-**-\", x+2, y+2)\n\t\tfontLine(\"-**-\", x+1, y+3)\n\t\tfontLine(\"-**--*\", x, y+4)\n\t\tfontLine(\"******\", x, y+5)\n\tcase 'æ':\n\t\tfontLine(\"****-\", x, y+1)\n\t\tfontLine(\"-*-*\", x+1, y+2)\n\t\tfontLine(\"-***-\", x, y+3)\n\t\tfontLine(\"*-*-\", x, 
y+4)\n\t\tfontLine(\"-****\", x, y+5)\n\tcase 'Æ':\n\t\tfontLine(\"-*****\", x, y)\n\t\tfontLine(\"**-**-\", x, y+1)\n\t\tfontLine(\"******\", x, y+2)\n\t\tfontLine(\"**-**-\", x, y+3)\n\t\tfontLine(\"** **-\", x, y+4)\n\t\tfontLine(\"** ***\", x, y+5)\n\tcase 'ø':\n\t\tfontLine(\"-***-*\", x, y+1)\n\t\tfontLine(\"*--**-\", x, y+2)\n\t\tfontLine(\"*-**-*\", x, y+3)\n\t\tfontLine(\"-**--*\", x, y+4)\n\t\tfontLine(\"*-***-\", x, y+5)\n\tcase 'Ø':\n\t\tfontLine(\"-***-*\", x, y)\n\t\tfontLine(\"*--**-\", x, y+1)\n\t\tfontLine(\"*-**-*\", x, y+2)\n\t\tfontLine(\"*-**-*\", x, y+3)\n\t\tfontLine(\"-**--*\", x, y+4)\n\t\tfontLine(\"*-***-\", x, y+5)\n\tcase 'å':\n\t\tfontLine(\"-**-\", x+1, y)\n\t\tfontLine(\"***-\", x+1, y+1)\n\t\tfontLine(\"--**\", x+1, y+2)\n\t\tfontLine(\"-****\", x, y+3)\n\t\tfontLine(\"**-**-\", x, y+4)\n\t\tfontLine(\"-**-**\", x, y+5)\n\tcase 'Å':\n\t\tfontLine(\"**\", x+2, y)\n\t\tfontLine(\"--\", x+2, y+1)\n\t\tfontLine(\"-****-\", x, y+2)\n\t\tfontLine(\"**--**\", x, y+3)\n\t\tfontLine(\"******\", x, y+4)\n\t\tfontLine(\"**--**\", x, y+5)\n\tcase '.':\n\t\tfontLine(\"**-\", x+1, y+4)\n\t\tfontLine(\"-**\", x, y+5)\n\tcase ':':\n\t\tfontLine(\"**-\", x+1, y)\n\t\tfontLine(\"-**\", x, y+1)\n\t\tfontLine(\"**-\", x+1, y+4)\n\t\tfontLine(\"-**\", x, y+5)\n\tcase ';':\n\t\tfontLine(\"**-\", x+1, y)\n\t\tfontLine(\"-**\", x, y+1)\n\t\tfontLine(\"-*\", x+1, y+4)\n\t\tfontLine(\"*-\", x+1, y+5)\n\tcase ',':\n\t\tfontLine(\"-*\", x+1, y+4)\n\t\tfontLine(\"*-\", x+1, y+5)\n\tcase '\\'':\n\t\tfontLine(\"-*\", x+1, y)\n\t\tfontLine(\"*-\", x+1, y+1)\n\tcase '\"':\n\t\tfontLine(\"-* -*\", x, y)\n\t\tfontLine(\"*- *-\", x, y+1)\n\tcase '→':\n\t\tfontLine(\"-*-\", x+2, y)\n\t\tfontLine(\"-*-\", x+3, y+1)\n\t\tfontLine(\"******\", x, y+2)\n\t\tfontLine(\"-*-\", x+3, y+3)\n\t\tfontLine(\"-*-\", x+2, y+4)\n\tcase '*':\n\t\tfontLine(\"* *\", x+2, y+1)\n\t\tfontLine(\" *\", x+2, y+2)\n\t\tfontLine(\"* *\", x+2, y+3)\n\tcase '+':\n\t\tfontLine(\" *\", x, y)\n\t\tfontLine(\" *\", x, y+1)\n\t\tfontLine(\"*****\", x, y+2)\n\t\tfontLine(\" *\", x, y+3)\n\t\tfontLine(\" *\", x, y+4)\n\tcase '!':\n\t\tfontLine(\"**\", x+1, y)\n\t\tfontLine(\"-**-\", x, y+1)\n\t\tfontLine(\"-**-\", x, y+2)\n\t\tfontLine(\"**\", x+1, y+3)\n\t\tfontLine(\"--\", x+1, y+4)\n\t\tfontLine(\"**\", x+1, y+5)\n\tcase '?':\n\t\tfontLine(\"-****-\", x, y)\n\t\tfontLine(\"**--**\", x, y+1)\n\t\tfontLine(\"-**-\", x+2, y+2)\n\t\tfontLine(\"**-\", x+2, y+3)\n\t\tfontLine(\"--\", x+2, y+4)\n\t\tfontLine(\"**\", x+2, y+5)\n\tcase '-':\n\t\tfontLine(\"-****-\", x, y+3)\n\tcase '=':\n\t\tfontLine(\"-****-\", x, y+2)\n\t\tfontLine(\"-****-\", x, y+4)\n\tcase '_':\n\t\tfontLine(\"-****-\", x, y+5)\n\tcase '/':\n\t\tfontLine(\"*\", x+4, y+1)\n\t\tfontLine(\"*-\", x+3, y+2)\n\t\tfontLine(\"*-\", x+2, y+3)\n\t\tfontLine(\"*-\", x+1, y+4)\n\t\tfontLine(\"*-\", x, y+5)\n\tcase '1':\n\t\tfontLine(\"-**\", x+1, y)\n\t\tfontLine(\"***\", x+1, y+1)\n\t\tfontLine(\"-**\", x+1, y+2)\n\t\tfontLine(\"**\", x+2, y+3)\n\t\tfontLine(\"**-\", x+2, y+4)\n\t\tfontLine(\"****\", x+1, y+5)\n\tcase '2':\n\t\tfontLine(\"-****-\", x, y)\n\t\tfontLine(\"*- -**\", x, y+1)\n\t\tfontLine(\"-**-\", x+2, y+2)\n\t\tfontLine(\"-**-\", x+1, y+3)\n\t\tfontLine(\"-**--*\", x, y+4)\n\t\tfontLine(\"******\", x, y+5)\n\tcase '3':\n\t\tfontLine(\"-****-\", x, y)\n\t\tfontLine(\"**--**\", x, y+1)\n\t\tfontLine(\"**-\", x+3, y+2)\n\t\tfontLine(\"-**\", x+3, y+3)\n\t\tfontLine(\"**--**\", x, y+4)\n\t\tfontLine(\"-****-\", x, y+5)\n\tcase '4':\n\t\tfontLine(\"-***\", x+1, 
y)\n\t\tfontLine(\"-*-**\", x, y+1)\n\t\tfontLine(\"*--**-\", x, y+2)\n\t\tfontLine(\"******\", x, y+3)\n\t\tfontLine(\"-**-\", x+2, y+4)\n\t\tfontLine(\"****\", x+2, y+5)\n\tcase '5':\n\t\tfontLine(\"******\", x, y)\n\t\tfontLine(\"**-\", x, y+1)\n\t\tfontLine(\"*****-\", x, y+2)\n\t\tfontLine(\"-**\", x+3, y+3)\n\t\tfontLine(\"**--**\", x, y+4)\n\t\tfontLine(\"-****-\", x, y+5)\n\tcase '6':\n\t\tfontLine(\"-***\", x+1, y)\n\t\tfontLine(\"-**-\", x, y+1)\n\t\tfontLine(\"**-\", x, y+2)\n\t\tfontLine(\"*****-\", x, y+3)\n\t\tfontLine(\"**--**\", x, y+4)\n\t\tfontLine(\"-****-\", x, y+5)\n\tcase '7':\n\t\tfontLine(\"******\", x, y)\n\t\tfontLine(\"*- -**\", x, y+1)\n\t\tfontLine(\"-**\", x+3, y+2)\n\t\tfontLine(\"-**-\", x+2, y+3)\n\t\tfontLine(\"**-\", x+2, y+4)\n\t\tfontLine(\"**\", x+2, y+5)\n\tcase '8':\n\t\tfontLine(\"-****-\", x, y)\n\t\tfontLine(\"**--**\", x, y+1)\n\t\tfontLine(\"-****-\", x, y+2)\n\t\tfontLine(\"**--**\", x, y+3)\n\t\tfontLine(\"**--**\", x, y+4)\n\t\tfontLine(\"-****-\", x, y+5)\n\tcase '9':\n\t\tfontLine(\"-****-\", x, y)\n\t\tfontLine(\"**--**\", x, y+1)\n\t\tfontLine(\"-*****\", x, y+2)\n\t\tfontLine(\"-**\", x+3, y+3)\n\t\tfontLine(\"-**-\", x+2, y+4)\n\t\tfontLine(\"-***-\", x, y+5)\n\tcase '0':\n\t\tfontLine(\"-****-\", x, y)\n\t\tfontLine(\"**--**\", x, y+1)\n\t\tfontLine(\"**--**\", x, y+2)\n\t\tfontLine(\"**--**\", x, y+3)\n\t\tfontLine(\"**--**\", x, y+4)\n\t\tfontLine(\"-****-\", x, y+5)\n\tcase '(':\n\t\tfontLine(\"-***\", x+2, y)\n\t\tfontLine(\"**-\", x+2, y+1)\n\t\tfontLine(\"**-\", x+1, y+2)\n\t\tfontLine(\"**-\", x+1, y+3)\n\t\tfontLine(\"**-\", x+2, y+4)\n\t\tfontLine(\"-***\", x+2, y+5)\n\tcase ')':\n\t\tfontLine(\"***-\", x, y)\n\t\tfontLine(\"-**\", x+1, y+1)\n\t\tfontLine(\"-**\", x+2, y+2)\n\t\tfontLine(\"-**\", x+2, y+3)\n\t\tfontLine(\"-**\", x+1, y+4)\n\t\tfontLine(\"***-\", x, y+5)\n\tcase '{':\n\t\tfontLine(\"-**\", x+2, y)\n\t\tfontLine(\"*\", x+2, y+1)\n\t\tfontLine(\"*\", x+2, y+2)\n\t\tfontLine(\"*\", x+1, y+3)\n\t\tfontLine(\"*\", x+2, y+4)\n\t\tfontLine(\"*\", x+2, y+5)\n\t\tfontLine(\"-**\", x+2, y+6)\n\tcase '}':\n\t\tfontLine(\"**-\", x+1, y)\n\t\tfontLine(\"*\", x+3, y+1)\n\t\tfontLine(\"*\", x+3, y+2)\n\t\tfontLine(\"*\", x+4, y+3)\n\t\tfontLine(\"*\", x+3, y+4)\n\t\tfontLine(\"*\", x+3, y+5)\n\t\tfontLine(\"**-\", x+1, y+6)\n\tcase '[':\n\t\tfontLine(\"****\", x+1, y)\n\t\tfontLine(\"**\", x+1, y+1)\n\t\tfontLine(\"**\", x+1, y+2)\n\t\tfontLine(\"**\", x+1, y+3)\n\t\tfontLine(\"**\", x+1, y+4)\n\t\tfontLine(\"****\", x+1, y+5)\n\tcase ']':\n\t\tfontLine(\"****\", x+1, y)\n\t\tfontLine(\" **\", x+1, y+1)\n\t\tfontLine(\" **\", x+1, y+2)\n\t\tfontLine(\" **\", x+1, y+3)\n\t\tfontLine(\" **\", x+1, y+4)\n\t\tfontLine(\"****\", x+1, y+5)\n\tcase '<':\n\t\tfontLine(\" *-\", x+2, y+1)\n\t\tfontLine(\" *-\", x+2, y+2)\n\t\tfontLine(\"*-\", x+2, y+3)\n\t\tfontLine(\" *-\", x+2, y+4)\n\t\tfontLine(\" *-\", x+2, y+5)\n\tcase '>':\n\t\tfontLine(\"-*\", x+2, y+1)\n\t\tfontLine(\"-*\", x+3, y+2)\n\t\tfontLine(\" -*\", x+3, y+3)\n\t\tfontLine(\"-*\", x+3, y+4)\n\t\tfontLine(\"-*\", x+2, y+5)\n\tcase '&':\n\t\tfontLine(\" **\", x, y)\n\t\tfontLine(\"** *\", x, y+1)\n\t\tfontLine(\" ** *\", x, y+2)\n\t\tfontLine(\" **-*\", x, y+3)\n\t\tfontLine(\"* **\", x, y+4)\n\t\tfontLine(\" ** **\", x, y+5)\n\tcase '|':\n\t\tfontLine(\"*\", x+3, y)\n\t\tfontLine(\"*\", x+3, y+1)\n\t\tfontLine(\"*\", x+3, y+2)\n\t\tfontLine(\"*\", x+3, y+3)\n\t\tfontLine(\"*\", x+3, y+4)\n\t\tfontLine(\"*\", x+3, y+5)\n\tcase '\\\\':\n\t\tfontLine(\"**\", x+1, 
y+1)\n\t\tfontLine(\"-**\", x+1, y+2)\n\t\tfontLine(\"-**\", x+2, y+3)\n\t\tfontLine(\"-**\", x+3, y+4)\n\t\tfontLine(\"-**\", x+4, y+5)\n\n\tcase 0:\n\t\treturn errors.New(\"the rune was 0. Did you pass a coordinate instead of a rune?\")\n\tdefault:\n\t\treturn fmt.Errorf(\"rune %s is not available\", string(l))\n\t}\n\treturn nil\n}", "func WriteASCII(w io.Writer, t []Triangle) error {\n\tvar err error\n\n\tprintf := func(format string, a ...interface{}) {\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\t_, err = fmt.Fprintf(w, format, a...)\n\t}\n\tprintf(\"solid object\\n\")\n\tfor _, tt := range t {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tprintf(\"facet normal %f %f %f\\n\", tt.N[0], tt.N[1], tt.N[2])\n\t\tprintf(\" outer loop\\n\")\n\t\tfor _, v := range tt.V {\n\t\t\tprintf(\" vertex %f %f %f\\n\", v[0], v[1], v[2])\n\t\t}\n\t\tprintf(\" endloop\\n\")\n\t\tprintf(\"endfacet\\n\")\n\t}\n\tprintf(\"endsolid object\\n\")\n\treturn nil\n}", "func (s *SVG) Str() string {\n\treturn s.header() + s.svgString + s.footer()\n}", "func (r renderer) NormalText(out *bytes.Buffer, text []byte) {\n\tout.Write(text)\n}", "func Display(m charset.Decoder) {\n\tspecials := map[rune]string{\n\t\t'\\n': \"LF\",\n\t\t'\\r': \"CR\",\n\t\t'\\f': \"FF\",\n\t\t' ': \"SP\",\n\t\t0x1b: \"ESC\",\n\t\t0x20ac: \" €\",\n\t}\n\tfmt.Printf(\" \")\n\tfor c := 0; c < 8; c++ {\n\t\tfmt.Printf(\"0x%d_ \", c)\n\t}\n\tfmt.Println(\"\")\n\tfor r := 0; r < 0x10; r++ {\n\t\tfmt.Printf(\"0x_%x: \", r)\n\t\tfor c := 0; c < 8; c++ {\n\t\t\tk := byte(c*0x10 + r)\n\t\t\tif v, ok := m[k]; ok {\n\t\t\t\tif s, ok := specials[v]; ok {\n\t\t\t\t\tfmt.Printf(\"%3s \", s)\n\t\t\t\t} else if v >= 0x400 {\n\t\t\t\t\tfmt.Printf(\"%04x \", v)\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Printf(\" %c \", v)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\" \")\n\t\t\t}\n\t\t}\n\t\tfmt.Println()\n\t}\n}", "func PrintItalic(format string, a ...interface{}) { fmt.Println(Italic(format, a...)) }", "func createTextBoard() *discordgo.MessageEmbed {\n\treturn &discordgo.MessageEmbed{\n\t\tTitle: \"Text-to-Speech\",\n\t\tDescription: \"\",\n\t\tFooter: nil,\n\t\tImage: nil,\n\t\tThumbnail: nil,\n\t\tVideo: nil,\n\t\tProvider: nil,\n\t\tAuthor: nil,\n\t\tFields: make([]*discordgo.MessageEmbedField, 1, 1),\n\t}\n}", "func (c *Canvas) CText(x, y float64, size float64, s string, color color.RGBA) {\n\tx, y = dimen(x, y, c.Width, c.Height)\n\tsize = pct(size, c.Width)\n\tAbsTextMid(c.Container, int(x), int(y), s, int(size), color)\n}", "func (c *FontTextureAtlas) buildTextureAtlas(text string) {\n\n\tvar dictionary fontTextureMetricsDictionary = fontTextureMetricsDictionary{}\n\tvar currentCVT texture.CanvasTexture = c.createDefaultCanvasTexture()\n\n\tmx := 0\n\tmy := 0\n\tmaxLineHeight := 0\n\txOffset := c.textureAtlasOffsetX\n\tyOffset := c.textureAtlasOffsetY\n\n\tfor _, ch := range text {\n\t\tword := string([]rune{ch})\n\n\t\t// すでに辞書に登録されていればスキップ\n\t\tif _, ok := dictionary[word]; ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tcv := currentCVT.Canvas()\n\t\tcanvasWidth := cv.Width()\n\t\tcanvasHeight := cv.Height()\n\t\tmetrics := cv.Context2D().MeasureText(word)\n\n\t\tw := int(math.Abs(metrics.ActualBoundingBoxLeft)+math.Abs(metrics.ActualBoundingBoxRight)) + 1\n\t\tif w < int(metrics.Width) {\n\t\t\tw = int(metrics.Width)\n\t\t}\n\t\th := int(math.Abs(metrics.ActualBoundingBoxAscent)+math.Abs(metrics.ActualBoundingBoxDescent)) + 1\n\n\t\t// 同行の最大高さを更新\n\t\tif h > maxLineHeight {\n\t\t\tmaxLineHeight = h\n\t\t}\n\n\t\t// 通常文字なら、レンダリング\n\t\tif h > 0 {\n\t\t\t// 
横幅がオーバーなら、位置を改行\n\t\t\tif int(mx+w) > canvasWidth {\n\t\t\t\tmx = 0\n\t\t\t\tmy = my + maxLineHeight + yOffset\n\t\t\t\tmaxLineHeight = 0\n\t\t\t}\n\t\t\t// 描画範囲の高さがオーバーなら、新しいキャンバスを作成\n\t\t\tif int(my+h) > canvasHeight {\n\t\t\t\tmx = 0\n\t\t\t\tmy = 0\n\t\t\t\tmaxLineHeight = 0\n\n\t\t\t\tcurrentCVT = c.createDefaultCanvasTexture()\n\t\t\t}\n\n\t\t\t// Canvasへ文字を描画\n\t\t\tcctx := currentCVT.Canvas().Context2D()\n\t\t\tcctx.FillText(\n\t\t\t\tword,\n\t\t\t\tmx,\n\t\t\t\tmy+h,\n\t\t\t)\n\t\t\t// cctx.StrokeRect(mx, my, w, h)\n\n\t\t\t// 文字位置辞書を登録\n\t\t\tdictionary[word] = &FontTextureMetrics{\n\t\t\t\tWord: word,\n\t\t\t\tU: float64(mx) / float64(canvasWidth),\n\t\t\t\tV: 1.0 - (float64(my+h) / float64(canvasHeight)),\n\t\t\t\tWidth: float64(w) / float64(canvasWidth),\n\t\t\t\tHeight: float64(h) / float64(canvasHeight),\n\t\t\t\tabsoluteWidth: float64(w),\n\t\t\t\tabsoluteHeight: float64(h),\n\t\t\t\tCanvasTexture: currentCVT,\n\t\t\t}\n\n\t\t\t// 文字位置をずらす\n\t\t\tmx = mx + w + xOffset\n\t\t}\n\n\t}\n\n\t// 結果を格納\n\tc.fontDictionary = dictionary\n}", "func Figure_(children ...HTML) HTML {\n return Figure(nil, children...)\n}", "func (me TxsdPresentationAttributesGraphicsTextRendering) String() string {\n\treturn xsdt.String(me).String()\n}", "func (r renderer) Emphasis(out *bytes.Buffer, text []byte) {\n\tout.Write(text)\n}", "func ExampleRender() {\n\tconst s = `\n\tFirst Line\n\tSecond Line\n\tThird Line\n\tHello\n\tThis is go-music`\n\n\tfmt.Println(RenderText(s, Spring))\n\tfmt.Println(RenderText(s, Autumn))\n\tfmt.Println(RenderText(s, Winter))\n\tfmt.Println(RenderText(s, Rose))\n\tfmt.Println(RenderText(s, Valentine))\n}", "func (td TextDisplay)DrawLives(lives int, screen *ebiten.Image){\r\n\tlife := strconv.Itoa(lives)\r\n\ttext.Draw(screen, life, td.mplusNormalFont,10,30,color.White)\r\n}", "func help() {\n\tfmt.Println(exampleText)\n}", "func (script Script) RenderHTML() {\n\tsymbols := []string{}\n\ttemplateHTML := `\n<!DOCTYPE html>\n<html>\n <head>\n <title>Writing System</title>\n <style type=\"text/css\">\n body, html { font-size: 28px; }\n div.container { display: flex; flex-wrap: wrap; width: 1600px; margin: 1rem auto; }\n div.cell { width: 100px; height: 100px; margin: 1rem; text-align: center; font-weight: 700; }\n div.cell > img { display: block; }\n </style>\n </head>\n <body>\n\t\t<div class=\"container\">\n\t\t\t{{range $index, $element := .}}\n <div class=\"cell\">\n <img src=\"{{ $element }}.png\">\n <p>{{ $element }}</p>\n </div>\n {{end}}\n </div>\n </body>\n</html>\n`\n\n\twriter, err := os.Create(\"./output/index.html\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tt, err := template.New(\"htmlIndex\").Parse(templateHTML)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tfor _, g := range script.Glyphs {\n\t\tsymbols = append(symbols, g.Representation)\n\t}\n\n\terr = t.Execute(writer, symbols)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tdefer writer.Close()\n}", "func Figcaption(attrs []htmlgo.Attribute, children ...HTML) HTML {\n\treturn &htmlgo.Tree{Tag: \"figcaption\", Attributes: attrs, Children: children}\n}", "func (s Artwork) String() string {\n\treturn awsutil.Prettify(s)\n}", "func drawitem(s string, x, y, w, h float64, shape, color, align string, milestone, bline bool, canvas *gensvg.SVG) {\n\tvar textfill string\n\tfc := fmt.Sprintf(strokefmt, *bgcolor, hexstyle(color))\n\n\tif bline {\n\t\tcanvas.Line(x+w, y, x+w, *height, borderfmt)\n\t}\n\n\tif len(s) > 0 {\n\t\tswitch shape {\n\t\tcase 
\"r\":\n\t\t\tcanvas.Rect(x, y, w, h, fc)\n\t\tcase \"rr\":\n\t\t\tcanvas.Roundrect(x, y, w, h, 5, 5, fc)\n\t\tcase \"e\":\n\t\t\tcanvas.Ellipse(x+(w/2), y+(h/2), w/2, h/2, fc)\n\t\tcase \"c\":\n\t\t\tcanvas.Circle(x+(w/2), y+(h/2), h/2, fc)\n\t\tcase \"a\":\n\t\t\tarrow(x, y, w, h, h/2, fc, canvas)\n\t\tcase \"l\":\n\t\t\tyl := (y + (h / 2)) + ((*ifs / 4) - (*ifs / 3))\n\t\t\tcanvas.Line(x, yl, x+w, yl, fmt.Sprintf(itemlinefmt, color, *ifs))\n\t\tdefault:\n\t\t\tcanvas.Rect(x, y, w, h, fc)\n\t\t}\n\t} else {\n\t\tcanvas.Rect(x, y, w, h, fc)\n\t}\n\n\tred, green, blue, alpha := colorcomp(color)\n\t_, _, v := rgbtohsb(red, green, blue)\n\n\tif v <= 100.0 && v > 70.0 || alpha < 127 {\n\t\ttextfill = \"black\"\n\t} else {\n\t\ttextfill = \"white\"\n\t}\n\n\tif milestone {\n\t\ts += \" \\u2605\"\n\t}\n\ttx := x\n\tswitch align {\n\tcase \"start\":\n\t\ttx += 1.0 // 5\n\tcase \"middle\":\n\t\ttx += (w / 2)\n\tcase \"end\":\n\t\ttx += (w)\n\tdefault:\n\t\ttx += (w / 2)\n\t}\n\tcanvas.Text(tx, (y+(h/2))+(*ifs/4), s, fmt.Sprintf(itemtextfmt, align, textfill, *ifs))\n\n}", "func GetPDFConv() []byte {\n\tscript := []byte(`\nimport Quartz as Quartz\nfrom CoreFoundation import NSImage\nfrom os.path import realpath, basename\nfrom sys import argv\n\n\ndef png_to_pdf(args):\n image = NSImage.alloc().initWithContentsOfFile_(args[0])\n page_init = Quartz.PDFPage.alloc().initWithImage_(image)\n pdf = Quartz.PDFDocument.alloc().initWithData_(page_init.dataRepresentation())\n\n for index, file_path in enumerate(args[1:]):\n image = NSImage.alloc().initWithContentsOfFile_(file_path)\n page_init = Quartz.PDFPage.alloc().initWithImage_(image)\n pdf.insertPage_atIndex_(page_init, index + 1)\n\n pdf.writeToFile_(realpath(__file__)[:-len(basename(__file__))] + 'aggr.pdf')\n\n\nif __name__ == '__main__':\n\tpng_to_pdf(argv[1:])\n`)\n\treturn bytes.ReplaceAll(script, []byte{0x09}, []byte{0x20, 0x20, 0x20, 0x20})\n}", "func MakeArt(rules string, iterations int) int {\n\tr := parseRules(rules)\n\n\timage := []string{\n\t\t\".#.\",\n\t\t\"..#\",\n\t\t\"###\",\n\t}\n\n\tvar step int\n\tfor i := 0; i < iterations; i++ {\n\t\tnewImage := []string{}\n\t\tsize := len(image)\n\t\tif size%2 == 0 {\n\t\t\tstep = 2\n\t\t} else {\n\t\t\tstep = 3\n\t\t}\n\n\t\tfor j := 0; j < size; j += step {\n\t\t\tfor k := 0; k < step+1; k++ {\n\t\t\t\tnewImage = append(newImage, \"\")\n\t\t\t}\n\n\t\t\tfor l := 0; l < size; l += step {\n\t\t\t\tblock := []string{}\n\t\t\t\tfor m := 0; m < step; m++ {\n\t\t\t\t\tblock = append(block, image[j+m][l:l+step])\n\t\t\t\t}\n\t\t\t\tblock = transform(block, r)\n\t\t\t\tdstY := (j / step) * (step + 1)\n\t\t\t\tfor m := 0; m < step+1; m++ {\n\t\t\t\t\tnewImage[dstY+m] += block[m]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\timage = newImage\n\t}\n\n\tvar counter = 0\n\tfor _, r := range image {\n\t\tcounter += strings.Count(r, \"#\")\n\t}\n\treturn counter\n}", "func Asciify(image image.Image, palette CharacterPalette) ASCIIArt {\n\tbounds := image.Bounds()\n\tart := make(ASCIIArt, bounds.Max.Y)\n\n\t// Iterate over all the pixels\n\tfor y := bounds.Min.Y; y < bounds.Max.Y; y++ {\n\t\tart[y] = make([]string, bounds.Max.X)\n\t\tfor x := bounds.Min.X; x < bounds.Max.X; x++ {\n\t\t\tart[y][x] = palette.pick(rgbaLuminance(image.At(x, y)))\n\t\t}\n\t}\n\n\treturn art\n}", "func Render(colorCode int, fontSize int, content string) string {\n\treturn \"\\033[\" + strconv.Itoa(fontSize) + \";\" + strconv.Itoa(colorCode) + \"m\" + content + reset\n}", "func (self *GameObjectCreator) BitmapText1O(x int, y int, font string, 
text string) *BitmapText{\n return &BitmapText{self.Object.Call(\"bitmapText\", x, y, font, text)}\n}", "func Imagify(art ASCIIArt, palette CharacterPalette) (image.Image, error) {\n\tbounds := image.Rect(0, 0, len(art[0]), len(art))\n\timg := image.NewRGBA(bounds)\n\n\tfor y, col := range art {\n\t\tfor x, row := range col {\n\t\t\tcol, err := palette.color(row)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\timg.Set(x, y, col)\n\t\t}\n\t}\n\n\treturn img, nil\n}", "func Outro(name string) {\n ClearScreen()\n fmt.Printf(\"\\n\\n\")\n TypedText(name+\" walks through the door into a long tunnel.\\n\", 80)\n time.Sleep(500 * time.Millisecond)\n TypedText(\"\\n\\nAfter walking what seems like an eternity \"+name+ \" approaches a ladder.\\n\", 80)\n time.Sleep(500 * time.Millisecond)\n TypedText(\"\\n\\nTrudging up the ladder step by step \"+name+\" comes face to face with an iron plate.\\n\", 80)\n time.Sleep(500 * time.Millisecond)\n TypedText(\"\\n\\nBestowing all inner strength \"+name+\" lifts the iron plate and is hit with bright light and a breath of fresh air.\\n\", 80)\n time.Sleep(500 * time.Millisecond)\n fmt.Printf(\"\\n\")\n TypedText(name+\" has escaped.\\n\", 80)\n time.Sleep(2 * time.Second)\n}", "func Chart(f *hclwrite.File, chart *chart.Chart) *hclwrite.Body {\n\n // program_text wrapper\n programText := utils.ProgramTextProc(chart.ProgramText)\n // group_by wrapper\n groupBy := chart.Options.GroupBy\n if groupBy == nil {\n groupBy = []string{}\n }\n // tags wrapper\n tags := chart.Tags\n if tags == nil {\n tags = []string{}\n }\n // wrapper around label\n label := utils.LabelProc(chart.Id)\n\n rootBody := f.Body()\n chartBlock := rootBody.AppendNewBlock(\"resource\", []string{utils.Type[chart.Options.Type], label})\n chartBody := chartBlock.Body()\n chartBody.SetAttributeValue(\"name\", cty.StringVal(chart.Name))\n chartBody.SetAttributeValue(\"description\", cty.StringVal(chart.Description))\n chartBody.SetAttributeValue(\"plot_type\", cty.StringVal(chart.Options.DefaultPlotType))\n chartBody.SetAttributeValue(\"stacked\", cty.BoolVal(chart.Options.Stacked))\n chartBody.SetAttributeValue(\"axes_include_zero\", cty.BoolVal(chart.Options.IncludeZero))\n\n // Histograms processing\n if chart.Options.HistogramChartOptions != nil {\n histogramOptionsBlock := chartBody.AppendNewBlock(\"histogram_options\", nil)\n histogramOptionsBody := histogramOptionsBlock.Body()\n histogramOptionsBody.SetAttributeValue(\"color_theme\", cty.StringVal(utils.Color[*chart.Options.HistogramChartOptions.ColorThemeIndex]))\n }\n\n // legend_options_fields\n utils.GetLegendOptionsBlock(chart, chartBody)\n\n // create viz_options\n utils.GetVizOptions(chart, chartBody)\n\n\n // create event_options\n utils.EventProc(chart, chartBody)\n // chartBody.SetAttributeValue(\"event_options\", utils.EventProc(chart))\n\n chartBody.SetAttributeTraversal(\"program_text\", hcl.Traversal{\n hcl.TraverseRoot{\n Name: programText,\n },\n })\n\n chartBody.SetAttributeValue(\"disable_sampling\", utils.DisableSamplingProc(chart))\n chartBody.SetAttributeValue(\"minimum_resolution\", cty.NumberIntVal(utils.MinResolutionProc(chart)))\n chartBody.SetAttributeValue(\"unit_prefix\", cty.StringVal(chart.Options.UnitPrefix))\n chartBody.SetAttributeValue(\"max_delay\", cty.NumberIntVal(utils.MaxDelayProc(chart)))\n chartBody.SetAttributeValue(\"color_by\", cty.StringVal(chart.Options.ColorBy))\n chartBody.SetAttributeValue(\"on_chart_legend_dimension\", utils.OnChartLegendProc(chart))\n // Time range 
processing\n if chart.Options.Time != nil { // Checking Time structure against nil\n\n if chart.Options.Time.Type == \"relative\" {\n /* Convert to sec, we have different format here:\n See details here: https://github.com/terraform-providers/terraform-provider-signalfx/issues/55\n */\n chartBody.SetAttributeValue(\"time_range\", cty.NumberIntVal(*chart.Options.Time.Range/1000))\n }\n\n if chart.Options.Time.Type == \"absolute\" {\n chartBody.SetAttributeValue(\"start_time\", cty.NumberIntVal(*chart.Options.Time.Start/1000))\n chartBody.SetAttributeValue(\"end_time\", cty.NumberIntVal(*chart.Options.Time.End/1000))\n }\n }\n return chartBody\n}", "func (r RGASS) Text() string {\n\tstr := \"\"\n\n\tfor node := range r.Model.Iter() {\n\t\tif node.Hidden {\n\t\t\tcontinue\n\t\t}\n\t\tstr += node.Str\n\t}\n\n\treturn str\n}", "func drawText(p **widgets.Paragraph) {\n\tupdateColor := false\n\n\t/* Did text hit the bottom or top of the term? */\n\tif py == yEdge {\n\t\tyAdd = false\n\t\tupdateColor = true\n\t} else if py == 0 {\n\t\tyAdd = true\n\t\tupdateColor = true\n\t}\n\n\t/* Did the text hit the right or left of term? */\n\tif px == xEdge {\n\t\txAdd = false\n\t\tupdateColor = true\n\t} else if px == 0 {\n\t\txAdd = true\n\t\tupdateColor = true\n\t}\n\n\t/* Update color on hit and when all a flag is used */\n\tif updateColor && allColors {\n\t\tupdateTextColor(p)\n\t}\n\n\tif yAdd {\n\t\tpy++\n\t} else {\n\t\tpy--\n\t}\n\n\tif xAdd {\n\t\tpx++\n\t} else {\n\t\tpx--\n\t}\n\n\t(*p).SetRect(px, py, termWidth, termHeight)\n}", "func NewText(fontData draw2d.FontData, fontSize float64, text string) *Text {\n\ttextFigure := &Text{NewFigure(), fontData, fontSize, text}\n\ttextFigure.SetSubClass(textFigure)\n\n\treturn textFigure\n}", "func renderAffirmation(config Config, cr *cairo.Context, x, y int, pangoMarkup string, fontDescription *pango.FontDescription, black, outline bool) {\n\n\t// If outline, draw it first.\n\tif outline {\n\n\t\t// Create a pango layout.\n\t\tlayout := pango.CairoCreateLayout(cr)\n\n\t\t// Position at the beginning of the text.\n\t\tcr.MoveTo(float64(x), float64(y))\n\n\t\t// Black or white? Do opposite of text.\n\t\tvar outlineScale float64\n\t\tif black {\n\t\t\tcr.SetSourceRGB(255, 255, 255)\n\t\t\toutlineScale = config.BlackOutlineScale\n\t\t} else {\n\t\t\tcr.SetSourceRGB(0, 0, 0)\n\t\t\toutlineScale = config.WhiteOutlineScale\n\t\t}\n\n\t\t// Set the font description.\n\t\tlayout.SetFontDescription(fontDescription)\n\n\t\t// Set the markup in the mask.\n\t\tlayout.SetMarkup(pangoMarkup, -1)\n\n\t\t// Half of this stroke will be the outline.\n\t\tstrokeWidth := (float64(fontDescription.GetSize()) / pango.PANGO_SCALE) * outlineScale\n\t\tcr.SetLineWidth(strokeWidth)\n\n\t\t// Create the mask and outline the text.\n\t\tpango.CairoLayoutPath(cr, layout)\n\t\tcr.Stroke()\n\t}\n\n\t// Create a pango layout.\n\tlayout := pango.CairoCreateLayout(cr)\n\n\t// Position at the beginning of the text.\n\tcr.MoveTo(float64(x), float64(y))\n\n\t// Black or white?\n\tif black {\n\t\tcr.SetSourceRGB(0, 0, 0)\n\t} else {\n\t\tcr.SetSourceRGB(255, 255, 255)\n\t}\n\n\t// Set the font description.\n\tlayout.SetFontDescription(fontDescription)\n\n\t// Set the markup in the mask.\n\tlayout.SetMarkup(pangoMarkup, -1)\n\n\t// Create the mask and fill the text.\n\tpango.CairoShowLayout(cr, layout)\n\tcr.Fill()\n}" ]
[ "0.6409015", "0.63927233", "0.58695", "0.58139056", "0.5674917", "0.56051385", "0.5559758", "0.5534353", "0.5517358", "0.5478104", "0.5449108", "0.5446319", "0.53765625", "0.5314368", "0.5303025", "0.5302334", "0.52745944", "0.52546656", "0.5204993", "0.5194887", "0.5191028", "0.51771194", "0.5175349", "0.514112", "0.5137554", "0.5117147", "0.5113724", "0.511218", "0.5094811", "0.50921494", "0.5087734", "0.5075413", "0.50692886", "0.504531", "0.5043019", "0.5040718", "0.5036407", "0.49903062", "0.49739856", "0.49674395", "0.49263456", "0.49186385", "0.48863897", "0.48863304", "0.48808247", "0.48794577", "0.48786435", "0.48650044", "0.48644385", "0.4844562", "0.484081", "0.48359078", "0.4829485", "0.48276898", "0.48207772", "0.48177686", "0.48091185", "0.48014674", "0.4788741", "0.47868398", "0.47746837", "0.47684753", "0.4764454", "0.47440243", "0.47385898", "0.4736952", "0.47206184", "0.47035253", "0.46981746", "0.46962774", "0.46825233", "0.46795306", "0.46785337", "0.46760562", "0.4668668", "0.46669382", "0.46572235", "0.46519172", "0.46487007", "0.46459416", "0.4637275", "0.46368495", "0.46238545", "0.46148267", "0.4612419", "0.46062294", "0.46045998", "0.4602257", "0.46000937", "0.45982522", "0.45940167", "0.45931688", "0.45895013", "0.45890915", "0.45842946", "0.45766228", "0.45738408", "0.4570371", "0.45607734", "0.4557107" ]
0.55526286
7
NewRepository returns Repository with given middleware.Pool
func NewRepository(db middleware.Pool) *Repository {
	return &Repository{Database: db}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewRepository(pool *pgxpool.Pool) *Repository {\n\treturn &Repository{pool: pool}\n}", "func New(pool *pgxpool.Pool) (repository.Repository, error) {\n\tif pool == nil {\n\t\treturn nil, ErrMissingPool\n\t}\n\n\treturn postgresRepository{\n\t\tpool: pool,\n\t}, nil\n}", "func NewRepository(pool *pgxpool.Pool) *Repository {\n\treturn &Repository{\n\t\tpool: pool,\n\t}\n}", "func New(databaseConfig *Config, logger pgx.Logger) (*Repository, error) {\n\tpostgresDataSource := fmt.Sprintf(\"postgres://%s:%s@%s/%s?sslmode=%s\",\n\t\tdatabaseConfig.Username,\n\t\tdatabaseConfig.Password,\n\t\tdatabaseConfig.Hostname,\n\t\tdatabaseConfig.Name,\n\t\tdatabaseConfig.SSLMode)\n\tpoolConfig, err := pgxpool.ParseConfig(postgresDataSource)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpoolConfig.ConnConfig.Logger = logger\n\tlogLevelMapping := map[string]pgx.LogLevel{\n\t\t\"trace\": pgx.LogLevelTrace,\n\t\t\"debug\": pgx.LogLevelDebug,\n\t\t\"info\": pgx.LogLevelInfo,\n\t\t\"warn\": pgx.LogLevelWarn,\n\t\t\"error\": pgx.LogLevelError,\n\t}\n\tpoolConfig.ConnConfig.LogLevel = logLevelMapping[databaseConfig.LogLevel]\n\tpoolConfig.MaxConns = databaseConfig.MaxConnections\n\tpoolConfig.MinConns = databaseConfig.MinConnections\n\n\tpool, err := pgxpool.ConnectConfig(context.Background(), poolConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Repository{pool: pool}, nil\n}", "func newRepository() Repository {\n\tif cfg == nil {\n\t\tpanic(fmt.Errorf(\"missing configuration\"))\n\t}\n\tif log == nil {\n\t\tpanic(fmt.Errorf(\"missing logger\"))\n\t}\n\n\tp2p.SetConfig(cfg)\n\tp2p.SetLogger(log)\n\n\t// create connections\n\tcaBridge, dbBridge, rpcBridge, geoBridge, err := connect(cfg, log)\n\tif err != nil {\n\t\tlog.Fatal(\"repository init failed\")\n\t\treturn nil\n\t}\n\n\t// construct the proxy instance\n\tp := proxy{\n\t\tcache: caBridge,\n\t\tdb: dbBridge,\n\t\trpc: rpcBridge,\n\t\tgeoip: geoBridge,\n\t\tlog: log,\n\t\tcfg: cfg,\n\n\t\t// get the map of governance contracts\n\t\tgovContracts: governanceContractsMap(cfg.Governance),\n\n\t\t// keep reference to the SOL compiler\n\t\tsolCompiler: cfg.Compiler.DefaultSolCompilerPath,\n\t}\n\n\t// return the proxy\n\treturn &p\n}", "func NewRepository(db *pgx.ConnPool) HistoryRepository {\n\treturn Repository{\n\t\tdb: db,\n\t}\n}", "func newRepository(\n\tid borges.RepositoryID,\n\tsto storage.Storer,\n\tfs billy.Filesystem,\n\tm borges.Mode,\n\ttransactional bool,\n\tl *Location,\n) (*Repository, error) {\n\trepo, err := git.Open(sto, nil)\n\tif err != nil {\n\t\tif err == git.ErrRepositoryNotExists {\n\t\t\trepo, err = git.Init(sto, nil)\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn nil, borges.ErrLocationNotExists.Wrap(err, id)\n\t\t}\n\t}\n\n\treturn &Repository{\n\t\tid: id,\n\t\trepo: repo,\n\t\ts: sto,\n\t\tfs: fs,\n\t\tmode: m,\n\t\ttransactional: transactional,\n\t\tlocation: l,\n\t\tcreateVersion: -1,\n\t}, nil\n}", "func NewRepository(repo *Repository, db *sql.DB) (*Repository, error) {\n\tfmt.Println(\"START NewRepository\")\n\tdefer fmt.Println(\"END START NewRepository\")\n\n\trepo.db = db\n\n\terr := db.Ping()\n\tif err != nil {\n\t\tfmt.Println(\"db err: \", err)\n\t\treturn nil, err\n\t}\n\n\tdb.SetMaxIdleConns(500)\n\tdb.SetMaxOpenConns(500)\n\n\treturn repo, nil\n}", "func NewPool(fn ResolverFactory) *Pool {\n\treturn &Pool{\n\t\tfactory: fn,\n\t}\n}", "func New(db *bolt.DB) Repository {\n\treturn Repository{\n\t\tdb: db,\n\t}\n}", "func New() backing.Repo {\n\treturn &Repo{}\n}", "func NewRepository(db *gorm.DB) 
ProductDomain.Repository {\n\treturn &handler{\n\t\tdb: db,\n\t}\n}", "func NewRepository(conn *pgx.Conn) *RepositoryImpl {\n\treturn &RepositoryImpl{conn: conn}\n}", "func New(conn *pgx.Conn) *RepositoryImpl {\n\treturn &RepositoryImpl{conn: conn}\n}", "func New() *Repository {\n\tr := &Repository{}\n\tr.buildChain()\n\treturn r\n}", "func New(conn *sqlx.DB) *Repository {\n\treturn &Repository{\n\t\tconn: conn,\n\t}\n}", "func New(sqlConn string) (*Repository, error) {\n\tdb, err := connectDatabase(sqlConn)\n\tif err != nil {\n\t\tlogger.Error(\"error connecting to db\", zap.Error(err))\n\t\treturn nil, err\n\t}\n\treturn &Repository{\n\t\tdb: db,\n\t}, nil\n}", "func New() *Repository {\n\treturn &Repository{\n\t\tagents: map[string]*adagio.Agent{},\n\t\truns: map[string]runState{},\n\t\tclaims: map[string]struct {\n\t\t\trun *adagio.Run\n\t\t\tnode *adagio.Node\n\t\t}{},\n\t\tlisteners: listenerSet{},\n\t}\n}", "func NewPool() Pool {\n\treturn Pool{\n\t\tBalanceRune: cosmos.ZeroUint(),\n\t\tBalanceAsset: cosmos.ZeroUint(),\n\t\tPoolUnits: cosmos.ZeroUint(),\n\t\tStatus: Enabled,\n\t}\n}", "func NewRepository(repoName string) *Repository {\n\n\tclientIndex := model.ByEquality(\"ClientId\")\n\tclientIndex.Unique = true\n\t//\tuserIndex := model.ByEquality(\"UserId\")\n\t//\tuserIndex.Unique = true\n\n\treturn &Repository{\n\t\tName: repoName,\n\t\tmesssages: model.NewTable(store.DefaultStore, repoName, model.Indexes(clientIndex), nil),\n\t}\n}", "func getNewPool(cfg *config.Pool) *pool {\n\tvar p pool\n\n\tp.lockDuration = cfg.LockDuration\n\n\tp.locks = make(map[*config.Resource]*ResourceLock)\n\tfor _, resource := range cfg.Resources {\n\t\tp.locks[resource] = nil\n\t}\n\n\tkeys, _ := storage.GetKeys(storageKey)\n\tfor _, key := range keys {\n\t\tvar lock ResourceLock\n\t\tif err := storage.Read(storageKey, key, &lock); err != nil {\n\t\t\tlog.Errorf(\"[Pool] unable to restore lock for '%s': %s\", key, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tfor k := range p.locks {\n\t\t\tif k.Name == key {\n\t\t\t\tlock.Resource = *k\n\t\t\t\tp.locks[k] = &lock\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn &p\n}", "func NewRepository(db *sql.DB) *repository {\n\treturn &repository{db: db}\n}", "func NewRepo(a *config.Appconfig) *Repository {\n\treturn &Repository{\n\t\tApp: a,\n\t}\n}", "func NewRepository(collection Collection) BasicRepository {\n\treturn &basicRepositoryUsecase{\n\t\tcollectionName: collection,\n\t}\n}", "func NewRepository(conf *Configuration) (storage.Repository, error) {\n\t// Set client options\n\tclientOptions := options.Client().\n\t\tApplyURI(conf.ConnectionString)\n\n\tif conf.Username != \"\" {\n\t\tclientOptions = clientOptions.\n\t\t\tSetAuth(options.Credential{\n\t\t\t\tUsername: conf.Username,\n\t\t\t\tPassword: conf.Password,\n\t\t\t})\n\t}\n\n\t// Connect to MongoDB\n\tclient, err := mongo.Connect(context.TODO(), clientOptions)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Check the connection\n\tif err = client.Ping(context.TODO(), nil); err != nil {\n\t\treturn nil, err\n\t}\n\n\tcollection := client.\n\t\tDatabase(conf.Database).\n\t\tCollection(conf.Collection)\n\n\t// build database\n\tstconf := *conf\n\tstconf.Password = \"\"\n\n\tdb := mongoStore{\n\t\tClient: client,\n\t\tCollection: collection,\n\t\tConfiguration: stconf,\n\t}\n\treturn &db, nil\n}", "func NewRepository(db *sql.DB) payment.Repository {\n\treturn &repository{gq: goqu.New(\"postgres\", db)}\n}", "func NewRepository(db *dbcontext.DB, logger log.Logger) Repository {\n\treturn repository{db, 
logger}\n}", "func New(db *gorm.DB) (Repository, error) {\n\treturn &repo{\n\t\tDB: db,\n\t}, nil\n}", "func New(db *gorm.DB) *Repository {\n\treturn &Repository{\n\t\tdb: db,\n\t}\n}", "func New(db *gorm.DB) *Repository {\n\treturn &Repository{\n\t\tdb: db,\n\t}\n}", "func New(db *gorm.DB) *Repository {\n\treturn &Repository{\n\t\tdb: db,\n\t}\n}", "func NewRepo(a *config.AppConfig) *Repository {\n\treturn &Repository{ //returns the referenc to a Repository\n\t\tApp: a, // populate in \"App\" from type \"Repository\n\t}\n}", "func NewRepository(region, tableName string) Repository {\n\treturn nil\n}", "func NewRepository(namespace string, k8sClient client.Client, connDecryptToken string) conn_repository.Repository {\n\treturn &k8sConnectionRepository{\n\t\tnamespace: namespace,\n\t\tk8sClient: k8sClient,\n\t\tconnDecryptToken: connDecryptToken,\n\t}\n}", "func newRepo(r *github.Repository) Repo {\n\tvar lang string\n\tif r.Language != nil {\n\t\tlang = *r.Language\n\t} else {\n\t\tlang = \"-\"\n\t}\n\treturn Repo{*r.HTMLURL, lang, *r.StargazersCount, *r.ForksCount}\n}", "func NewPool(delegate *redis.Client) redsyncredis.Pool {\n\treturn &pool{delegate}\n}", "func New(prototype Aggregate, opts ...Option) *Repository {\n\tt := reflect.TypeOf(prototype)\n\tif t.Kind() == reflect.Ptr {\n\t\tt = t.Elem()\n\t}\n\n\tr := &Repository{\n\t\tprototype: t,\n\t\tstore: newMemoryStore(),\n\t\tserializer: NewJSONSerializer(),\n\t}\n\n\tfor _, opt := range opts {\n\t\topt(r)\n\t}\n\n\treturn r\n}", "func New(cfg Config, logger kitlog.Logger) *Repository {\n\treturn &Repository{\n\t\tcfg: cfg,\n\t\tlogger: logger,\n\t}\n}", "func NewRepository(dbConn *sqlx.DB) Repository {\n\treturn &repository{\n\t\tdb: dbConn,\n\t}\n}", "func NewRepository(db *sqlx.DB, log log.Logger) Repository {\n\treturn repository{db: db, logger: log}\n}", "func New() *Repo {\n\treturn &Repo{}\n}", "func NewRepository() Repository {\n\tclient := redis.NewClient(&redis.Options{\n\t\tAddr: \"127.0.0.1:6379\",\n\t\tPassword: \"\",\n\t\tDB: 0,\n\t})\n\n\treturn Repository{Client: client}\n}", "func newPool(s *site) (*pool, query.Error) {\n\tp := new(pool)\n\tp.site = s\n\tp.id = POOL_ID\n\tp.name = POOL_NAME\n\tp.buckets = make(map[string]catalog.Bucket)\n\n\te := p.loadBuckets()\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\treturn p, nil\n}", "func NewRepo(uri string) (Repository, error) {\n\tclient, err := mongo.NewClient(options.Client().ApplyURI(uri))\n\tlog.Println(\"db client created\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 20 * time.Second)\n\tdefer cancel()\n\terr = client.Connect(ctx)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlog.Println(\"db client connected\")\n\n\terr = client.Ping(ctx, readpref.Primary())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlog.Println(\"db client ping\")\n\n\tdbName := os.Getenv(\"DATABASE_NAME\")\n\tdb := client.Database(dbName)\n\tcol := db.Collection(os.Getenv(\"TRAVEL_COLLECTION\"))\n\treturn &DBRepository{\n\t\tclient: \tclient,\n\t\tdatabase: db,\n\t\tCollection: col,\n\t}, nil\n}", "func NewRepository(db *sql.DB) Repository {\n\treturn &repoSvc{\n\t\tQueries: New(db),\n\t\tdb: db,\n\t}\n}", "func NewRepository(name, endpoint string, client *http.Client) (*Repository, error) {\n\tname = strings.TrimSpace(name)\n\n\tu, err := utils.ParseEndpoint(endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trepository := &Repository{\n\t\tName: name,\n\t\tEndpoint: u,\n\t\tclient: client,\n\t}\n\n\treturn repository, 
nil\n}", "func NewPool() Pool {\n\treturn &pool{\n\t\tclients: make(map[string]client),\n\t}\n}", "func NewRepository(MongoClient *mongo.Client) Repository {\n\treturn &repository{\n\t\tdatabaseAccess{\n\t\t\tdb: MongoClient,\n\t\t},\n\t}\n}", "func NewRepository(db *db.Database) *Repository {\n\treturn &Repository{db}\n}", "func NewRepository(funcs template.FuncMap) *Repository {\n\trepo := Repository{\n\t\tfiles: make(map[string]string),\n\t\ttemplates: make(map[string]*template.Template),\n\t\tfuncs: funcs,\n\t}\n\n\tif repo.funcs == nil {\n\t\trepo.funcs = make(template.FuncMap)\n\t}\n\n\treturn &repo\n}", "func NewRepository(db mysql.BaseRepository, logger log.Logger) Repository {\n\treturn repository{db, logger}\n}", "func New(db *db.DB) core.RepositoryStore {\n\treturn &repoStore{db}\n}", "func NewRepository(db *sqlx.DB) *Repository {\n\treturn &Repository{\n\t\tDbConn: db,\n\t}\n}", "func NewRepository(db *sqlx.DB) *Repository {\n\treturn &Repository{\n\t\tDbConn: db,\n\t}\n}", "func NewRepository(awsSession *session.Session, stage string) *Repository {\n\treturn &Repository{\n\t\tstage: stage,\n\t\tdynamoDBClient: dynamodb.New(awsSession),\n\t\trepositoryTableName: fmt.Sprintf(\"cla-%s-repositories\", stage),\n\t\tgitLabOrgTableName: fmt.Sprintf(\"cla-%s-gitlab-orgs\", stage),\n\t}\n}", "func NewRepository(db database.Database) Repository {\n\treturn Repository{db}\n}", "func NewPool() *Pool {\n\treturn &Pool{\n\t\tresourceQueue: make(chan Resource),\n\t\tresourceCount: 0,\n\t}\n}", "func NewPool(size int) Pool {\n\treturn Pool {\n\t\tSize: size,\n\t}\n}", "func NewRepository(db *gorm.DB) *Repository {\n\treturn &Repository{db}\n}", "func NewRepository(db *gorm.DB) *Repository {\n\treturn &Repository{db}\n}", "func NewRepository(ctx *pulumi.Context,\n\tname string, args *RepositoryArgs, opts ...pulumi.ResourceOption) (*Repository, error) {\n\tif args == nil {\n\t\targs = &RepositoryArgs{}\n\t}\n\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Repository\n\terr := ctx.RegisterResource(\"aws-native:ecr:Repository\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (r RedisWriter) NewPool() *redis.Pool {\n\tconfig := r\n\treturn &redis.Pool{\n\t\tMaxIdle: config.RedisPoolMaxIdle,\n\t\tMaxActive: config.RedisPoolMaxActive,\n\t\tIdleTimeout: time.Duration(config.RedisPoolIdleTimeout) * time.Second,\n\t\tDial: func() (redis.Conn, error) {\n\t\t\t//return redis.Dial(\"tcp\", config.RedisAddress) },\n\t\t\tc, err := redis.Dial(\"tcp\", config.RedisAddress)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tc.Do(\"AUTH\", config.RedisPassword)\n\t\t\treturn c, err\n\t\t},\n\t}\n}", "func New(connection *gorm.DB) Interface {\n\treturn &Repository{connection: connection}\n}", "func NewPool(conn *Connector) *Pool {\n\treturn &Pool{\n\t\tconn: conn,\n\t}\n}", "func NewRepo() (r *Repo, err error) {\n\n\tr = &Repo{\n\t\twrappers: make(map[string]*Wrap),\n\t\twaiting: make(map[string]chan struct{}),\n\t}\n\n\treturn r, nil\n}", "func MakeRepository(db *sql.DB) journal.Repository {\n\tr := &repository{db}\n\n\treturn r\n}", "func NewRepository(db *database.Database) *Repository {\n\treturn &Repository{db}\n}", "func New() *pool {\n\treturn &pool{\n\t\tmetrics: newMetrics(),\n\t}\n}", "func New(c Config) (*repo, error) {\n\tp, err := getAbsStoragePath(c.StoragePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := mkDirIfNotExists(p); err != nil {\n\t\treturn nil, err\n\t}\n\tcwd, err := 
os.Getwd()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb := repo{\n\t\tpath: p,\n\t\tcwd: cwd,\n\t\tbaseURL: c.BaseURL,\n\t\tlogFn: defaultLogFn,\n\t\terrFn: defaultLogFn,\n\t\tcache: cache.New(c.EnableCache),\n\t}\n\treturn &b, nil\n}", "func NewRepository() (Repository, error) {\n\n\t// Mysql connection configuration\n\tconfig := &mysql.Config{\n\t\tUser: _MysqlUser,\n\t\tPasswd: _MysqlPassword,\n\t\tAddr: fmt.Sprintf(\"%s:%s\", _MysqlHost, _MysqlPort),\n\t\tDBName: _MysqlDB,\n\t\tLoc: time.UTC,\n\t\tParseTime: true,\n\t\tAllowNativePasswords: true,\n\t\tNet: \"tcp\",\n\t}\n\n\t// Do connecting mysql server\n\tconn, err := sql.Open(\"mysql\", config.FormatDSN())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Check if mysql is available\n\tif err := conn.Ping(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// return mysql object with connection & error value\n\treturn &repository{conn}, nil\n}", "func NewJobRepository(ctxIn context.Context, credentialsProvider secret.SecretProvider) (JobRepository, error) {\n ctx, span := trace.StartSpan(ctxIn, \"NewJobRepository\")\n defer span.End()\n\n storageService, err := service.NewStorageService(ctx, credentialsProvider)\n if err != nil {\n return nil, err\n }\n return &defaultJobRepository{storageService: storageService}, nil\n}", "func NewRepo(ac *config.App) *Repository {\n\treturn &Repository{\n\t\tApp: ac,\n\t}\n}", "func NewRepository(conv APIDefinitionConverter) *pgRepository {\n\treturn &pgRepository{\n\t\tsingleGetter: repo.NewSingleGetter(apiDefTable, apiDefColumns),\n\t\tsingleGetterGlobal: repo.NewSingleGetterGlobal(resource.API, apiDefTable, apiDefColumns),\n\t\tpageableQuerier: repo.NewPageableQuerier(apiDefTable, apiDefColumns),\n\t\tbundleRefQueryBuilder: repo.NewQueryBuilderGlobal(resource.BundleReference, bundlereferences.BundleReferenceTable, []string{bundlereferences.APIDefIDColumn}),\n\t\tlister: repo.NewLister(apiDefTable, apiDefColumns),\n\t\tlisterGlobal: repo.NewListerGlobal(resource.API, apiDefTable, apiDefColumns),\n\t\tcreator: repo.NewCreator(apiDefTable, apiDefColumns),\n\t\tcreatorGlobal: repo.NewCreatorGlobal(resource.API, apiDefTable, apiDefColumns),\n\t\tupdater: repo.NewUpdater(apiDefTable, updatableColumns, idColumns),\n\t\tupdaterGlobal: repo.NewUpdaterGlobal(resource.API, apiDefTable, updatableColumns, idColumns),\n\t\tdeleter: repo.NewDeleter(apiDefTable),\n\t\tdeleterGlobal: repo.NewDeleterGlobal(resource.API, apiDefTable),\n\t\texistQuerier: repo.NewExistQuerier(apiDefTable),\n\t\tconv: conv,\n\t}\n}", "func NewRepository(db interfaces.IDatabase) interfaces.IRepository {\n\treturn &Repository{\n\t\tdb: db,\n\t\tcollectionName: \"models\",\n\t}\n}", "func NewRepo(appConfig *config.AppConfig) *Repository {\n\treturn &Repository{\n\t\tAppConfig: appConfig,\n\t}\n}", "func NewRepository() dish.Repository {\n\tdb, err := sql.Open(\"sqlite3\", \"./internal/database/test.sqlite\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn &repository{\n\t\tdb: db,\n\t}\n}", "func NewRepository(storage Storage) Repository {\n\treturn &RepositoryImpl{\n\t\tstorage: storage,\n\t}\n}", "func NewRepository() *Repository {\n\treturn &Repository{\n\t\trepo: make(map[Name]worker.ArtifactSource),\n\t}\n}", "func NewPool(rootDir string, globalCacheDir string) *Pool {\n\tresult := &Pool{\n\t\thosts: make(map[string]*pluginsForHost),\n\t\trootDir: rootDir,\n\t\tglobalCacheDir: globalCacheDir,\n\t}\n\tgo result.supervisor()\n\treturn result\n}", "func New(c *config.Config) (*Repository, error) {\n\tDBURL := 
fmt.Sprintf(\"host=%s port=%d user=%s dbname=%s sslmode=disable password=%s\",\n\t\tc.Database.DBHost,\n\t\tc.Database.DBPort,\n\t\tc.Database.DBUser,\n\t\tc.Database.DBName,\n\t\tc.Database.DBPassword)\n\n\tdb, err := gorm.Open(dbDriver, DBURL)\n\tif err != nil {\n\t\tfmt.Printf(\"Unable to connect to database, %v\", err)\n\t\tdefer db.Close()\n\t\treturn nil, err\n\t}\n\n\tdb.LogMode(true)\n\n\tuserRepository := UserRepository{db}\n\n\tr := &Repository{db: db, UserRepository: &userRepository}\n\treturn r, nil\n}", "func NewRepo(a *config.AppConfig) *Repository {\n\treturn &Repository{App: a}\n}", "func New(cfg *config.Config, log logger.Logger) (Repository, error) {\n\t// create new in-memory cache bridge\n\tcaBridge, err := cache.New(cfg, log)\n\tif err != nil {\n\t\tlog.Criticalf(\"can not create in-memory cache bridge, %s\", err.Error())\n\t\treturn nil, err\n\t}\n\n\t// create new database connection bridge\n\tdbBridge, err := db.New(cfg, log)\n\tif err != nil {\n\t\tlog.Criticalf(\"can not connect backend persistent storage, %s\", err.Error())\n\t\treturn nil, err\n\t}\n\n\t// create new Lachesis RPC bridge\n\trpcBridge, err := rpc.New(cfg, log)\n\tif err != nil {\n\t\tlog.Criticalf(\"can not connect Lachesis RPC interface, %s\", err.Error())\n\t\treturn nil, err\n\t}\n\n\t// try to validate the solidity compiler by asking for it's version\n\tif _, err := compiler.SolidityVersion(cfg.SolCompilerPath); err != nil {\n\t\tlog.Criticalf(\"can not invoke the Solidity compiler, %s\", err.Error())\n\t\treturn nil, err\n\t}\n\n\t// construct the proxy instance\n\tp := proxy{\n\t\tcache: caBridge,\n\t\tdb: dbBridge,\n\t\trpc: rpcBridge,\n\t\tlog: log,\n\n\t\t// keep reference to the SOL compiler\n\t\tsolCompiler: cfg.SolCompilerPath,\n\n\t\t// keep the ballot sources ref\n\t\tballotSources: cfg.VotingSources,\n\t}\n\n\t// inform about voting sources\n\tlog.Infof(\"voting ballots accepted from %s\", cfg.VotingSources)\n\n\t// propagate callbacks\n\tdbBridge.SetBalance(p.AccountBalance)\n\n\t// make the service orchestrator\n\tp.orc = newOrchestrator(&p, log)\n\n\t// return the proxy\n\treturn &p, nil\n}", "func NewPool(t mockConstructorTestingTNewPool) *Pool {\n\tmock := &Pool{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func NewRepository(db *gorm.DB) Repository {\n\treturn &repository{\n\t\tpagination.Service{},\n\t\tdb,\n\t}\n}", "func NewRepo(a *config.AppConfig) *Repository {\n\treturn &Repository{\n\t\tApp: a,\n\t}\n}", "func NewRepo(a *config.AppConfig) *Repository {\n\treturn &Repository{\n\t\tApp: a,\n\t}\n}", "func NewRepo(a *config.AppConfig) *Repository {\n\treturn &Repository{\n\t\tApp: a,\n\t}\n}", "func NewRepo(a *config.AppConfig) *Repository {\n\treturn &Repository{\n\t\tApp: a,\n\t}\n}", "func NewPool(config etc.RedisPool) (pool *redis.Pool, err error) {\n\tconfigURL, err := url.Parse(config.URL)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"invalid redis URL: %s\", err)\n\t\treturn\n\t}\n\n\tswitch configURL.Scheme {\n\tcase \"redis\":\n\t\tpool = newInstancePool(config)\n\tdefault:\n\t\terr = fmt.Errorf(\"invalid redis URL scheme: %s\", configURL.Scheme)\n\t}\n\treturn\n}", "func NewRepository(dataBaseConnection *sql.DB) Repository {\n\treturn &repository{\n\t\tdb: dataBaseConnection,\n\t}\n}", "func NewPool(cfg *config) (*Pool, error) {\n\tp := new(Pool)\n\tp.cfg = cfg\n\n\terr := p.initDB()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp.limiter = network.NewRateLimiter()\n\tdcrdRPCCfg := 
&rpcclient.ConnConfig{\n\t\tHost: cfg.DcrdRPCHost,\n\t\tEndpoint: \"ws\",\n\t\tUser: cfg.RPCUser,\n\t\tPass: cfg.RPCPass,\n\t\tCertificates: cfg.dcrdRPCCerts,\n\t}\n\n\tminPmt, err := dcrutil.NewAmount(cfg.MinPayment)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmaxTxFeeReserve, err := dcrutil.NewAmount(cfg.MaxTxFeeReserve)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp.ctx, p.cancel = context.WithCancel(context.Background())\n\thcfg := &network.HubConfig{\n\t\tActiveNet: cfg.net,\n\t\tWalletRPCCertFile: cfg.WalletRPCCert,\n\t\tWalletGRPCHost: cfg.WalletGRPCHost,\n\t\tDcrdRPCCfg: dcrdRPCCfg,\n\t\tPoolFee: cfg.PoolFee,\n\t\tMaxTxFeeReserve: maxTxFeeReserve,\n\t\tMaxGenTime: new(big.Int).SetUint64(cfg.MaxGenTime),\n\t\tPaymentMethod: cfg.PaymentMethod,\n\t\tLastNPeriod: cfg.LastNPeriod,\n\t\tWalletPass: cfg.WalletPass,\n\t\tMinPayment: minPmt,\n\t\tPoolFeeAddrs: cfg.poolFeeAddrs,\n\t\tSoloPool: cfg.SoloPool,\n\t}\n\n\tp.hub, err = network.NewHub(p.ctx, p.cancel, p.db, p.httpc, hcfg, p.limiter)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar blockExplorerURL string\n\tswitch cfg.ActiveNet {\n\tcase chaincfg.TestNet3Params.Name:\n\t\tblockExplorerURL = \"https://testnet.dcrdata.org\"\n\tdefault:\n\t\tblockExplorerURL = \"https://explorer.dcrdata.org\"\n\t}\n\n\tgcfg := &gui.Config{\n\t\tCtx: p.ctx,\n\t\tSoloPool: cfg.SoloPool,\n\t\tGUIDir: cfg.GUIDir,\n\t\tBackupPass: cfg.BackupPass,\n\t\tGUIPort: cfg.GUIPort,\n\t\tTLSCertFile: defaultTLSCertFile,\n\t\tTLSKeyFile: defaultTLSKeyFile,\n\t\tActiveNet: cfg.net,\n\t\tPaymentMethod: cfg.PaymentMethod,\n\t\tBlockExplorerURL: blockExplorerURL,\n\t}\n\n\tp.gui, err = gui.NewGUI(gcfg, p.hub, p.db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn p, nil\n}", "func NewRepository(data []byte) Repository {\n\treturn Repository{\n\t\tdata: data,\n\t}\n}", "func NewRepo(a *config.AppConfig, db *driver.DB) *Repository {\n\treturn &Repository{\n\t\tApp: a,\n\t\tDB: dbrepo.NewPostgresRepo(db.SQL, a),\n\t}\n}", "func New(db *sql.DB) *Repository {\n\treturn &Repository{\n\t\tdb: sqlx.NewDb(db, \"mysql\"),\n\t}\n}", "func newResourcePool(config resourcePoolConfig) (*resourcePool, error) {\n\terr := (&config).setup()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trp := &resourcePool{\n\t\tminSize: config.MinSize,\n\t\tmaxSize: config.MaxSize,\n\t\texpiredFn: config.ExpiredFn,\n\t\tcloseFn: config.CloseFn,\n\t\tinitFn: config.InitFn,\n\t\tmaintainInterval: config.MaintainInterval,\n\t}\n\n\treturn rp, nil\n}", "func NewRepo(a *config2.AppConfig) *Repository {\n\treturn &Repository{\n\t\tApp: a,\n\t}\n}", "func NewRepo(a *config.AppConfig, db *driver.DB) *Repository {\n\treturn &Repository{\n\t\tApp: a,\n\t\tDB: dbrepo.NewPostgresRepo(db.SQL, a),\n\t}\n}", "func NewRepository() Repository {\n\treturn &GormRepository{}\n}", "func newPool(opts EngineOpts) *connPool {\n\tpool := tunny.NewFunc(opts.PoolSize, func(payload interface{}) interface{} {\n\t\tfn, ok := payload.(executable)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"payload is not an executable: %T\", payload)\n\t\t}\n\t\treturn fn()\n\t})\n\treturn &connPool{\n\t\tp: pool,\n\t\tto: opts.TransactionTimeout,\n\t}\n}", "func NewRepository(config *RepositoryConfig) (Repository, error) {\n\tsettings := postgresql.ConnectionURL{\n\t\tUser: config.User,\n\t\tPassword: config.Password,\n\t\tHost: config.Host,\n\t\tDatabase: config.Database,\n\t\tOptions: map[string]string{\"sslmode\": config.SSLMode},\n\t}\n\n\tdb, err := postgresql.Open(settings)\n\tif err != nil {\n\t\treturn Repository{}, 
err\n\t}\n\n\tdb.SetPreparedStatementCache(config.PreparedStatementCacheEnabled)\n\tdb.SetMaxIdleConns(config.MaxIdleConns)\n\tdb.SetMaxOpenConns(config.MaxOpenConns)\n\n\treturn Repository{db}, nil\n}" ]
[ "0.7033007", "0.70324636", "0.68525314", "0.67147243", "0.65552145", "0.65296394", "0.6516489", "0.64633226", "0.6458645", "0.6428717", "0.6424986", "0.64021033", "0.6314789", "0.62775224", "0.6276378", "0.62416613", "0.6236914", "0.621444", "0.61860937", "0.6185766", "0.6185194", "0.6177726", "0.61723626", "0.61685574", "0.6155021", "0.61531234", "0.61443263", "0.6143662", "0.61287546", "0.61287546", "0.61287546", "0.6120398", "0.6120008", "0.6116608", "0.611141", "0.6100531", "0.6099751", "0.6097242", "0.60846514", "0.6076042", "0.6066851", "0.6059344", "0.6053035", "0.60494703", "0.6048846", "0.6046728", "0.60456777", "0.6043235", "0.60253406", "0.6012124", "0.60072374", "0.60048616", "0.6002951", "0.6002951", "0.60027", "0.599297", "0.5986745", "0.59860003", "0.59845424", "0.59845424", "0.5975751", "0.59707314", "0.59680027", "0.59641886", "0.5962434", "0.5942204", "0.59408695", "0.5938838", "0.5938039", "0.5935358", "0.59352005", "0.5931444", "0.5930982", "0.59293246", "0.5927361", "0.59253377", "0.59126425", "0.590892", "0.5904077", "0.5898507", "0.5898286", "0.58950984", "0.5875687", "0.5869919", "0.5867181", "0.5867181", "0.5867181", "0.5867181", "0.58641565", "0.5860008", "0.58599776", "0.5859343", "0.58549505", "0.58546376", "0.5853594", "0.58526254", "0.58493763", "0.5848169", "0.5844212", "0.5841985" ]
0.76721686
0
NewRepository returns Repository with global mysql pool
func NewRepositoryWithGlobal() *Repository {
	return NewRepository(global.DASMySQLPool)
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewRepository(repo *Repository, db *sql.DB) (*Repository, error) {\n\tfmt.Println(\"START NewRepository\")\n\tdefer fmt.Println(\"END START NewRepository\")\n\n\trepo.db = db\n\n\terr := db.Ping()\n\tif err != nil {\n\t\tfmt.Println(\"db err: \", err)\n\t\treturn nil, err\n\t}\n\n\tdb.SetMaxIdleConns(500)\n\tdb.SetMaxOpenConns(500)\n\n\treturn repo, nil\n}", "func NewRepository(db middleware.Pool) *Repository {\n\treturn &Repository{Database: db}\n}", "func newRepository() Repository {\n\tif cfg == nil {\n\t\tpanic(fmt.Errorf(\"missing configuration\"))\n\t}\n\tif log == nil {\n\t\tpanic(fmt.Errorf(\"missing logger\"))\n\t}\n\n\tp2p.SetConfig(cfg)\n\tp2p.SetLogger(log)\n\n\t// create connections\n\tcaBridge, dbBridge, rpcBridge, geoBridge, err := connect(cfg, log)\n\tif err != nil {\n\t\tlog.Fatal(\"repository init failed\")\n\t\treturn nil\n\t}\n\n\t// construct the proxy instance\n\tp := proxy{\n\t\tcache: caBridge,\n\t\tdb: dbBridge,\n\t\trpc: rpcBridge,\n\t\tgeoip: geoBridge,\n\t\tlog: log,\n\t\tcfg: cfg,\n\n\t\t// get the map of governance contracts\n\t\tgovContracts: governanceContractsMap(cfg.Governance),\n\n\t\t// keep reference to the SOL compiler\n\t\tsolCompiler: cfg.Compiler.DefaultSolCompilerPath,\n\t}\n\n\t// return the proxy\n\treturn &p\n}", "func New(databaseConfig *Config, logger pgx.Logger) (*Repository, error) {\n\tpostgresDataSource := fmt.Sprintf(\"postgres://%s:%s@%s/%s?sslmode=%s\",\n\t\tdatabaseConfig.Username,\n\t\tdatabaseConfig.Password,\n\t\tdatabaseConfig.Hostname,\n\t\tdatabaseConfig.Name,\n\t\tdatabaseConfig.SSLMode)\n\tpoolConfig, err := pgxpool.ParseConfig(postgresDataSource)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpoolConfig.ConnConfig.Logger = logger\n\tlogLevelMapping := map[string]pgx.LogLevel{\n\t\t\"trace\": pgx.LogLevelTrace,\n\t\t\"debug\": pgx.LogLevelDebug,\n\t\t\"info\": pgx.LogLevelInfo,\n\t\t\"warn\": pgx.LogLevelWarn,\n\t\t\"error\": pgx.LogLevelError,\n\t}\n\tpoolConfig.ConnConfig.LogLevel = logLevelMapping[databaseConfig.LogLevel]\n\tpoolConfig.MaxConns = databaseConfig.MaxConnections\n\tpoolConfig.MinConns = databaseConfig.MinConnections\n\n\tpool, err := pgxpool.ConnectConfig(context.Background(), poolConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Repository{pool: pool}, nil\n}", "func newConnectionsRepository(p *PostRepository, m *MessageRepository, u *UserRepository) *ConnectionsRepository {\n\t// 1024 * 1024 => single MB\n\tdebug.SetGCPercent(20)\n\treturn &ConnectionsRepository{connections: make(map[string]PostConnections, 1000), postRepo: p, mu: &sync.Mutex{}, messageRepo: m, userRepo: u}\n}", "func NewRepository(connString string, conns int, timeoutMillis int) (*SQLRepository, error) {\n\tdb, err := sql.Open(\"sqlserver\", connString)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdb.SetMaxOpenConns(conns)\n\tdb.SetMaxIdleConns(conns)\n\n\treturn &SQLRepository{\n\t\tdb: db,\n\t\tmutex: new(sync.Mutex),\n\t\ttimeout: time.Millisecond * time.Duration(timeoutMillis),\n\t}, nil\n}", "func New(sqlConn string) (*Repository, error) {\n\tdb, err := connectDatabase(sqlConn)\n\tif err != nil {\n\t\tlogger.Error(\"error connecting to db\", zap.Error(err))\n\t\treturn nil, err\n\t}\n\treturn &Repository{\n\t\tdb: db,\n\t}, nil\n}", "func New(db *sql.DB) *Repository {\n\treturn &Repository{\n\t\tdb: sqlx.NewDb(db, \"mysql\"),\n\t}\n}", "func NewRepository() (Repository, error) {\n\n\t// Mysql connection configuration\n\tconfig := &mysql.Config{\n\t\tUser: _MysqlUser,\n\t\tPasswd: _MysqlPassword,\n\t\tAddr: 
fmt.Sprintf(\"%s:%s\", _MysqlHost, _MysqlPort),\n\t\tDBName: _MysqlDB,\n\t\tLoc: time.UTC,\n\t\tParseTime: true,\n\t\tAllowNativePasswords: true,\n\t\tNet: \"tcp\",\n\t}\n\n\t// Do connecting mysql server\n\tconn, err := sql.Open(\"mysql\", config.FormatDSN())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Check if mysql is available\n\tif err := conn.Ping(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// return mysql object with connection & error value\n\treturn &repository{conn}, nil\n}", "func newClientMongoRepository() repository.ClientRepository {\n\tmongoAddr := os.Getenv(\"DATABASE_CONN\")\n\tfmt.Println(\"mongoAddr => \", mongoAddr)\n\tclient := repositoryimpl.Connect(mongoAddr)\n\treturn repositoryimpl.NewRepository(client)\n}", "func New() (Repo, error) {\n\t// Open the my.db data file in your current directory.\n\t// It will be created if it doesn't exist.\n\tlogrus.Infof(\"Using database file %s\\n\", conf.Options.DB.ConnectString)\n\tdb, err := bolt.Open(conf.Options.DB.ConnectString, 0600, &bolt.Options{Timeout: 1 * time.Second})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = db.Update(func(tx *bolt.Tx) error {\n\t\t_, err := tx.CreateBucketIfNotExists([]byte(\"users\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = tx.CreateBucketIfNotExists([]byte(\"teams\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = tx.CreateBucketIfNotExists([]byte(\"teamusers\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = tx.CreateBucketIfNotExists([]byte(\"oauth\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = tx.CreateBucketIfNotExists([]byte(\"channels\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = tx.CreateBucketIfNotExists([]byte(\"joinslack\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = tx.CreateBucketIfNotExists([]byte(\"convicted\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr := &repo{\n\t\tdb: db,\n\t\tstop: make(chan bool),\n\t}\n\tgo r.cleanOAuthState()\n\treturn r, nil\n}", "func newRepository(\n\tid borges.RepositoryID,\n\tsto storage.Storer,\n\tfs billy.Filesystem,\n\tm borges.Mode,\n\ttransactional bool,\n\tl *Location,\n) (*Repository, error) {\n\trepo, err := git.Open(sto, nil)\n\tif err != nil {\n\t\tif err == git.ErrRepositoryNotExists {\n\t\t\trepo, err = git.Init(sto, nil)\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn nil, borges.ErrLocationNotExists.Wrap(err, id)\n\t\t}\n\t}\n\n\treturn &Repository{\n\t\tid: id,\n\t\trepo: repo,\n\t\ts: sto,\n\t\tfs: fs,\n\t\tmode: m,\n\t\ttransactional: transactional,\n\t\tlocation: l,\n\t\tcreateVersion: -1,\n\t}, nil\n}", "func newGreetingRepository(db *sql.DB) engine.GreetingRepository {\n\treturn &greetingRepository{db}\n}", "func NewRepository(pool *pgxpool.Pool) *Repository {\n\treturn &Repository{pool: pool}\n}", "func NewRepository(db *sql.DB) payment.Repository {\n\treturn &repository{gq: goqu.New(\"postgres\", db)}\n}", "func NewRepo(uri string) (Repository, error) {\n\tclient, err := mongo.NewClient(options.Client().ApplyURI(uri))\n\tlog.Println(\"db client created\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 20 * time.Second)\n\tdefer cancel()\n\terr = client.Connect(ctx)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlog.Println(\"db client connected\")\n\n\terr = client.Ping(ctx, readpref.Primary())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlog.Println(\"db client 
ping\")\n\n\tdbName := os.Getenv(\"DATABASE_NAME\")\n\tdb := client.Database(dbName)\n\tcol := db.Collection(os.Getenv(\"TRAVEL_COLLECTION\"))\n\treturn &DBRepository{\n\t\tclient: \tclient,\n\t\tdatabase: db,\n\t\tCollection: col,\n\t}, nil\n}", "func NewRepository(conn *pgx.Conn) *RepositoryImpl {\n\treturn &RepositoryImpl{conn: conn}\n}", "func NewRepo(db *db.DB) {\n\tPsql = db.SQL\n}", "func NewRepository(db *sql.DB) *repository {\n\treturn &repository{db: db}\n}", "func New(conn *sqlx.DB) *Repository {\n\treturn &Repository{\n\t\tconn: conn,\n\t}\n}", "func New(db *bolt.DB) Repository {\n\treturn Repository{\n\t\tdb: db,\n\t}\n}", "func NewRepository(db mysql.BaseRepository, logger log.Logger) Repository {\n\treturn repository{db, logger}\n}", "func New(connection *gorm.DB) Interface {\n\treturn &Repository{connection: connection}\n}", "func New(c *config.Config) (*Repository, error) {\n\tDBURL := fmt.Sprintf(\"host=%s port=%d user=%s dbname=%s sslmode=disable password=%s\",\n\t\tc.Database.DBHost,\n\t\tc.Database.DBPort,\n\t\tc.Database.DBUser,\n\t\tc.Database.DBName,\n\t\tc.Database.DBPassword)\n\n\tdb, err := gorm.Open(dbDriver, DBURL)\n\tif err != nil {\n\t\tfmt.Printf(\"Unable to connect to database, %v\", err)\n\t\tdefer db.Close()\n\t\treturn nil, err\n\t}\n\n\tdb.LogMode(true)\n\n\tuserRepository := UserRepository{db}\n\n\tr := &Repository{db: db, UserRepository: &userRepository}\n\treturn r, nil\n}", "func New(db *sql.DB, logger log.Logger) (todo.Repository, error) {\n\t// return repository\n\treturn &repository{\n\t\tdb: db,\n\t\tlogger: log.With(logger, \"rep\", \"cockroachdb\"),\n\t}, nil\n}", "func NewRepository(region, tableName string) Repository {\n\treturn nil\n}", "func NewRepository(db *pgx.ConnPool) HistoryRepository {\n\treturn Repository{\n\t\tdb: db,\n\t}\n}", "func New(pool *pgxpool.Pool) (repository.Repository, error) {\n\tif pool == nil {\n\t\treturn nil, ErrMissingPool\n\t}\n\n\treturn postgresRepository{\n\t\tpool: pool,\n\t}, nil\n}", "func New(conn *pgx.Conn) *RepositoryImpl {\n\treturn &RepositoryImpl{conn: conn}\n}", "func NewRepository(pool *pgxpool.Pool) *Repository {\n\treturn &Repository{\n\t\tpool: pool,\n\t}\n}", "func initializeRepo(database *string) repository.ClientRepository {\n\tswitch *database {\n\tcase \"mongo\":\n\t\treturn newClientMongoRepository()\n\tdefault:\n\t\treturn nil // we can have several implementation like in memory, postgress etc\n\t}\n}", "func New(db *sql.DB, logger log.Logger) (order.Repository, error) {\n\t// return repository\n\treturn &repository{\n\t\tdb: db,\n\t\tlogger: log.With(logger, \"rep\", \"cockroachdb\"),\n\t}, nil\n}", "func MakeRepository(db *sql.DB) journal.Repository {\n\tr := &repository{db}\n\n\treturn r\n}", "func requestedRepository(repoName string) (repository.Repository, error) {\n\t/*\t_, repoName, err := parseGitCommand(sshcmd)\n\t\tif err != nil {\n\t\t\treturn repository.Repository{}, err\n\t\t}*/\n\tvar repo repository.Repository\n\tconn, err := db.Conn()\n\tif err != nil {\n\t\treturn repository.Repository{}, err\n\t}\n\tdefer conn.Close()\n\tif err := conn.Repository().Find(bson.M{\"_id\": repoName}).One(&repo); err != nil {\n\t\treturn repository.Repository{}, errors.New(\"Repository not found\")\n\t}\n\treturn repo, nil\n}", "func NewJobRepository(ctxIn context.Context, credentialsProvider secret.SecretProvider) (JobRepository, error) {\n ctx, span := trace.StartSpan(ctxIn, \"NewJobRepository\")\n defer span.End()\n\n storageService, err := service.NewStorageService(ctx, 
credentialsProvider)\n if err != nil {\n return nil, err\n }\n return &defaultJobRepository{storageService: storageService}, nil\n}", "func newRepo(r *github.Repository) Repo {\n\tvar lang string\n\tif r.Language != nil {\n\t\tlang = *r.Language\n\t} else {\n\t\tlang = \"-\"\n\t}\n\treturn Repo{*r.HTMLURL, lang, *r.StargazersCount, *r.ForksCount}\n}", "func NewRepository(db *sqlx.DB) *Repository {\n\treturn &Repository{\n\t\tDbConn: db,\n\t}\n}", "func NewRepository(db *sqlx.DB) *Repository {\n\treturn &Repository{\n\t\tDbConn: db,\n\t}\n}", "func NewRepository(db *sqlx.DB, log log.Logger) Repository {\n\treturn repository{db: db, logger: log}\n}", "func NewRepository(dbConn *sqlx.DB) Repository {\n\treturn &repository{\n\t\tdb: dbConn,\n\t}\n}", "func newUserRepo(db *sql.DB) *userRepo {\n\treturn &userRepo{\n\t\tdb: db,\n\t}\n}", "func (c AppConfig) GetRepository() (repositories.Repository, error) {\n\tif c.Storage == \"neo4j\" {\n\t\tdb, err := whiterabbit.Open(c.Neo4j)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t// TODO: test the connection\n\t\t//defer func() {\n\t\t//\tdb.Close()\n\t\t//}()\n\n\t\treturn neo4j.New(*db), nil\n\t}\n\treturn nil, fmt.Errorf(\"storage %s not implemeted\", c.Storage)\n}", "func NewRepository(conf *Configuration) (storage.Repository, error) {\n\t// Set client options\n\tclientOptions := options.Client().\n\t\tApplyURI(conf.ConnectionString)\n\n\tif conf.Username != \"\" {\n\t\tclientOptions = clientOptions.\n\t\t\tSetAuth(options.Credential{\n\t\t\t\tUsername: conf.Username,\n\t\t\t\tPassword: conf.Password,\n\t\t\t})\n\t}\n\n\t// Connect to MongoDB\n\tclient, err := mongo.Connect(context.TODO(), clientOptions)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Check the connection\n\tif err = client.Ping(context.TODO(), nil); err != nil {\n\t\treturn nil, err\n\t}\n\n\tcollection := client.\n\t\tDatabase(conf.Database).\n\t\tCollection(conf.Collection)\n\n\t// build database\n\tstconf := *conf\n\tstconf.Password = \"\"\n\n\tdb := mongoStore{\n\t\tClient: client,\n\t\tCollection: collection,\n\t\tConfiguration: stconf,\n\t}\n\treturn &db, nil\n}", "func NewRepository(namespace string, k8sClient client.Client, connDecryptToken string) conn_repository.Repository {\n\treturn &k8sConnectionRepository{\n\t\tnamespace: namespace,\n\t\tk8sClient: k8sClient,\n\t\tconnDecryptToken: connDecryptToken,\n\t}\n}", "func NewRepository(config *RepositoryConfig) (Repository, error) {\n\tsettings := postgresql.ConnectionURL{\n\t\tUser: config.User,\n\t\tPassword: config.Password,\n\t\tHost: config.Host,\n\t\tDatabase: config.Database,\n\t\tOptions: map[string]string{\"sslmode\": config.SSLMode},\n\t}\n\n\tdb, err := postgresql.Open(settings)\n\tif err != nil {\n\t\treturn Repository{}, err\n\t}\n\n\tdb.SetPreparedStatementCache(config.PreparedStatementCacheEnabled)\n\tdb.SetMaxIdleConns(config.MaxIdleConns)\n\tdb.SetMaxOpenConns(config.MaxOpenConns)\n\n\treturn Repository{db}, nil\n}", "func NewRepository(db *db.Database) *Repository {\n\treturn &Repository{db}\n}", "func New() backing.Repo {\n\treturn &Repo{}\n}", "func NewRepository(dbDSN string) (*Repository, error) {\n\tc, err := pgx.ParseConfig(dbDSN)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdb := stdlib.OpenDB(*c)\n\terr = migrateUp(db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Repository{\n\t\tdb: db,\n\t\tsb: squirrel.StatementBuilder.PlaceholderFormat(squirrel.Dollar),\n\t}, nil\n}", "func NewRepository(repoName string) *Repository {\n\n\tclientIndex := 
model.ByEquality(\"ClientId\")\n\tclientIndex.Unique = true\n\t//\tuserIndex := model.ByEquality(\"UserId\")\n\t//\tuserIndex.Unique = true\n\n\treturn &Repository{\n\t\tName: repoName,\n\t\tmesssages: model.NewTable(store.DefaultStore, repoName, model.Indexes(clientIndex), nil),\n\t}\n}", "func NewRepositories() *repository.Repositories {\n\tdriver := config.Get(\"driver\")\n\n\tvar impl *repository.Repositories\n\tif driver == \"mysql\" {\n\t\tlog.Info().Msg(\"Select mysql as a driver for repositories\")\n\t\tdb, err := mysql.NewDB()\n\t\tif err != nil {\n\t\t\tlog.Panic().Msg(err.Error())\n\t\t}\n\n\t\timpl = &repository.Repositories{\n\t\t\tQueue: mysql.NewQueueRepository(db),\n\t\t\tRouting: mysql.NewRoutingRepository(db),\n\t\t}\n\t}\n\tif driver == \"in-memory\" {\n\t\tlog.Info().Msg(\"Select in-memory as a driver for repositories\")\n\t\timpl = &repository.Repositories{\n\t\t\tQueue: inmemory.NewQueueRepository(),\n\t\t\tRouting: inmemory.NewRoutingRepository(),\n\t\t}\n\t}\n\n\tif impl == nil {\n\t\tlog.Panic().Msgf(\"Unknown driver: %s\", driver)\n\t}\n\n\treturn impl\n}", "func NewRepository(db database.Database) Repository {\n\treturn Repository{db}\n}", "func NewRepository() Repository {\n\tclient := redis.NewClient(&redis.Options{\n\t\tAddr: \"127.0.0.1:6379\",\n\t\tPassword: \"\",\n\t\tDB: 0,\n\t})\n\n\treturn Repository{Client: client}\n}", "func NewRepository(db *gorm.DB) *Repository {\n\treturn &Repository{db}\n}", "func NewRepository(db *gorm.DB) *Repository {\n\treturn &Repository{db}\n}", "func NewRepository(dataBaseConnection *sql.DB) Repository {\n\treturn &repository{\n\t\tdb: dataBaseConnection,\n\t}\n}", "func NewRepository(db *component.Mysql) *Repository {\n\treturn &Repository{\n\t\tdb: db,\n\t\tLogActivities: mysql.NewLogActivitiesRepo(db),\n\t}\n}", "func New(db *gorm.DB) (Repository, error) {\n\treturn &repo{\n\t\tDB: db,\n\t}, nil\n}", "func NewRepository(db *sql.DB) Repository {\n\treturn &repoSvc{\n\t\tQueries: New(db),\n\t\tdb: db,\n\t}\n}", "func (s *Submodule) Repository() (*Repository, error) {\n\tstorer, err := s.w.r.Storer.Module(s.c.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t_, err = storer.Reference(plumbing.HEAD)\n\tif err != nil && err != plumbing.ErrReferenceNotFound {\n\t\treturn nil, err\n\t}\n\n\tworktree := s.w.fs.Dir(s.c.Path)\n\tif err == nil {\n\t\treturn Open(storer, worktree)\n\t}\n\n\tr, err := Init(storer, worktree)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t_, err = r.CreateRemote(&config.RemoteConfig{\n\t\tName: DefaultRemoteName,\n\t\tURL: s.c.URL,\n\t})\n\n\treturn r, err\n}", "func (s *server) newRepoClient(ctx context.Context, storageName string) (gitalypb.RepositoryServiceClient, error) {\n\tgitalyServerInfo, err := storage.ExtractGitalyServer(ctx, storageName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconn, err := s.conns.Dial(ctx, gitalyServerInfo.Address, gitalyServerInfo.Token)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn gitalypb.NewRepositoryServiceClient(conn), nil\n}", "func New(db *gorm.DB) *Repository {\n\treturn &Repository{\n\t\tdb: db,\n\t}\n}", "func New(db *gorm.DB) *Repository {\n\treturn &Repository{\n\t\tdb: db,\n\t}\n}", "func New(db *gorm.DB) *Repository {\n\treturn &Repository{\n\t\tdb: db,\n\t}\n}", "func NewRepository(MongoClient *mongo.Client) Repository {\n\treturn &repository{\n\t\tdatabaseAccess{\n\t\t\tdb: MongoClient,\n\t\t},\n\t}\n}", "func NewRepository(db *gorm.DB) ProductDomain.Repository {\n\treturn &handler{\n\t\tdb: db,\n\t}\n}", "func NewRepository(db 
*database.Database) *Repository {\n\treturn &Repository{db}\n}", "func NewRepository(tableName string) repository.Repository {\n\treturn repository.NewRepository(tableName)\n}", "func newOrderRepo(db *sql.DB) *orderRepo {\n\treturn &orderRepo{\n\t\tdb: db,\n\t}\n}", "func getNewPool(cfg *config.Pool) *pool {\n\tvar p pool\n\n\tp.lockDuration = cfg.LockDuration\n\n\tp.locks = make(map[*config.Resource]*ResourceLock)\n\tfor _, resource := range cfg.Resources {\n\t\tp.locks[resource] = nil\n\t}\n\n\tkeys, _ := storage.GetKeys(storageKey)\n\tfor _, key := range keys {\n\t\tvar lock ResourceLock\n\t\tif err := storage.Read(storageKey, key, &lock); err != nil {\n\t\t\tlog.Errorf(\"[Pool] unable to restore lock for '%s': %s\", key, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tfor k := range p.locks {\n\t\t\tif k.Name == key {\n\t\t\t\tlock.Resource = *k\n\t\t\t\tp.locks[k] = &lock\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn &p\n}", "func NewRepository(tableName string) RepositoryInterface {\n\treturn &repository{tableName: tableName}\n}", "func newPool(s *site) (*pool, query.Error) {\n\tp := new(pool)\n\tp.site = s\n\tp.id = POOL_ID\n\tp.name = POOL_NAME\n\tp.buckets = make(map[string]catalog.Bucket)\n\n\te := p.loadBuckets()\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\treturn p, nil\n}", "func (c *container) Repository() db.RepositoryInterface {\n\tif nil == c.repository {\n\t\tc.repository = db.NewRepository(c.DbConnection())\n\t}\n\treturn c.repository\n}", "func repositoryFactory(ctx context.Context, providerConfig config.Provider, configVariablesClient config.VariablesClient) (Repository, error) {\n\t// parse the repository url\n\trURL, err := url.Parse(providerConfig.URL())\n\tif err != nil {\n\t\treturn nil, errors.Errorf(\"failed to parse repository url %q\", providerConfig.URL())\n\t}\n\n\tif rURL.Scheme == httpsScheme {\n\t\t// if the url is a GitHub repository\n\t\tif rURL.Host == githubDomain {\n\t\t\trepo, err := NewGitHubRepository(ctx, providerConfig, configVariablesClient)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrap(err, \"error creating the GitHub repository client\")\n\t\t\t}\n\t\t\treturn repo, err\n\t\t}\n\n\t\t// if the url is a GitLab repository\n\t\tif strings.HasPrefix(rURL.Host, gitlabHostPrefix) && strings.HasPrefix(rURL.RawPath, gitlabPackagesAPIPrefix) {\n\t\t\trepo, err := NewGitLabRepository(providerConfig, configVariablesClient)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrap(err, \"error creating the GitLab repository client\")\n\t\t\t}\n\t\t\treturn repo, err\n\t\t}\n\n\t\treturn nil, errors.Errorf(\"invalid provider url. Only GitHub and GitLab are supported for %q schema\", rURL.Scheme)\n\t}\n\n\t// if the url is a local filesystem repository\n\tif rURL.Scheme == \"file\" || rURL.Scheme == \"\" {\n\t\trepo, err := newLocalRepository(ctx, providerConfig, configVariablesClient)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"error creating the local filesystem repository client\")\n\t\t}\n\t\treturn repo, err\n\t}\n\n\treturn nil, errors.Errorf(\"invalid provider url. 
there are no provider implementation for %q schema\", rURL.Scheme)\n}", "func NewRepo(a *config.Appconfig) *Repository {\n\treturn &Repository{\n\t\tApp: a,\n\t}\n}", "func New(prototype Aggregate, opts ...Option) *Repository {\n\tt := reflect.TypeOf(prototype)\n\tif t.Kind() == reflect.Ptr {\n\t\tt = t.Elem()\n\t}\n\n\tr := &Repository{\n\t\tprototype: t,\n\t\tstore: newMemoryStore(),\n\t\tserializer: NewJSONSerializer(),\n\t}\n\n\tfor _, opt := range opts {\n\t\topt(r)\n\t}\n\n\treturn r\n}", "func NewRepository() Repository {\n\treturn &GormRepository{}\n}", "func newTxAndBoardRepository() (tx *gorm.DB, repo *BoardRepository) {\n\ttx = orm.GetDB().Begin()\n\trepo = NewBoardRepository(tx)\n\treturn\n}", "func (r *Resolver) Repository() generated.RepositoryResolver { return &repositoryResolver{r} }", "func NewRepository(conv APIDefinitionConverter) *pgRepository {\n\treturn &pgRepository{\n\t\tsingleGetter: repo.NewSingleGetter(apiDefTable, apiDefColumns),\n\t\tsingleGetterGlobal: repo.NewSingleGetterGlobal(resource.API, apiDefTable, apiDefColumns),\n\t\tpageableQuerier: repo.NewPageableQuerier(apiDefTable, apiDefColumns),\n\t\tbundleRefQueryBuilder: repo.NewQueryBuilderGlobal(resource.BundleReference, bundlereferences.BundleReferenceTable, []string{bundlereferences.APIDefIDColumn}),\n\t\tlister: repo.NewLister(apiDefTable, apiDefColumns),\n\t\tlisterGlobal: repo.NewListerGlobal(resource.API, apiDefTable, apiDefColumns),\n\t\tcreator: repo.NewCreator(apiDefTable, apiDefColumns),\n\t\tcreatorGlobal: repo.NewCreatorGlobal(resource.API, apiDefTable, apiDefColumns),\n\t\tupdater: repo.NewUpdater(apiDefTable, updatableColumns, idColumns),\n\t\tupdaterGlobal: repo.NewUpdaterGlobal(resource.API, apiDefTable, updatableColumns, idColumns),\n\t\tdeleter: repo.NewDeleter(apiDefTable),\n\t\tdeleterGlobal: repo.NewDeleterGlobal(resource.API, apiDefTable),\n\t\texistQuerier: repo.NewExistQuerier(apiDefTable),\n\t\tconv: conv,\n\t}\n}", "func NewRepo() (*Repo, error) {\n\tconn, err := dbconn.Connect() \n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trepo := new(Repo)\n\trepo.DB = conn\n\treturn repo, nil\n}", "func NewRepo(a *config.AppConfig) *Repository {\n\treturn &Repository{ //returns the referenc to a Repository\n\t\tApp: a, // populate in \"App\" from type \"Repository\n\t}\n}", "func NewPostgreRepository() (Repository, error) {\n\tdb, err := driver.ConnectDB()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trepo := postgresRepository{\n\t\tdb,\n\t}\n\treturn repo, nil\n}", "func New() *Repository {\n\treturn &Repository{\n\t\tagents: map[string]*adagio.Agent{},\n\t\truns: map[string]runState{},\n\t\tclaims: map[string]struct {\n\t\t\trun *adagio.Run\n\t\t\tnode *adagio.Node\n\t\t}{},\n\t\tlisteners: listenerSet{},\n\t}\n}", "func R() Repository {\n\t// make sure to instantiate the Repository only once\n\tonceRepo.Do(func() {\n\t\trepo = newRepository()\n\t})\n\treturn repo\n}", "func NewRepository(db *dbcontext.DB, logger log.Logger) Repository {\n\treturn repository{db, logger}\n}", "func NewRepository(collection Collection) BasicRepository {\n\treturn &basicRepositoryUsecase{\n\t\tcollectionName: collection,\n\t}\n}", "func Instance() (contract.MySQLRepo, error) {\n\tonceDB.Do(func() {\n\t\tcfg := config.GetConfigEnvironment()\n\n\t\tdataSourceName := fmt.Sprintf(\"%s:root@tcp(%s:%s)/%s?charset=utf8&parseTime=true\",\n\t\t\tcfg.MySQL.Username, cfg.MySQL.Host, cfg.MySQL.Port, cfg.MySQL.DBName,\n\t\t)\n\n\t\tlog.Info(\"Connecting to database...\")\n\t\tdb, connErr := sql.Open(\"mysql\", 
dataSourceName)\n\t\tif connErr != nil {\n\t\t\treturn\n\t\t}\n\n\t\tlog.Info(\"Database Ping...\")\n\t\tconnErr = db.Ping()\n\t\tif connErr != nil {\n\t\t\treturn\n\t\t}\n\n\t\tlog.Info(\"Creating database...\")\n\t\tif _, connErr = db.Exec(fmt.Sprintf(\"CREATE DATABASE IF NOT EXISTS %s;\", cfg.MySQL.DBName)); connErr != nil {\n\t\t\tlogger.Error(\"Create Database error: \", connErr)\n\t\t\treturn\n\t\t}\n\n\t\tif _, connErr = db.Exec(fmt.Sprintf(\"USE %s;\", cfg.MySQL.DBName)); connErr != nil {\n\t\t\tlogger.Error(\"Default Database error: \", connErr)\n\t\t\treturn\n\t\t}\n\n\t\tconnErr = mysqlDriver.SetLogger(logger.GetLogger())\n\t\tif connErr != nil {\n\t\t\treturn\n\t\t}\n\t\tlogger.Info(\"Database successfully configured\")\n\n\t\tlogger.Info(\"Running the migrations\")\n\t\tdriver := darwin.NewGenericDriver(db, darwin.MySQLDialect{})\n\n\t\td := darwin.New(driver, migrations.Migrations, nil)\n\n\t\tconnErr = d.Migrate()\n\t\tif connErr != nil {\n\t\t\tlogger.Error(\"Migrate Error: \", connErr)\n\t\t\treturn\n\t\t}\n\n\t\tlogger.Info(\"Migrations executed\")\n\n\t\tconn = &mysqlConn{\n\t\t\tdb: db,\n\t\t}\n\t})\n\n\treturn conn, connErr\n}", "func NewRepository(option cache.Option) internal.Repository {\n\tvar repo internal.Repository\n\tswitch option.AlgorithmType {\n\tcase cache.LRUAlgorithm:\n\t\trepo = lru.New(option.MaxSizeItem, option.MaxMemory, option.ExpiryTime)\n\tcase cache.LFUAlgorithm:\n\t\trepo = lfu.New(option.MaxSizeItem, option.MaxMemory, option.ExpiryTime)\n\t}\n\treturn repo\n}", "func RepoCreateDatabaseConnection(t DatabaseConnection) DatabaseConnection {\n\tcurrentId += 1\n\tt.Id = currentId\n\tdatabaseConnections = append(databaseConnections, t)\n\treturn t\n}", "func NewRepository(db interfaces.IDatabase) interfaces.IRepository {\n\treturn &Repository{\n\t\tdb: db,\n\t\tcollectionName: \"models\",\n\t}\n}", "func New(cfg *config.Config, log logger.Logger) (Repository, error) {\n\t// create new in-memory cache bridge\n\tcaBridge, err := cache.New(cfg, log)\n\tif err != nil {\n\t\tlog.Criticalf(\"can not create in-memory cache bridge, %s\", err.Error())\n\t\treturn nil, err\n\t}\n\n\t// create new database connection bridge\n\tdbBridge, err := db.New(cfg, log)\n\tif err != nil {\n\t\tlog.Criticalf(\"can not connect backend persistent storage, %s\", err.Error())\n\t\treturn nil, err\n\t}\n\n\t// create new Lachesis RPC bridge\n\trpcBridge, err := rpc.New(cfg, log)\n\tif err != nil {\n\t\tlog.Criticalf(\"can not connect Lachesis RPC interface, %s\", err.Error())\n\t\treturn nil, err\n\t}\n\n\t// try to validate the solidity compiler by asking for it's version\n\tif _, err := compiler.SolidityVersion(cfg.SolCompilerPath); err != nil {\n\t\tlog.Criticalf(\"can not invoke the Solidity compiler, %s\", err.Error())\n\t\treturn nil, err\n\t}\n\n\t// construct the proxy instance\n\tp := proxy{\n\t\tcache: caBridge,\n\t\tdb: dbBridge,\n\t\trpc: rpcBridge,\n\t\tlog: log,\n\n\t\t// keep reference to the SOL compiler\n\t\tsolCompiler: cfg.SolCompilerPath,\n\n\t\t// keep the ballot sources ref\n\t\tballotSources: cfg.VotingSources,\n\t}\n\n\t// inform about voting sources\n\tlog.Infof(\"voting ballots accepted from %s\", cfg.VotingSources)\n\n\t// propagate callbacks\n\tdbBridge.SetBalance(p.AccountBalance)\n\n\t// make the service orchestrator\n\tp.orc = newOrchestrator(&p, log)\n\n\t// return the proxy\n\treturn &p, nil\n}", "func testRepo() *library.Repo {\n\treturn &library.Repo{\n\t\tID: new(int64),\n\t\tUserID: new(int64),\n\t\tBuildLimit: new(int64),\n\t\tTimeout: 
new(int64),\n\t\tCounter: new(int),\n\t\tPipelineType: new(string),\n\t\tHash: new(string),\n\t\tOrg: new(string),\n\t\tName: new(string),\n\t\tFullName: new(string),\n\t\tLink: new(string),\n\t\tClone: new(string),\n\t\tBranch: new(string),\n\t\tVisibility: new(string),\n\t\tPreviousName: new(string),\n\t\tPrivate: new(bool),\n\t\tTrusted: new(bool),\n\t\tActive: new(bool),\n\t\tAllowPull: new(bool),\n\t\tAllowPush: new(bool),\n\t\tAllowDeploy: new(bool),\n\t\tAllowTag: new(bool),\n\t\tAllowComment: new(bool),\n\t}\n}", "func (d *drv) getPool(P commonAndPoolParams) (*connPool, error) {\n\t// initialize driver, if necessary\n\tif err := d.init(P.ConfigDir, P.LibDir); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar usernameKey string\n\tvar passwordHash [sha256.Size]byte\n\tif !P.Heterogeneous && !P.ExternalAuth {\n\t\t// skip username being part of key in heterogeneous pools\n\t\tusernameKey = P.Username\n\t\tpasswordHash = sha256.Sum256([]byte(P.Password.Secret())) // See issue #245\n\t}\n\t// determine key to use for pool\n\tpoolKey := fmt.Sprintf(\"%s\\t%x\\t%s\\t%d\\t%d\\t%d\\t%s\\t%s\\t%s\\t%t\\t%t\\t%t\\t%s\\t%d\\t%s\",\n\t\tusernameKey, passwordHash[:4], P.ConnectString, P.MinSessions, P.MaxSessions,\n\t\tP.SessionIncrement, P.WaitTimeout, P.MaxLifeTime, P.SessionTimeout,\n\t\tP.Heterogeneous, P.EnableEvents, P.ExternalAuth,\n\t\tP.Timezone, P.MaxSessionsPerShard, P.PingInterval,\n\t)\n\tlogger := P.Logger\n\tif logger != nil {\n\t\tlogger.Debug(\"getPool\", \"key\", poolKey)\n\t}\n\n\t// if pool already exists, return it immediately; otherwise, create a new\n\t// pool; hold the lock while the pool is looked up (and created, if needed)\n\t// in order to ensure that multiple goroutines do not attempt to create a\n\t// pool\n\td.mu.RLock()\n\tpool, ok := d.pools[poolKey]\n\td.mu.RUnlock()\n\tif ok {\n\t\treturn pool, nil\n\t}\n\t// createPool uses checkExec wich needs getError which uses RLock,\n\t// so we cannot Lock here, thus this little race window for\n\t// creating a pool and throwing it away.\n\tpool, err := d.createPool(P)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\td.mu.Lock()\n\tdefer d.mu.Unlock()\n\tif poolOld, ok := d.pools[poolKey]; ok {\n\t\t_ = pool.Close()\n\t\treturn poolOld, nil\n\t}\n\tpool.key = poolKey\n\td.pools[poolKey] = pool\n\treturn pool, nil\n}", "func newRepoCache(apiURL *url.URL, a auth.Authenticator) *rcache.Cache {\n\tvar cacheTTL time.Duration\n\tif urlIsGitHubDotCom(apiURL) {\n\t\tcacheTTL = 10 * time.Minute\n\t} else {\n\t\t// GitHub Enterprise\n\t\tcacheTTL = 30 * time.Second\n\t}\n\n\tkey := \"\"\n\tif a != nil {\n\t\tkey = a.Hash()\n\t}\n\treturn rcache.NewWithTTL(\"gh_repo:\"+key, int(cacheTTL/time.Second))\n}", "func New(db *db.DB) core.RepositoryStore {\n\treturn &repoStore{db}\n}", "func newRepoImpl(ctx context.Context, gs gitstore.GitStore, repo *gitiles.Repo, gcsClient gcs.GCSClient, gcsPath string, p *pubsub.Publisher, includeBranches, excludeBranches []string) (repograph.RepoImpl, error) {\n\tindexCommits, err := gs.RangeByTime(ctx, vcsinfo.MinTime, vcsinfo.MaxTime, gitstore.ALL_BRANCHES)\n\tif err != nil {\n\t\treturn nil, skerr.Wrapf(err, \"Failed loading IndexCommits from GitStore.\")\n\t}\n\tvar commits []*vcsinfo.LongCommit\n\tif len(indexCommits) > 0 {\n\t\thashes := make([]string, 0, len(indexCommits))\n\t\tfor _, c := range indexCommits {\n\t\t\thashes = append(hashes, c.Hash)\n\t\t}\n\t\tcommits, err = gs.Get(ctx, hashes)\n\t\tif err != nil {\n\t\t\treturn nil, skerr.Wrapf(err, \"Failed loading LongCommits from 
GitStore.\")\n\t\t}\n\t}\n\tgb, err := gs.GetBranches(ctx)\n\tif err != nil {\n\t\treturn nil, skerr.Wrapf(err, \"Failed loading branches from GitStore.\")\n\t}\n\tbranches := make([]*git.Branch, 0, len(gb))\n\tfor name, branch := range gb {\n\t\tbranches = append(branches, &git.Branch{\n\t\t\tName: name,\n\t\t\tHead: branch.Head,\n\t\t})\n\t}\n\tcommitsMap := make(map[string]*vcsinfo.LongCommit, len(commits))\n\tfor _, c := range commits {\n\t\tcommitsMap[c.Hash] = c\n\t}\n\tsklog.Infof(\"Repo %s has %d commits and %d branches.\", repo.URL(), len(commits), len(branches))\n\tfor _, b := range branches {\n\t\tsklog.Infof(\" branch %s @ %s\", b.Name, b.Head)\n\t}\n\treturn &repoImpl{\n\t\tMemCacheRepoImpl: repograph.NewMemCacheRepoImpl(commitsMap, branches),\n\t\tgcsClient: gcsClient,\n\t\tgcsPath: gcsPath,\n\t\tgitiles: repo,\n\t\tgitstore: gs,\n\t\tpubsub: p,\n\t\tincludeBranches: includeBranches,\n\t\texcludeBranches: excludeBranches,\n\t}, nil\n}", "func NewRepository(db *gorm.DB) *Repository {\n\treturn &Repository{\n\t\tdb: db,\n\t}\n}", "func NewRepository(db *gorm.DB) *Repository {\n\treturn &Repository{\n\t\tdb: db,\n\t}\n}", "func NewRepository() dish.Repository {\n\tdb, err := sql.Open(\"sqlite3\", \"./internal/database/test.sqlite\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn &repository{\n\t\tdb: db,\n\t}\n}", "func NewDBAdapterRepository(config *config.Config) *sql.DB {\n\tconnectionString := fmt.Sprintf(\"host=%s port=%s user=%s password=%s dbname=%s sslmode=disable\",\n\t\tconfig.Database.Host, config.Database.Port, config.Database.User, config.Database.Pass, config.Database.DBName)\n\n\tdbConn, err := sql.Open(config.Database.DBType, connectionString)\n\tif err != nil {\n\t\tlogger.Logger.WithError(err).WithField(\"connection_string\", connectionString).Errorf(\"Unable to connect to database\")\n\t\treturn nil\n\t}\n\tlogger.Logger.WithField(\"connection_string\", connectionString).Info(\"connect to database\")\n\tdbConn.SetMaxOpenConns(config.Database.PoolSize)\n\tdbConn.SetMaxIdleConns(config.Database.PoolSize)\n\treturn dbConn\n}" ]
[ "0.6742565", "0.66320115", "0.6582812", "0.65410703", "0.6381777", "0.6357211", "0.6310225", "0.6292557", "0.62729156", "0.62224114", "0.62185216", "0.6188371", "0.61257654", "0.6119679", "0.6106526", "0.609459", "0.60807514", "0.6075455", "0.6065493", "0.6063669", "0.60626966", "0.60591906", "0.6038626", "0.6019743", "0.60042334", "0.60024476", "0.5998384", "0.59942484", "0.59727293", "0.5942222", "0.59196746", "0.59128445", "0.58991545", "0.58914745", "0.58774364", "0.58727986", "0.58483285", "0.58483285", "0.5834462", "0.5827699", "0.581175", "0.5809956", "0.5803903", "0.5796238", "0.57798654", "0.57671773", "0.57469696", "0.5745758", "0.574551", "0.57455", "0.57443464", "0.5732923", "0.57287717", "0.57287717", "0.5728514", "0.57280797", "0.57142943", "0.5712451", "0.570172", "0.5701707", "0.5687496", "0.5687496", "0.5687496", "0.567858", "0.5677184", "0.566823", "0.5655086", "0.56524", "0.563166", "0.562394", "0.5620527", "0.56186795", "0.5607601", "0.5597454", "0.5595444", "0.5594651", "0.5591883", "0.55861324", "0.5581571", "0.5581111", "0.5576494", "0.5575235", "0.55664945", "0.55649847", "0.55429", "0.55418414", "0.5529488", "0.55140054", "0.5512396", "0.55123824", "0.5508869", "0.5496279", "0.54936713", "0.54876715", "0.5485992", "0.5484029", "0.54770803", "0.54770803", "0.5471158", "0.54682267" ]
0.63773257
5
Execute executes given command and placeholders on the middleware
func (r *Repository) Execute(command string, args ...interface{}) (middleware.Result, error) {
	conn, err := r.Database.Get()
	if err != nil {
		return nil, err
	}

	defer func() {
		err = conn.Close()
		if err != nil {
			log.Errorf("alert DASRepo.Execute(): close database connection failed.\n%s", err.Error())
		}
	}()

	return conn.Execute(command, args...)
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *Command) Execute(user string, msg string, args []string) {\n}", "func (h *Handler) Execute(name string, args []string) {\n\tlog.Warn(\"generic doesn't support command execution\")\n}", "func (this Middleware) executeMiddlewareLocally(pair models.RequestResponsePair) (models.RequestResponsePair, error) {\n\tcommandAndArgs := []string{this.Binary, this.Script.Name()}\n\n\tmiddlewareCommand := exec.Command(commandAndArgs[0], commandAndArgs[1:]...)\n\n\t// getting payload\n\tpairViewBytes, err := json.Marshal(pair.ConvertToRequestResponsePairView())\n\n\tif err != nil {\n\t\treturn pair, errors.New(\"Failed to marshal request to JSON\")\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"middleware\": this.toString(),\n\t\t\"stdin\": string(pairViewBytes),\n\t}).Debug(\"preparing to modify payload\")\n\n\tvar stdout bytes.Buffer\n\tvar stderr bytes.Buffer\n\n\t// Redirect standard streams\n\tmiddlewareCommand.Stdin = bytes.NewReader(pairViewBytes)\n\tmiddlewareCommand.Stdout = &stdout\n\tmiddlewareCommand.Stderr = &stderr\n\n\tif err := middlewareCommand.Start(); err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"sdtdout\": string(stdout.Bytes()),\n\t\t\t\"sdtderr\": string(stderr.Bytes()),\n\t\t\t\"error\": err.Error(),\n\t\t}).Error(\"Middleware failed to start\")\n\t\treturn pair, err\n\t}\n\n\tif err := middlewareCommand.Wait(); err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"sdtdout\": string(stdout.Bytes()),\n\t\t\t\"sdtderr\": string(stderr.Bytes()),\n\t\t\t\"error\": err.Error(),\n\t\t}).Error(\"Middleware failed to stop successfully\")\n\t\treturn pair, err\n\t}\n\n\t// log stderr, middleware executed successfully\n\tif len(stderr.Bytes()) > 0 {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"sdtderr\": string(stderr.Bytes()),\n\t\t}).Info(\"Information from middleware\")\n\t}\n\n\tif len(stdout.Bytes()) > 0 {\n\t\tvar newPairView RequestResponsePairView\n\n\t\terr = json.Unmarshal(stdout.Bytes(), &newPairView)\n\n\t\tif err != nil {\n\t\t\treturn pair, errors.New(\"Failed to unmarshal JSON from middleware\")\n\t\t} else {\n\t\t\tif log.GetLevel() == log.DebugLevel {\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"middleware\": this.toString(),\n\t\t\t\t\t\"payload\": string(stdout.Bytes()),\n\t\t\t\t}).Debug(\"payload after modifications\")\n\t\t\t}\n\t\t\t// payload unmarshalled into RequestResponsePair struct, returning it\n\t\t\treturn models.NewRequestResponsePairFromRequestResponsePairView(newPairView), nil\n\t\t}\n\t} else {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"stdout\": string(stdout.Bytes()),\n\t\t}).Warn(\"No response from middleware.\")\n\t}\n\n\treturn pair, nil\n\n}", "func (cmd startCmd) Execute(_ []string) error {\n\t// Set up the logger\n\tlog := logger.NewLeveledLogger(cmd.LogLevel)\n\tdefer log.Sync()\n\tlog.Debug(\"DEBUG logging enabled\")\n\n\tpdClient := pagerduty.NewClient(cmd.APIKey)\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\th := handler.New(pdClient, log)\n\th.WithMiddleware(httplogger.HTTPLogger)\n\ts := server.New(h, cmd.Port, 5*time.Second, log)\n\n\treturn s.Serve(ctx)\n}", "func (n *Namer) Execute(with interface{}) (string, error) {\n\treturn n.ExecuteWithTemplate(with, n.Template())\n}", "func (e *CustomExecutor) Execute(s api.DiscordSession, channel model.Snowflake, command *model.Command) {\n\tif command.Custom == nil {\n\t\tlog.Fatal(\"Incorrectly generated learn command\", errors.New(\"wat\"))\n\t}\n\n\thas, err := e.commandMap.Has(command.Custom.Call)\n\tif err != nil {\n\t\tlog.Fatal(\"Error 
testing custom feature\", err)\n\t}\n\tif !has {\n\t\tlog.Fatal(\"Accidentally found a mismatched call/response pair\", errors.New(\"call response mismatch\"))\n\t}\n\n\tresponse, err := e.commandMap.Get(command.Custom.Call)\n\tif err != nil {\n\t\tlog.Fatal(\"Error reading custom response\", err)\n\t}\n\n\t// Perform command substitutions.\n\tif strings.Contains(response, \"$1\") {\n\t\tif command.Custom.Args == \"\" {\n\t\t\tresponse = MsgCustomNeedsArgs\n\t\t} else {\n\t\t\tresponse = strings.Replace(response, \"$1\", command.Custom.Args, 4)\n\t\t}\n\t} else if matches := giphyRegexp.FindStringSubmatch(response); len(matches) > 2 {\n\t\turl := matches[2]\n\t\tresponse = fmt.Sprintf(MsgGiphyLink, url)\n\t}\n\n\ts.ChannelMessageSend(channel.Format(), response)\n}", "func (c *Command) Execute(ctx context.Context) error {\n\n\tpctx := &commandContext{\n\t\tdependencyResolver: pipeline.NewDependencyRecorder[*commandContext](),\n\t\tContext: ctx,\n\t}\n\n\tp := pipeline.NewPipeline[*commandContext]().WithBeforeHooks(pipe.DebugLogger[*commandContext](c.Log), pctx.dependencyResolver.Record)\n\tp.WithSteps(\n\t\tp.NewStep(\"create client\", c.createClient),\n\t\tp.NewStep(\"fetch task\", c.fetchTask),\n\t\tp.NewStep(\"list intermediary files\", c.listIntermediaryFiles),\n\t\tp.NewStep(\"delete intermediary files\", c.deleteFiles),\n\t\tp.NewStep(\"delete source file\", c.deleteSourceFile),\n\t)\n\n\treturn p.RunWithContext(pctx)\n}", "func (s *server) Execute(args ExecuteArgs, resp *string) error {\n\tr, err := s.impl.Execute(args)\n\t*resp = r\n\treturn err\n}", "func (_e *handler_Expecter) Execute(req interface{}, s interface{}) *handler_Execute_Call {\n\treturn &handler_Execute_Call{Call: _e.mock.On(\"Execute\", req, s)}\n}", "func Command(handler handlers.CommandHandler) starlark.Fn {\n\treturn func(thread *lib.Thread, b *lib.Builtin, args lib.Tuple, kwargs []lib.Tuple) (lib.Value, error) {\n\t\tctx := starlark.GetCtx(thread)\n\t\tdryrun := starlark.GetDryRunMode(thread)\n\t\tparams, err := parseArgs(b, args, kwargs)\n\t\tif err != nil {\n\t\t\treturn lib.None, xerrors.Errorf(\": %w\", err)\n\t\t}\n\t\tzap.L().Debug(\n\t\t\t\"params\",\n\t\t\tzap.String(\"cmd\", params.CmdName),\n\t\t\tzap.Strings(\"args\", params.CmdArgs),\n\t\t\tzap.String(\"user\", params.User),\n\t\t\tzap.String(\"cwd\", params.Cwd),\n\t\t)\n\n\t\tbuf := new(bytes.Buffer)\n\t\tfor _, arg := range params.CmdArgs {\n\t\t\tfmt.Fprintf(buf, \" %s\", arg)\n\t\t}\n\t\tfmt.Fprint(buf, \"\\n\")\n\t\tui.Infof(\"Executing command: %s%s\", params.CmdName, buf.String())\n\t\tif err := handler.Command(ctx, dryrun, params); err != nil {\n\t\t\treturn lib.None, xerrors.Errorf(\": %w\", err)\n\t\t}\n\t\treturn lib.None, nil\n\t}\n}", "func Execute() {\n\n\t// initialize router\n\trouter := mux.NewRouter()\n\n\t// load custom routes\n\tloadRoutes(router)\n\n\t// initialize http server configs\n\tserver := http.Server{\n\t\tAddr: fmt.Sprintf(\":%s\", config.BackendPort),\n\t\tHandler: router,\n\t}\n\n\t// start http server\n\tfmt.Printf(\"HTTP Server listening on port: %s\\n\", config.BackendPort)\n\tserver.ListenAndServe()\n}", "func (cmd *command) Execute(ch io.ReadWriter) (err error) {\n\tif cmd.Flags.Source {\n\t\terr = cmd.serveSource(ch)\n\t} else {\n\t\terr = cmd.serveSink(ch)\n\t}\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\treturn nil\n}", "func (h *Handler) Execute() {\n\thost := configuration.Instance.GetServiceHost()\n\twelcome := \"Starting to service \" + host + \" with root path as \" + 
configuration.Instance.Service.Path\n\tlog.Println(welcome)\n\tlog.Fatal(http.ListenAndServe(host, h.Router))\n}", "func (client *Client) Execute(command string) {\n\tclient.SendResponse(command)\n}", "func (e *Execute) Execute(args []string) error {\n\tfmt.Println(\"args: \", args)\n\tif len(args) <= 0 {\n\t\treturn fmt.Errorf(\"no args passed to echo\")\n\t}\n\n\tcli := client.NewClient(e.ClientOpts)\n\terr := cli.Init()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer cli.Close()\n\n\tresp, err := cli.Execute(request.Request{Query: string(args[0])})\n\tfmt.Println(\"ERROR: \", err, \" RESP: \", resp)\n\n\treturn nil\n}", "func (c *Command) Execute() {\n\targs := os.Args[1:]\n\tswitch argsLen := len(args); {\n\tcase argsLen == 1:\n\t\tc.Run(args)\n\tdefault:\n\t\tlog.Println(\"our service currently handle 1 command only\")\n\t}\n}", "func (s *Slack) CommandHandler(w http.ResponseWriter, r *http.Request) {\n\tverifier, err := slack.NewSecretsVerifier(r.Header, s.SigningSecret)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tr.Body = ioutil.NopCloser(io.TeeReader(r.Body, &verifier))\n\n\tslackSlashCommand, err := slack.SlashCommandParse(r)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif err = verifier.Ensure(); err != nil {\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\ts.logger.WithFields(logrus.Fields{\n\t\t\"slash_command\": slackSlashCommand,\n\t}).Info()\n\n\tswitch slackSlashCommand.Command {\n\tcase utils.PollCommand:\n\t\tcmd := commands.PollCommand{SlackSlashCommand: &slackSlashCommand, ResponseWriter: w, Client: s.Client, Logger: s.logger}\n\t\tcmd.Execute()\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n}", "func (s *ServerCommand) Execute(args []string) error {\n\n\t//log.Printf(\"[INFO] start server on port %d\", s.Port)\n\tresetEnv(\"SECRET\", \"AUTH_GOOGLE_CSEC\", \"AUTH_GITHUB_CSEC\", \"AUTH_FACEBOOK_CSEC\", \"AUTH_YANDEX_CSEC\")\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tgo func() { // catch signal and invoke graceful termination\n\t\tstop := make(chan os.Signal, 1)\n\t\tsignal.Notify(stop, os.Interrupt, syscall.SIGTERM)\n\t\t<-stop\n\t\tlog.Print(\"[WARN] interrupt signal\")\n\t\tcancel()\n\t}()\n\n\tapp, err := s.newServerApp()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err = app.run(ctx); err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"[INFO] terminated\")\n\treturn nil\n}", "func (r *Client) Execute(s ...string) {\n\n\tout := r.ExecuteAndReturn(s...)\n\n\tprint(out)\n}", "func (s *service) ExecuteCommand(pluginContext *plugin.Context, commandArgs *model.CommandArgs) (resp *model.CommandResponse, err error) {\n\tparams := &commandParams{\n\t\tpluginContext: pluginContext,\n\t\tcommandArgs: commandArgs,\n\t}\n\tif pluginContext == nil || commandArgs == nil {\n\t\treturn errorOut(params, errors.New(\"invalid arguments to command.Handler. Please contact your system administrator\"))\n\t}\n\n\tconf := s.conf.MattermostConfig().Config()\n\tenableOAuthServiceProvider := conf.ServiceSettings.EnableOAuthServiceProvider\n\tif enableOAuthServiceProvider == nil || !*enableOAuthServiceProvider {\n\t\treturn errorOut(params, errors.Errorf(\"the system setting `Enable OAuth 2.0 Service Provider` needs to be enabled in order for the Apps plugin to work. 
Please go to %s/admin_console/integrations/integration_management and enable it.\", commandArgs.SiteURL))\n\t}\n\n\tenableBotAccounts := conf.ServiceSettings.EnableBotAccountCreation\n\tif enableBotAccounts == nil || !*enableBotAccounts {\n\t\treturn errorOut(params, errors.Errorf(\"the system setting `Enable Bot Account Creation` needs to be enabled in order for the Apps plugin to work. Please go to %s/admin_console/integrations/bot_accounts and enable it.\", commandArgs.SiteURL))\n\t}\n\n\tsplit := strings.Fields(commandArgs.Command)\n\tif len(split) < 2 {\n\t\treturn errorOut(params, errors.New(\"no subcommand specified, nothing to do\"))\n\t}\n\n\tcommand := split[0]\n\tif command != \"/\"+config.CommandTrigger {\n\t\treturn errorOut(params, errors.Errorf(\"%q is not a supported command and should not have been invoked. Please contact your system administrator\", command))\n\t}\n\n\tparams.current = split[1:]\n\n\tdefer func(log utils.Logger, developerMode bool) {\n\t\tif x := recover(); x != nil {\n\t\t\tstack := string(debug.Stack())\n\n\t\t\tlog.Errorw(\n\t\t\t\t\"Recovered from a panic in a command\",\n\t\t\t\t\"command\", commandArgs.Command,\n\t\t\t\t\"error\", x,\n\t\t\t\t\"stack\", stack,\n\t\t\t)\n\n\t\t\ttxt := utils.CodeBlock(commandArgs.Command+\"\\n\") + \"Command paniced. \"\n\n\t\t\tif developerMode {\n\t\t\t\ttxt += fmt.Sprintf(\"Error: **%v**. Stack:\\n%v\", x, utils.CodeBlock(stack))\n\t\t\t} else {\n\t\t\t\ttxt += \"Please check the server logs for more details.\"\n\t\t\t}\n\t\t\tresp = &model.CommandResponse{\n\t\t\t\tText: txt,\n\t\t\t\tResponseType: model.CommandResponseTypeEphemeral,\n\t\t\t}\n\t\t}\n\t}(s.conf.Logger(), s.conf.Get().DeveloperMode)\n\n\treturn s.handleMain(params)\n}", "func (p *Plugin) ExecuteCommand(c *plugin.Context, args *model.CommandArgs) (*model.CommandResponse, *model.AppError) {\n\tsplit := strings.Fields(args.Command)\n\tcommand := split[0]\n\t//parameters := []string{}\n\t//action := \"\"\n\t//if len(split) > 1 {\n\t//\taction = split[1]\n\t//}\n\t//if len(split) > 2 {\n\t//\tparameters = split[2:]\n\t//}\n\n\tif command != \"/goscrum\" {\n\t\treturn &model.CommandResponse{}, nil\n\t}\n\n\ttext := \"###### Mattermost goscrum Plugin - Slash Command Help\\n\" + strings.Replace(`* |/welcomebot preview [team-name] [user-name]| - preview the welcome message for the given team name. 
The current user's username will be used to render the template.\n* |/welcomebot list| - list the teams for which welcome messages were defined`, \"|\", \"`\", -1)\n\tp.postCommandResponse(args, text)\n\n\t//switch action {\n\t//case \"standup\":\n\t//\tvar str strings.Builder\n\t//\tstr.WriteString(\"Welcome to goscrum\")\n\t//\tp.postCommandResponse(args, str.String())\n\t//case \"help\":\n\t//\tfallthrough\n\t//case \"\":\n\t//\ttext := \"###### Mattermost welcomebot Plugin - Slash Command Help\\n\" + strings.Replace(COMMAND_HELP, \"|\", \"`\", -1)\n\t//\tp.postCommandResponse(args, text)\n\t//\treturn &model.CommandResponse{}, nil\n\t//}\n\treturn &model.CommandResponse{}, nil\n}", "func (p *PrintCommand) Execute(_ engine.Handler) {\n\tfmt.Println(p.Arg)\n}", "func (ctrl *PGCtrl) Execute(q string) error {\n\t_, err := ctrl.conn.Exec(q)\n\treturn err\n}", "func Execute(\n\tctx context.Context,\n\tpayload gapir.Payload,\n\thandlePost builder.PostDataHandler,\n\thandleNotification builder.NotificationHandler,\n\tconnection *gapir.Connection,\n\tmemoryLayout *device.MemoryLayout,\n\tos *device.OS) error {\n\n\tctx = status.Start(ctx, \"Execute\")\n\tdefer status.Finish(ctx)\n\n\t// The memoryLayout is specific to the ABI of the requested capture,\n\t// while the OS is not. Thus a device.Configuration is not applicable here.\n\treturn executor{\n\t\tpayload: payload,\n\t\thandlePost: handlePost,\n\t\thandleNotification: handleNotification,\n\t\tmemoryLayout: memoryLayout,\n\t\tOS: os,\n\t}.execute(ctx, connection)\n}", "func (p *Plugin) ExecuteCommand(c *plugin.Context, args *model.CommandArgs) (*model.CommandResponse, *model.AppError) {\n\tstringArgs := strings.Split(strings.TrimSpace(args.Command), \" \")\n\tlengthOfArgs := len(stringArgs)\n\trestOfArgs := []string{}\n\n\tvar handler func([]string, *model.CommandArgs) (*model.CommandResponse, bool, error)\n\tif lengthOfArgs == 1 {\n\t\thandler = p.runListCommand\n\t} else {\n\t\tcommand := stringArgs[1]\n\t\tif lengthOfArgs > 2 {\n\t\t\trestOfArgs = stringArgs[2:]\n\t\t}\n\t\tswitch command {\n\t\tcase \"add\":\n\t\t\thandler = p.runAddCommand\n\t\tcase \"list\":\n\t\t\thandler = p.runListCommand\n\t\tcase \"start\":\n\t\t\thandler = p.runStartCommand\n\t\tdefault:\n\t\t\treturn getCommandResponse(model.COMMAND_RESPONSE_TYPE_EPHEMERAL, getHelp()), nil\n\t\t}\n\t}\n\n\tresp, isUserError, err := handler(restOfArgs, args)\n\tif err != nil {\n\t\tif isUserError {\n\t\t\treturn getCommandResponse(model.COMMAND_RESPONSE_TYPE_EPHEMERAL, fmt.Sprintf(\"__Error: %s__\\n\\nRun `/cquiz help` for usage instructions.\", err.Error())), nil\n\t\t}\n\t\tp.API.LogError(err.Error())\n\t\treturn getCommandResponse(model.COMMAND_RESPONSE_TYPE_EPHEMERAL, \"An unknown error occurred. 
Please talk to your system administrator for help.\"), nil\n\t}\n\n\treturn resp, nil\n}", "func (cb *Breaker) Execute(req func() (interface{}, error)) (interface{}, error) {\n\tgeneration, err := cb.beforeRequest()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer func() {\n\t\te := recover()\n\t\tif e != nil {\n\t\t\tcb.afterRequest(generation, false)\n\t\t\tpanic(e)\n\t\t}\n\t}()\n\n\tresult, err := req()\n\tcb.afterRequest(generation, err == nil)\n\treturn result, err\n}", "func (cmd *StartCommand) Execute(args []string) error {\n\ts, err := web.NewServer(cmd.Host, cmd.Port, cmd.Expiration)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error while starting web server: %v\", err)\n\t}\n\n\ts.Handle()\n\n\treturn nil\n}", "func (r StoreBot) Run(command *SlashCommand) (slashCommandImmediateReturn string) {\n // If you (optionally) want to do some asynchronous work (like sending API calls to slack)\n // you can put it in a go routine like this\n go r.DeferredAction(command)\n // The string returned here will be shown only to the user who executed the command\n // and will show up as a message from slackbot.\n return \"\"\n}", "func (e *Engine) ExecuteCommand(ctx context.Context, m dogma.Message) error {\n\tselect {\n\tcase <-e.ready:\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\t}\n\n\tmt := message.TypeOf(m)\n\n\tif x, ok := e.executors[mt]; ok {\n\t\treturn x.ExecuteCommand(ctx, m)\n\t}\n\n\treturn fmt.Errorf(\"no application accepts %s commands\", mt)\n}", "func (h StmtHandle) Exec(ctx context.Context, args ...interface{}) (pgx.CommandTag, error) {\n\th.check()\n\tp := h.s.sr.mcp.Get()\n\tswitch h.s.sr.method {\n\tcase prepare:\n\t\treturn p.ExecEx(ctx, h.s.prepared.Name, nil /* options */, args...)\n\n\tcase noprepare:\n\t\treturn p.ExecEx(ctx, h.s.sql, nil /* options */, args...)\n\n\tcase simple:\n\t\treturn p.ExecEx(ctx, h.s.sql, simpleProtocolOpt, args...)\n\n\tdefault:\n\t\tpanic(\"invalid method\")\n\t}\n}", "func Execute(ctx context.Context) error {\n\treturn rootCmd.ExecuteContext(ctx)\n}", "func (c *Ping) Execute(args ...string) Reply {\n\treply := checkExpcetArgs(0, args...)\n\tif _, ok := reply.(*OkReply); ok {\n\t\treturn &StringReply{Message: \"pong\"}\n\t}\n\treturn &StringReply{Message: strings.Join(args, \" \")}\n}", "func Execute(\n\tctx context.Context,\n\thandler Handler,\n\tabortHandler AbortHandler,\n\trequest interface{}) Awaiter {\n\ttask := &task{\n\t\trequest: request,\n\t\thandler: handler,\n\t\tabortHandler: abortHandler,\n\t\tresultQ: make(chan Response, 1),\n\t\trunning: true,\n\t}\n\tgo task.run(ctx) // run handler asynchronously\n\treturn task\n}", "func Execute() {\n\t// var err error\n\tcfg := config.Config()\n\t// driver := driver.NewDriver(cfg)\n\n\t// var postgreSQL *sqlx.DB\n\t// if cfg.GetBool(\"postgres.is_enabled\") {\n\t// \tpostgreSQL, err = driver.GetPostgreSQLConn()\n\t// \tif err != nil {\n\t// \t\tlogrus.Fatalf(\"failed to start, error connect to PostgreSQL | +v\", err)\n\t// \t\treturn\n\t// \t}\n\t// \tdefer postgreSQL.Close()\n\t// }\n\n\toption := pkg.Option{\n\t\tConfig: cfg,\n\t\t// PostgreSQL: postgreSQL,\n\t}\n\n\trepository := wiringRepository(repository.Option{\n\t\tOption: option,\n\t})\n\n\tservice := wiringService(service.Option{\n\t\tOption: option,\n\t\tRepository: repository,\n\t})\n\n\tserver := server.NewServer(option, service)\n\tserver.StartApp()\n}", "func (app *WebApp) Execute(args []string, out io.Writer) error {\n\n\t// Set the output\n\tapp.Output = out\n\tapp.Colour = false\n\n\t// Load core 
application\n\tappCmd := app.LoadCommand()\n\n\t// Add sub-commands\n\t// As they are webified\n\taccounts.Add(appCmd, &app.App)\n\treports.Add(appCmd, &app.App, app.Report)\n\tregister.Add(appCmd, &app.App, app.Register)\n\tcurrencies.Add(appCmd, &app.App)\n\n\t// Set the arguments\n\tappCmd.SetArgs(args)\n\n\t// Run core app\n\treturn appCmd.Execute()\n}", "func (l *CustomLambda) Execute(stdin io.Reader, args []string) (string, error) {\n\targsStr := strings.TrimSpace(strings.Join(args, \" \"))\n\tif argsStr != \"\" {\n\t\targsStr = \" \" + argsStr\n\t}\n\n\tcmd := exec.Command(\"bash\", \"-c\", l.command+argsStr)\n\n\t// pass through some stdin goodness\n\tcmd.Stdin = stdin\n\n\t// for those who are about to rock, I salute you.\n\tstdoutStderr, err := cmd.CombinedOutput()\n\n\tif err == nil {\n\t\t// noiiiice!\n\t\tlog.WithFields(log.Fields{\"name\": l.Name(), \"command\": l.command}).Info(\"Lambda Execution\")\n\t\treturn strings.TrimSpace(string(stdoutStderr)), nil\n\t}\n\n\t// *sigh*\n\tlog.WithFields(log.Fields{\"name\": l.Name(), \"command\": l.command}).Error(\"Lambda Execution\")\n\treturn string(stdoutStderr), errors.New(\"Error running command\")\n}", "func (e *ldapExecutor) Execute(ctx context.Context, config *ldapconf.Config) error {\n\treturn nil\n}", "func (m Mediator) Send(ctx context.Context, command interface{}) (interface{}, error) {\n\thandler, ok := m.registrations[reflect.TypeOf(command)]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"no handlers for command %T\", command)\n\t}\n\n\targuments := []reflect.Value{\n\t\treflect.ValueOf(command),\n\t}\n\n\tif handler.Type().NumIn() == 2 {\n\t\targuments = append(\n\t\t\t[]reflect.Value{reflect.ValueOf(ctx)},\n\t\t\targuments...,\n\t\t)\n\t}\n\n\tresult := handler.Call(arguments)\n\tswitch len(result) {\n\tcase 0:\n\t\treturn nil, nil\n\tcase 1:\n\t\treturn oneReturnValuesCommand(result)\n\tcase 2:\n\t\treturn twoReturnValuesCommand(result)\n\t}\n\treturn nil, nil\n}", "func Execute(settings internal.Settings) {\n\t// rootCmd represents the base command when called without any subcommands\n\tvar rootCmd = &cobra.Command{\n\t\tUse: \"warden\",\n\t\tShort: \"Setup and manage a reverse proxy\",\n\t\tLong: \"Setup and manage a reverse proxy\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\n\t\t\tlog.Println(\"Cleaning up...\")\n\t\t\tc := exec.Command(\n\t\t\t\t\"/bin/sh\",\n\t\t\t\t\"-c\",\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\"rm -rf %s %s\",\n\t\t\t\t\tfilepath.Join(settings.CONFIG_OUTPUT_DIR, \"/http/*\"),\n\t\t\t\t\tfilepath.Join(settings.CONFIG_OUTPUT_DIR, \"/streams/*\"),\n\t\t\t\t),\n\t\t\t)\n\n\t\t\toutput, err := c.CombinedOutput()\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\n\t\t\t\t\t\"Error cleaning up: %s: %s\",\n\t\t\t\t\terr,\n\t\t\t\t\toutput,\n\t\t\t\t)\n\t\t\t}\n\n\t\t\tlog.Println(\"Connecting to DB...\")\n\t\t\tdb, err := sql.Open(\"sqlite3\", \"file::memory:?_fk=1&cache=shared&mode=memory\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdb.SetMaxOpenConns(1)\n\t\t\tdefer db.Close()\n\n\t\t\tlog.Println(\"Creating tables...\")\n\t\t\terr = createTables(db)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tconductor := &orchestra.Conductor{\n\t\t\t\tTimeout: 15 * time.Second,\n\t\t\t\tPlayers: make(map[string]orchestra.Player),\n\t\t\t}\n\n\t\t\thub, err := getMonitor(settings)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"could not get monitor: %w\", err)\n\t\t\t}\n\t\t\tdefer hub.Flush(time.Second * 5)\n\n\t\t\tallPlayers, err := setPlayers(db, settings, 
hub)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"could not get players: %w\", err)\n\t\t\t}\n\n\t\t\t// Start all if no args were given\n\t\t\tif len(args) == 0 {\n\t\t\t\tconductor.Players = allPlayers\n\t\t\t}\n\n\t\t\tfor _, pl := range args {\n\t\t\t\tplayer, ok := allPlayers[pl]\n\t\t\t\tif ok {\n\t\t\t\t\tconductor.Players[pl] = player\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn orchestra.PlayUntilSignal(\n\t\t\t\tconductor,\n\t\t\t\tos.Interrupt, syscall.SIGTERM,\n\t\t\t)\n\t\t},\n\t}\n\n\trootCmd.Execute()\n}", "func ServerExecute() {\n\tapp := fiber.New()\n\tapp.Use(logger.New())\n\tapp.Use(cors.New(\n\t\tcors.Config{\n\t\t\tAllowOrigins: []string{\"http://localhost:3000\", \"https://kimvex.com\"},\n\t\t},\n\t))\n\n\tdatabase := db.MySQLConnect()\n\tdb.RedisConnect()\n\tmongodb := db.MonoDBConnect()\n\tgetUser := db.GetUserID\n\tsetUser := db.SetUserID\n\tdelUser := db.DeleteUserID\n\n\troutes.API(app, database, getUser, setUser, delUser, mongodb)\n\tapp.Listen(3003)\n}", "func (h *Hook) Execute(r *admission.AdmissionRequest) (*Result, error) {\n\tswitch r.Operation {\n\tcase admission.Create:\n\t\treturn wrapperExecution(h.Create, r)\n\tcase admission.Update:\n\t\treturn wrapperExecution(h.Update, r)\n\tcase admission.Delete:\n\t\treturn wrapperExecution(h.Delete, r)\n\tcase admission.Connect:\n\t\treturn wrapperExecution(h.Connect, r)\n\t}\n\n\treturn &Result{Message: fmt.Sprintf(\"Invalid operation: %s\", r.Operation)}, nil\n}", "func Execute(anyType Executable, command string) ([]reflect.Value, error) {\n\tcommandSplit := strings.Split(command, \" \")\n\tcommandName := commandSplit[0]\n\tcommandArgs := commandSplit[1:]\n\n\tmethod := reflect.ValueOf(anyType).MethodByName(strings.Title(commandName))\n\tif !method.IsValid() {\n\t\terrorMessage := commandName + \" is not a valid action\"\n\t\treturn []reflect.Value{},\n\t\t\terrors.New(errorMessage)\n\t}\n\texpectedArgsCnt := method.Type().NumIn()\n\tgivenArgsCnt := len(commandArgs)\n\tif givenArgsCnt != expectedArgsCnt {\n\t\terrorMessage := fmt.Sprintf(\n\t\t\t\"wrong number of arguments passed to %s, expected %d, got %d\",\n\t\t\tcommandName, expectedArgsCnt, givenArgsCnt)\n\t\treturn []reflect.Value{}, errors.New(errorMessage)\n\t}\n\n\tmethodArgs := make([]reflect.Value, givenArgsCnt)\n\tfor idx, _ := range commandArgs {\n\t\tmethodArgs[idx] = reflect.ValueOf(commandArgs[idx])\n\t}\n\n\treturn method.Call(methodArgs), nil\n}", "func (command *Command) trigger(ctx *Ctx) {\n\t// Check if the first argument matches a sub command\n\tif len(ctx.Arguments.arguments) > 0 {\n\t\targument := ctx.Arguments.Get(0).Raw()\n\t\tsubCommand := command.GetSubCmd(argument)\n\t\tif subCommand != nil {\n\t\t\t// Define the arguments for the sub command\n\t\t\targuments := ParseArguments(\"\")\n\t\t\tif ctx.Arguments.Amount() > 1 {\n\t\t\t\targuments = ParseArguments(strings.Join(strings.Split(ctx.Arguments.Raw(), \" \")[1:], \" \"))\n\t\t\t}\n\n\t\t\t// Trigger the sub command\n\t\t\tsubCommand.trigger(&Ctx{\n\t\t\t\tSession: ctx.Session,\n\t\t\t\tEvent: ctx.Event,\n\t\t\t\tArguments: arguments,\n\t\t\t\tCustomObjects: ctx.CustomObjects,\n\t\t\t\tRouter: ctx.Router,\n\t\t\t\tCommand: subCommand,\n\t\t\t})\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Prepare all middlewares\n\tnextHandler := command.Handler\n\tfor _, middleware := range ctx.Router.Middlewares {\n\t\tnextHandler = middleware(nextHandler)\n\t}\n\n\t// Run all middlewares\n\tnextHandler(ctx)\n}", "func (cmd *Command) Do(ctx context.Context, shell *Shell, args []string) (err error) {\n\tc := 
&Context{ctx: ctx, shell: shell, args: args}\n\tif cmd.Before != nil {\n\t\terr = cmd.Before(c)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tif cmd.Action != nil {\n\t\terr = cmd.Action(c)\n\t}\n\treturn err\n}", "func (app *App) Execute(input io.Reader, output io.Writer) error {\n\tdecoder := yaml.NewDecoder(input)\n\tvar data map[string]interface{}\n\tif err := decoder.Decode(&data); err != nil {\n\t\treturn errors.Wrap(err, \"yaml decode\")\n\t}\n\treturn errors.Wrap(app.t.Execute(output, data), \"transform execute\")\n}", "func (sr *Router) HandleCommand(ctx context.Context, header http.Header, body []byte) (s int, h http.Header, b []byte, err error) {\n\tif contentType := header.Get(\"Content-Type\"); contentType != \"application/x-www-form-urlencoded\" {\n\t\treturn http.StatusBadRequest, plainResponseHeader, []byte(fmt.Sprintf(\"requires application/x-www-form-urlencoded, not %s\", contentType)), nil\n\t}\n\tif err := sr.verifyRequest(header, body); err != nil {\n\t\treturn errorResponse(ctx, http.StatusUnauthorized, err)\n\t}\n\tparams, err := url.ParseQuery(string(body))\n\tif err != nil {\n\t\treturn errorResponse(ctx, http.StatusBadRequest, fmt.Errorf(\"invalid body: %v\", err))\n\t}\n\treq := Request(params)\n\thandler, ok := sr.commands[req.Command()]\n\tif !ok {\n\t\treturn jsonResponse(ctx, sr.commandUnknownHandler(ctx, req))\n\t}\n\tlogger := loggerFromContext(ctx)\n\tlogger.Printf(\"handling command `%s` for @%s of team %s\", req.Command(), req.UserName(), req.TeamDomain())\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"panic in command handler: %v\\n%s\", r, string(debug.Stack()))\n\t\t\ts, h, b, err = jsonResponse(ctx, sr.commandFailedHandler(ctx, req))\n\t\t}\n\t}()\n\tif sr.middleware != nil {\n\t\thandler = sr.middleware(handler)\n\t}\n\treturn jsonResponse(ctx, handler(ctx, req))\n}", "func (this Interceptor) Run(vars map[string]interface{}, next func()) {\n\turl := httper.V(vars).GetRequest().URL.Path\n\texec := this[url]\n\tif exec != nil {\n\t\texec.Run(vars, next)\n\t} else {\n\t\tnext()\n\t}\n}", "func (t *Template) Execute(i *ExecuteInput) (*ExecuteResult, error) {\n\tif i == nil {\n\t\ti = &ExecuteInput{}\n\t}\n\n\tvar used, missing dep.Set\n\n\ttmpl := template.New(\"\")\n\ttmpl.Delims(t.leftDelim, t.rightDelim)\n\n\ttmpl.Funcs(funcMap(&funcMapInput{\n\t\tt: tmpl,\n\t\tbrain: i.Brain,\n\t\tenv: i.Env,\n\t\tused: &used,\n\t\tmissing: &missing,\n\t\tfunctionBlacklist: t.functionBlacklist,\n\t\tsandboxPath: t.sandboxPath,\n\t}))\n\n\tif t.errMissingKey {\n\t\ttmpl.Option(\"missingkey=error\")\n\t} else {\n\t\ttmpl.Option(\"missingkey=zero\")\n\t}\n\n\ttmpl, err := tmpl.Parse(t.contents)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"parse\")\n\t}\n\n\t// Execute the template into the writer\n\tvar b bytes.Buffer\n\tif err := tmpl.Execute(&b, nil); err != nil {\n\t\treturn nil, errors.Wrap(err, \"execute\")\n\t}\n\n\treturn &ExecuteResult{\n\t\tUsed: &used,\n\t\tMissing: &missing,\n\t\tOutput: b.Bytes(),\n\t}, nil\n}", "func (crawl *Crawl) Execute(req *Request) (resp *Response, err error) {\n\t// Make request\n\tresp, err = crawl.Do(req)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// If request.Raw is not true - parse html\n\tif !req.Raw {\n\t\terr = resp.ParseHTML()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Set request context if empty\n\tif req.Context == nil {\n\t\treq.Context = context.Background()\n\t}\n\n\t// ctx = context.WithValue(ctx, \"crawl\", crawl)\n\t// ctx = context.WithValue(ctx, 
\"response\", resp)\n\n\t// Run handlers\n\tfor _, cb := range req.Callbacks {\n\t\tif handler := crawl.GetHandler(cb); handler != nil {\n\t\t\terr = handler(req.Context, crawl, resp)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Warningf(\"Handler %v was not found\", cb)\n\t\t}\n\t}\n\n\tlog.V(2).Infof(\"%s %s %s - %v\", req.GetMethod(), resp.GetStatus(), resp.GetURL(), req.Callbacks)\n\n\treturn\n}", "func (tx *Hello) Execute(p types.Process, ctw *types.ContextWrapper, index uint16) error {\n\tsp := p.(*HelloWorld)\n\n\treturn sp.vault.WithFee(p, ctw, tx, func() error {\n\t\tif err := sp.AddHelloCount(ctw, tx.To); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n}", "func (s *serverRegistry) Execute(frame []byte) error {\n\ttr := &thrift.TMemoryBuffer{Buffer: bytes.NewBuffer(frame)}\n\treturn s.processor.Process(s.inputProtocolFactory.GetProtocol(tr), s.outputProtocol)\n}", "func (cmd *Command) Exec() error {\n\t// need a hash map of functions to support the API\n\tvar err error\n\n\tlog.Debug(\"execute op: %d\", cmd.Op)\n\n\t// TODO: put this into a hash map\n\tswitch cmd.Op {\n\tcase PUT:\n\t\terr = cache.Put(cmd.Key, cmd.Value, 0)\n\t\tcmd.Resp = ok\n\tcase GET:\n\t\tcmd.Resp, err = cache.Get(cmd.Key)\n\tcase HAS:\n\t\tr, err := cache.Has(cmd.Key)\n\t\tif err == nil && r {\n\t\t\tcmd.Resp = yes\n\t\t} else {\n\t\t\tcmd.Resp = no\n\t\t}\n\tcase DELETE:\n\t\terr = cache.Delete(cmd.Key)\n\t\tcmd.Resp = yes\n\tcase KEYS:\n\t\tcmd.Resp = no // not implemented yet...\n\tcase PING:\n\t\tcmd.Resp = pong\n\tcase STATUS:\n\t\tcmd.Resp = ok\n\t\tlog.Info(\"status: %s\", cmd.Resp)\n\tcase SHUTDOWN:\n\t\tlog.Info(\"shutdown command received...\")\n\t\tcmd.Resp = fail\n\tdefault:\n\t\tmsg := fmt.Sprintf(\"unknown command id: %d\", cmd.Op)\n\t\tlog.Warn(msg)\n\t\terr = errors.New(msg)\n\t\tcmd.Resp = fail\n\t}\n\n\treturn err\n}", "func Execute() error {\n\treturn cmd.Execute()\n}", "func Execute(d *data.D) error {\n\taddCustomCommands(d)\n\n\tvar cmd *Command\n\tif len(d.Args) == 0 { // no command given\n\t\tcmd = helpCmd\n\t} else {\n\t\tcmd = commands[d.Args[0]]\n\t}\n\n\tif cmd == nil {\n\t\treturn fmt.Errorf(\"Unknown command '%s'\", d.Args[0])\n\t}\n\n\treturn cmd.Run(cmd, d)\n}", "func (c *unknown) execute(s *session) *response {\n\tmessage := fmt.Sprintf(\"%s unknown command\", c.cmd)\n\ts.log(message)\n\treturn bad(c.tag, message)\n}", "func (c *ToyController) Execute(ctx context.Context) error {\n\tc.le.Debug(\"toy controller executed\")\n\t<-ctx.Done()\n\treturn nil\n}", "func (c *carHandler) Execute(w http.ResponseWriter, r *http.Request) {\n\tlog.Println(\"CarsHandler actived\")\n\tcontentType := r.Header.Get(\"Content-type\")\n\tif contentType != \"application/json\" {\n\t\tlog.Println(fmt.Errorf(\"Content Type is not valid\"))\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tvar input []usecase.CarInput\n\tdefer r.Body.Close()\n\n\tdecoder := json.NewDecoder(r.Body)\n\tif err := decoder.Decode(&input); err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif err := c.validate(input); err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif err := c.CarUsecase.PutCars(input); err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tlog.Println(fmt.Sprintf(\"Car created\"))\n\tw.WriteHeader(http.StatusOK)\n\treturn\n}", "func (e *engine) executeMiddlewares(ctx *Context) {\n\tmwChain[0].Next(ctx)\n}", 
"func (cli *CLI) Execute(query string) int {\n\tfor _, command := range cli.commands {\n\t\tif command.Name == query {\n\t\t\treturn command.Function()\n\t\t}\n\t}\n\n\treturn cli.CallHelp()\n}", "func (*Execute) Frontend() {}", "func execute(yaml string, method string, endpoint string, f func(http.ResponseWriter, *http.Request), t *testing.T) *httptest.ResponseRecorder {\n\t// Read data, create a request manually, instantiate recording apparatus.\n\tdata := strings.NewReader(yaml)\n\treq, err := http.NewRequest(method, endpoint, data)\n\tok(t, err)\n\trr := httptest.NewRecorder()\n\n\t// Create handler and process request\n\thandler := http.HandlerFunc(f)\n\thandler.ServeHTTP(rr, req)\n\n\treturn rr\n}", "func (c *clientRegistry) Execute(frame []byte) error {\n\theaders, err := getHeadersFromFrame(frame)\n\tif err != nil {\n\t\tlog.Warn(\"frugal: invalid protocol frame headers:\", err)\n\t\treturn err\n\t}\n\n\topid, err := strconv.ParseUint(headers[opID], 10, 64)\n\tif err != nil {\n\t\tlog.Warn(\"frugal: invalid protocol frame:\", err)\n\t\treturn err\n\t}\n\n\tc.mu.RLock()\n\thandler, ok := c.handlers[opid]\n\tif !ok {\n\t\tc.mu.RUnlock()\n\t\treturn nil\n\t}\n\tc.mu.RUnlock()\n\n\treturn handler(&thrift.TMemoryBuffer{Buffer: bytes.NewBuffer(frame)})\n}", "func (c *Command) Do(handler func(cmd *Command)) {\n\tc.handler = handler\n}", "func (r *Execute) Run(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {\n\tvar mids *starlark.List\n\terr := starlark.UnpackPositionalArgs(fn.Name(), args, kwargs, 1, &mids)\n\tif err != nil {\n\t\treturn starlark.None, fmt.Errorf(\"unpacking arguments: %v\", err.Error())\n\t}\n\n\tfor i := 0; i < mids.Len(); i++ {\n\t\tval, ok := mids.Index(i).(Middleware)\n\t\tif !ok {\n\t\t\treturn starlark.None, fmt.Errorf(\"cannot get module from execute\")\n\t\t}\n\n\t\tr.Modules = append(r.Modules, val)\n\t}\n\n\treturn starlark.None, nil\n}", "func (p *Plugin) ExecuteCommand(c *plugin.Context, args *model.CommandArgs) (*model.CommandResponse, *model.AppError) {\n\tsplit := strings.Fields(args.Command)\n\tif len(split) < 2 {\n\t\treturn p.executeCommandHelp(args), nil\n\t}\n\n\taction := split[1]\n\n\t//nolint:goconst\n\tswitch action {\n\tcase \"add\":\n\t\treturn p.executeCommandAdd(args), nil\n\tcase \"label\":\n\t\treturn p.executeCommandLabel(args), nil\n\tcase \"remove\":\n\t\treturn p.executeCommandRemove(args), nil\n\tcase \"view\":\n\t\treturn p.executeCommandView(args), nil\n\tcase \"help\":\n\t\treturn p.executeCommandHelp(args), nil\n\n\tdefault:\n\t\treturn p.responsef(args, fmt.Sprintf(\"Unknown command: \"+args.Command)), nil\n\t}\n}", "func (cmd *RunCommand) Register(r command.Registerer) {\n\tconst helpShort = \"Pass secrets as environment variables to a process.\"\n\tconst helpLong = \"To protect against secrets leaking via stdout and stderr, those output streams are monitored for secrets. Detected secrets are automatically masked by replacing them with \\\"\" + maskString + \"\\\". \" +\n\t\t\"The output is buffered to detect secrets, but to avoid blocking the buffering is limited to a maximum duration as defined by the --masking-timeout flag. 
\" +\n\t\t\"Therefore, you should regard the masking as a best effort attempt and should always prevent secrets ending up on stdout and stderr in the first place.\"\n\n\tclause := r.Command(\"run\", helpShort)\n\tclause.HelpLong(helpLong)\n\tclause.Alias(\"exec\")\n\tclause.Arg(\"command\", \"The command to execute\").Required().StringsVar(&cmd.command)\n\tclause.Flag(\"envar\", \"Source an environment variable from a secret at a given path with `NAME=<path>`\").Short('e').StringMapVar(&cmd.envar)\n\tclause.Flag(\"env-file\", \"The path to a file with environment variable mappings of the form `NAME=value`. Template syntax can be used to inject secrets.\").StringVar(&cmd.envFile)\n\tclause.Flag(\"template\", \"\").Hidden().StringVar(&cmd.envFile)\n\tclause.Flag(\"var\", \"Define the value for a template variable with `VAR=VALUE`, e.g. --var env=prod\").Short('v').StringMapVar(&cmd.templateVars)\n\tclause.Flag(\"env\", \"The name of the environment prepared by the set command (default is `default`)\").Default(\"default\").Hidden().StringVar(&cmd.env)\n\tclause.Flag(\"no-masking\", \"Disable masking of secrets on stdout and stderr\").BoolVar(&cmd.noMasking)\n\tclause.Flag(\"masking-timeout\", \"The maximum time output is buffered. Warning: lowering this value increases the chance of secrets not being masked.\").Default(\"1s\").DurationVar(&cmd.maskingTimeout)\n\tclause.Flag(\"template-version\", \"The template syntax version to be used. The options are v1, v2, latest or auto to automatically detect the version.\").Default(\"auto\").StringVar(&cmd.templateVersion)\n\tclause.Flag(\"ignore-missing-secrets\", \"Do not return an error when a secret does not exist and use an empty value instead.\").BoolVar(&cmd.ignoreMissingSecrets)\n\tclause.Flag(\"no-prompt\", \"Do not prompt when a template variable is missing and return an error instead.\").BoolVar(&cmd.dontPromptMissingTemplateVar)\n\n\tcommand.BindAction(clause, cmd.Run)\n}", "func Execute() {\n\t// cfg contains tenant related information, e.g. `travel0-dev`,\n\t// `travel0-prod`. some of its information can be sourced via:\n\t// 1. env var (e.g. AUTH0_API_KEY)\n\t// 2. global flag (e.g. --api-key)\n\t// 3. JSON file (e.g. api_key = \"...\" in ~/.config/auth0/config.json)\n\tcli := &cli{\n\t\trenderer: display.NewRenderer(),\n\t\ttracker: analytics.NewTracker(),\n\t}\n\n\trootCmd := buildRootCmd(cli)\n\n\trootCmd.SetUsageTemplate(namespaceUsageTemplate())\n\taddPersistentFlags(rootCmd, cli)\n\taddSubcommands(rootCmd, cli)\n\n\t// TODO(cyx): backport this later on using latest auth0/v5.\n\t// rootCmd.AddCommand(actionsCmd(cli))\n\t// rootCmd.AddCommand(triggersCmd(cli))\n\n\tdefer func() {\n\t\tif v := recover(); v != nil {\n\t\t\terr := fmt.Errorf(\"panic: %v\", v)\n\n\t\t\t// If we're in development mode, we should throw the\n\t\t\t// panic for so we have less surprises. 
For\n\t\t\t// non-developers, we'll swallow the panics.\n\t\t\tif instrumentation.ReportException(err) {\n\t\t\t\tfmt.Println(panicMessage)\n\t\t\t} else {\n\t\t\t\tpanic(v)\n\t\t\t}\n\t\t}\n\t}()\n\n\t// platform specific terminal initialization:\n\t// this should run for all commands,\n\t// for most of the architectures there's no requirements:\n\tansi.InitConsole()\n\n\tcancelCtx := contextWithCancel()\n\tif err := rootCmd.ExecuteContext(cancelCtx); err != nil {\n\t\tcli.renderer.Heading(\"error\")\n\t\tcli.renderer.Errorf(err.Error())\n\n\t\tinstrumentation.ReportException(err)\n\t\tos.Exit(1)\n\t}\n\n\ttimeoutCtx, cancel := context.WithTimeout(cancelCtx, 3*time.Second)\n\t// defers are executed in LIFO order\n\tdefer cancel()\n\tdefer cli.tracker.Wait(timeoutCtx) // No event should be tracked after this has run, or it will panic e.g. in earlier deferred functions\n}", "func (c *Command) Execute(args []string) error {\n\tlog.Printf(\"[INFO] start server on port %d. Debug mode: %t\", c.Port, c.Dbg)\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tgo func() {\n\t\t// catch signal and invoke graceful termination\n\t\tstop := make(chan os.Signal, 1)\n\t\tsignal.Notify(stop, os.Interrupt, syscall.SIGTERM)\n\t\t<-stop\n\t\tlog.Printf(\"[WARN] interrupt signal\")\n\t\tcancel()\n\t}()\n\n\tserver, err := c.newServer()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = server.run(ctx)\n\tif err != nil {\n\t\tlog.Printf(\"[ERROR] terminated with error %+v\", err)\n\t\treturn err\n\t}\n\n\tlog.Printf(\"[INFO] terminated\")\n\treturn nil\n}", "func (hh *HealthCheckHandler) Execute(w http.ResponseWriter, r *http.Request) {\n\tuuid := utils.ExtractUUID(r.URL.String())\n\tif uuid == \"\" {\n\t\thttp.Error(w, marshalError(\"invalid uuid\"), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tqueryParams := r.URL.Query()\n\ttimeout, err := time.ParseDuration(queryParams[\"timeout\"][0])\n\tif err != nil {\n\t\thttp.Error(w, marshalError(err.Error()), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\thc, err := hh.db.Get(uuid)\n\tif err != nil {\n\t\thttp.Error(w, marshalError(err.Error()), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// make a copy and run the healthcheck\n\ttry := &models.HealthCheck{\n\t\tID: hc.ID,\n\t\tEndpoint: hc.Endpoint,\n\t}\n\n\ttry = service.Run(try, timeout)\n\n\tb, err := json.Marshal(try)\n\tif err != nil {\n\t\thttp.Error(w, marshalError(err.Error()), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Write(b)\n}", "func Execute() {\n\tmainCmd.Execute()\n}", "func Execute() {\n\tv := viper.GetViper()\n\tinitViper(v)\n\trootCmd := getRoot(v, ListenUDPAndServe)\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}", "func execute(w io.Writer, commandline string, req io.Reader) error {\n\targv, err := cmd.SplitQuoted(commandline)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// We treat a pipe command specially.\n\t// It will be splitted by the pipe binary.\n\tif strings.HasPrefix(commandline, \"pipe \") {\n\t\targv = []string{\"pipe\", commandline[5:]}\n\t}\n\n\tif len(argv) < 1 {\n\t\treturn fmt.Errorf(\"request contains no command\")\n\t}\n\n\t// Get installation directory of editor binary.\n\t// All subcommands must be in the same directory.\n\tvar installDir string\n\tprogname := os.Args[0]\n\tif p, err := filepath.Abs(progname); err != nil {\n\t\treturn fmt.Errorf(\"cannot get editor directory\")\n\t} else {\n\t\tinstallDir = filepath.Dir(p)\n\t}\n\n\tvar buf bytes.Buffer\n\tvar errbuf bytes.Buffer\n\targv[0] = 
filepath.Join(installDir, argv[0])\n\tctx, cancel := context.WithCancel(context.Background())\n\tc := exec.CommandContext(ctx, argv[0], argv[1:]...)\n\tc.Stdin = req\n\tc.Stdout = &buf\n\tc.Stderr = &errbuf\n\tif err := c.Start(); err != nil {\n\t\treturn err\n\t}\n\tpid := c.Process.Pid\n\tProcessList.Add(pid, argv, cancel)\n\n\terr = c.Wait()\n\tProcessList.Remove(pid)\n\tio.Copy(w, &buf)\n\n\t// Write stderr of commands to the console.\n\tif errbuf.Len() > 0 {\n\t\tif err != nil {\n\t\t\terrmsg, _ := ioutil.ReadAll(&errbuf)\n\t\t\terr = fmt.Errorf(\"%s\\n%s\\n\", err.Error(), string(errmsg))\n\t\t} else {\n\t\t\tio.Copy(os.Stdout, &errbuf)\n\t\t}\n\t}\n\treturn err\n}", "func (wc *CmdCheckWrapperCommand) Execute(args []string) error {\n\tif len(args) < wc.leastNumArgs {\n\t\terr := fmt.Errorf(\"Invalid arguments.\\nUsage: supervisord ctl %v\", wc.usage)\n\t\tfmt.Printf(\"%v\\n\", err)\n\t\treturn err\n\t}\n\treturn wc.cmd.Execute(args)\n}", "func executor(command string) (string, error) {\n\n\t// first string of command\n\t// entered with a delemiter\n\t// of space to check whether\n\t// command issues is a space\n\t// or a database command\n\tfirstTerm := extractFirstTerm(command)\n\n\t// if firstTerm matches a\n\t// spaceCommand then the\n\t// spaceExecutor is called\n\tif stringInSlice(firstTerm, spaceCommands) {\n\t\treturn spaceExecutor(command)\n\t}\n\n\t// else the databaseExecutor\n\t// command is called since\n\t// the first term would've\n\t// been the name of the database\n\treturn databaseExecutor(command)\n\n}", "func execute(fhandler *flowHandler, request []byte) ([]byte, error) {\n\tvar result []byte\n\tvar err error\n\n\tpipeline := fhandler.getPipeline()\n\n\tcurrentNode, _ := pipeline.GetCurrentNodeDag()\n\n\t// trace node - mark as start of node\n\tfhandler.tracer.startNodeSpan(currentNode.GetUniqueId(), fhandler.id)\n\n\t// Execute all operation\n\tfor _, operation := range currentNode.Operations() {\n\n\t\tswitch {\n\t\t// If function\n\t\tcase operation.Function != \"\":\n\t\t\tfmt.Printf(\"[Request `%s`] Executing function `%s`\\n\",\n\t\t\t\tfhandler.id, operation.Function)\n\t\t\tif result == nil {\n\t\t\t\tresult, err = executeFunction(pipeline, operation, request)\n\t\t\t} else {\n\t\t\t\tresult, err = executeFunction(pipeline, operation, result)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\terr = fmt.Errorf(\"Node(%s), Function(%s), error: function execution failed, %v\",\n\t\t\t\t\tcurrentNode.GetUniqueId(), operation.Function, err)\n\t\t\t\tif operation.FailureHandler != nil {\n\t\t\t\t\terr = operation.FailureHandler(err)\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t// If callback\n\t\tcase operation.CallbackUrl != \"\":\n\t\t\tfmt.Printf(\"[Request `%s`] Executing callback `%s`\\n\",\n\t\t\t\tfhandler.id, operation.CallbackUrl)\n\t\t\tif result == nil {\n\t\t\t\terr = executeCallback(pipeline, operation, request)\n\t\t\t} else {\n\t\t\t\terr = executeCallback(pipeline, operation, result)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\terr = fmt.Errorf(\"Node(%s), Callback(%s), error: callback failed, %v\",\n\t\t\t\t\tcurrentNode.GetUniqueId(), operation.CallbackUrl, err)\n\t\t\t\tif operation.FailureHandler != nil {\n\t\t\t\t\terr = operation.FailureHandler(err)\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\n\t\t// If modifier\n\t\tdefault:\n\t\t\tfmt.Printf(\"[Request `%s`] Executing modifier\\n\", fhandler.id)\n\t\t\tif result == nil {\n\t\t\t\tresult, err = 
operation.Mod(request)\n\t\t\t} else {\n\t\t\t\tresult, err = operation.Mod(result)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\terr = fmt.Errorf(\"Node(%s), error: Failed at modifier, %v\",\n\t\t\t\t\tcurrentNode.GetUniqueId(), err)\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif result == nil {\n\t\t\t\tresult = []byte(\"\")\n\t\t\t}\n\t\t}\n\t}\n\n\tfmt.Printf(\"[Request `%s`] Completed execution of Node %s\\n\", fhandler.id, currentNode.GetUniqueId())\n\n\treturn result, nil\n}", "func Execute() {\n\tcmd := arrangeCommands()\n\n\tif err := cmd.Execute(); err != nil {\n\t\tlog.LogError(err)\n\t\tos.Exit(1)\n\t}\n}", "func (controller EditController) Execute() (string, error) {\r\n commander := dishwasher.NewDishwasher()\r\n config, oops := LoadConfig(controller.Source)\r\n\r\n if oops != nil {\r\n return \"\", oops\r\n } else if config.GetEditor() == \"\" {\r\n return \"\", errors.New(\"Editor not set in configuration file\")\r\n }\r\n\r\n if controller.Source != \".\" {\r\n controller.Source = \"src/\" + controller.Source\r\n }\r\n\r\n commander.RunCustomCommand(config.GetEditor() + \" \" + controller.Source)\r\n return commander.Execute()\r\n}", "func (h *Howdoi) Execute() {\n\tflag.Parse()\n\n\tif h.ShowHelp {\n\t\tfmt.Println(help)\n\t\tos.Exit(0)\n\t}\n\n\tif h.ShowVersion {\n\t\tfmt.Println(version)\n\t\tos.Exit(0)\n\t}\n\n\t// position must be > 0\n\tif h.Position == 0 {\n\t\th.Position = 1\n\t}\n\n\terr := h.sanitizeQuestion(flag.Args())\n\tif err != nil {\n\t\tfmt.Println(help)\n\t\tos.Exit(1)\n\t}\n\n\tlinks, err := h.getLinks()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tanswer, err := h.getAnswer(links)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfmt.Println(answer)\n}", "func (a *App) HandleRun(w http.ResponseWriter, r *http.Request) {\n\n\t// Get variables from the request\n\tvars := mux.Vars(r)\n\tvar variables RequestVariable\n\terr := variables.GetVariablesFromRequestVars(vars)\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Check if the secret we passed in is valid, otherwise, return error 400\n\tif !a.Secret.Valid(variables.Secret) {\n\t\ta.DmnLogFile.Log.Println(\"Bad secret!\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tabortcmd := func(reason string) {\n\t\ta.DmnLogFile.Log.Println(reason)\n\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tvar sc ScheduledCommand\n\t\tsc.Status = Failed\n\t\tsc.Coutput = reason\n\t\tout, _ := json.Marshal(sc)\n\t\tio.WriteString(w, string(out))\n\t}\n\n\t// Select the dmn.Command, otherwise, if the dmn.Command hash cannot be found, return error 400\n\tselectedCmd, cerr := a.SelectCmd(variables.CmdHash)\n\n\tif cerr != nil {\n\t\tabortcmd(\"Unable to select hash: \" + variables.CmdHash)\n\t\treturn\n\t}\n\n\t// if selectedCmd.CmdHash == \"\" {\n\t// \tabortcmd(\"Invalid hash\")\n\t// \treturn\n\t// }\n\n\t_, err = os.Stat(selectedCmd.WorkingDirectory)\n\tif os.IsNotExist(err) {\n\t\tabortcmd(\"Invalid working directory: \" + selectedCmd.WorkingDirectory)\n\t\treturn\n\t}\n\n\ta.DmnLogFile.Log.Printf(\"Scheduling command %v: %v\\n\", selectedCmd.CmdHash, selectedCmd.Status)\n\tselectedCmd.Status = Scheduled\n\ta.CommandScheduler.QueuedCommands = append(a.CommandScheduler.QueuedCommands, selectedCmd)\n\ta.CommandScheduler.CommandQueue <- selectedCmd\n\n\ta.DmnLogFile.Log.Printf(\"Completed command %v: %v\\n\", selectedCmd.CmdHash, selectedCmd.Status)\n\n\tcompletedCommand := 
<-a.CommandScheduler.CompletedQueue\n\n\ta.DmnLogFile.Log.Printf(\"Command received from CompletedQueue: %v: %v\\n\", completedCommand.CmdHash, selectedCmd.Status)\n\n\ta.UpdateCommandDuration(selectedCmd, completedCommand.Duration)\n\n\tfor index, cmd := range a.CommandScheduler.QueuedCommands {\n\t\tif cmd.CmdHash == selectedCmd.CmdHash {\n\t\t\ta.DmnLogFile.Log.Printf(\"Updating status for %v: %v\\n\", cmd.CmdHash, Completed)\n\t\t\ta.CommandScheduler.QueuedCommands[index].Status = Completed\n\t\t\tbreak\n\t\t}\n\t}\n\n\ta.DmnLogFile.Log.Printf(\"Vacuuming command %v\\n\", selectedCmd.CmdHash)\n\ta.CommandScheduler.VacuumQueue <- selectedCmd\n\n\tout, _ := json.Marshal(completedCommand)\n\tio.WriteString(w, string(out))\n}", "func (mcr *MiddlewareClusterRepo) Execute(command string, args ...interface{}) (middleware.Result, error) {\n\tconn, err := mcr.Database.Get()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\terr = conn.Close()\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"metadata MiddlewareClusterRepo.Execute(): close database connection failed.\\n%s\", err.Error())\n\t\t}\n\t}()\n\n\treturn conn.Execute(command, args...)\n}", "func (err *ErrBytesSent) Execute() error {\n\treturn executeCommand(err.config)\n}", "func Execute(args []string) (err error) {\n\treturn Cmd.Execute(args)\n}", "func (c *Authorize) Execute(\n\tctx context.Context,\n) error {\n\terr := cli.CheckEnvWarp(ctx)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\tresult, err := cli.RunLocalCommand(ctx, warp.Command{\n\t\tType: warp.CmdTpState,\n\t\tArgs: []string{},\n\t})\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\tif result.Disconnected {\n\t\treturn errors.Trace(\n\t\t\terrors.Newf(\n\t\t\t\t\"The warp is currently disconnected. No client has access to \" +\n\t\t\t\t\t\"it and all previously authorized users will be revoked \" +\n\t\t\t\t\t\"upon reconnection.\",\n\t\t\t),\n\t\t)\n\t}\n\n\tusername := \"\"\n\tuser := \"\"\n\targs := []string{}\n\tmatches := 0\n\tfor _, u := range result.SessionState.Users {\n\t\tif !u.Hosting {\n\t\t\tif u.Username == c.usernameOrToken ||\n\t\t\t\tu.Token == c.usernameOrToken {\n\t\t\t\tmatches += 1\n\t\t\t\targs = append(args, u.Token)\n\t\t\t\tusername = u.Username\n\t\t\t\tuser = u.Token\n\t\t\t}\n\t\t}\n\t}\n\n\tif matches == 0 {\n\t\treturn errors.Trace(\n\t\t\terrors.Newf(\n\t\t\t\t\"Username or token not found: %s. Use `warp state` to \"+\n\t\t\t\t\t\"retrieve a list of currently connected warp clients.\",\n\t\t\t\tc.usernameOrToken,\n\t\t\t),\n\t\t)\n\t} else if matches > 1 {\n\t\treturn errors.Trace(\n\t\t\terrors.Newf(\n\t\t\t\t\"Username ambiguous, please provide a user token instead. \" +\n\t\t\t\t\t\"Warp clients user tokens can be retrieved with \" +\n\t\t\t\t\t\"`warp state`.\",\n\t\t\t),\n\t\t)\n\t}\n\n\tout.Normf(\"You are about to authorize the following user to write to \")\n\tout.Valuf(\"%s\\n\", os.Getenv(warp.EnvWarp))\n\tout.Normf(\" ID: \")\n\tout.Boldf(\"%s\", user)\n\tout.Normf(\" Username: \")\n\tout.Valuf(\"%s\\n\", username)\n\tout.Normf(\"Are you sure this is who you think this is? 
[Y/n]: \")\n\n\treader := bufio.NewReader(os.Stdin)\n\tconfirmation, _ := reader.ReadString('\\n')\n\tconfirmation = strings.TrimSpace(confirmation)\n\n\tif confirmation != \"\" && confirmation != \"Y\" && confirmation != \"y\" {\n\t\treturn errors.Trace(\n\t\t\terrors.Newf(\"Authorizxation aborted by user.\"),\n\t\t)\n\t}\n\tresult, err = cli.RunLocalCommand(ctx, warp.Command{\n\t\tType: warp.CmdTpAuthorize,\n\t\tArgs: args,\n\t})\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\tout.Normf(\"\\n\")\n\tout.Normf(\"Done! You can revoke authorizations at any time with \")\n\tout.Boldf(\"warp revoke\\n\")\n\tout.Normf(\"\\n\")\n\n\tPrintSessionState(ctx, result.Disconnected, result.SessionState)\n\n\treturn nil\n}", "func Execute() {\n\tAddCommands()\n\tIpvanishCmd.Execute()\n\t//\tutils.StopOnErr(IpvanishCmd.Execute())\n}", "func (m *mware) Exec(ctx Context, header []byte, n interface{}) (interface{}, error) {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\tif m.scanner == nil {\n\t\tm.scanner = bufio.NewScanner(m.stdout)\n\t\tctx.Logger.Debug().Msg(\"scanner created\")\n\t}\n\n\tif len(header) > 0 {\n\t\tm.stdin.Write(header)\n\t\tm.stdin.Write([]byte{' '})\n\t}\n\n\terr := json.NewEncoder(m.stdin).Encode(n)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to encode: %w\", err)\n\t}\n\n\tctx.Logger.Debug().Msgf(\"successfully encoded to stdin with header %v\", header)\n\n\tif m.scanner.Scan() {\n\t\tctx.Logger.Debug().Msg(\"scanner value received\")\n\n\t\tif err := json.Unmarshal(m.scanner.Bytes(), n); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to unmarshal: %w\", err)\n\t\t}\n\n\t\tctx.Logger.Debug().Msg(\"successfully unmarshaled scanner value\")\n\t\treturn n, nil\n\t}\n\n\treturn nil, m.scanner.Err()\n}", "func Command() func(w http.ResponseWriter, r *http.Request) {\n\tschema := getJSONValidator(&commandSchemaStruct{})\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tbody, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\tjsonResponse(w, \"fail\", map[string]interface{}{\n\t\t\t\t\"message\": err.Error(),\n\t\t\t})\n\t\t\treturn\n\t\t}\n\t\terr = validatePOSTRequest(body, schema)\n\t\tif err != nil {\n\t\t\tjsonResponse(w, \"fail\", map[string]interface{}{\n\t\t\t\t\"message\": err.Error(),\n\t\t\t})\n\t\t\treturn\n\t\t}\n\t\tvar responseStruct = &commandSchemaStruct{}\n\t\tjson.Unmarshal(body, responseStruct)\n\t\tif err != nil {\n\t\t\tjsonResponse(w, \"error\", map[string]interface{}{\n\t\t\t\t\"message\": err.Error(),\n\t\t\t})\n\t\t}\n\t\t// the json schema should ensure that these are the only possibilities\n\t\tif responseStruct.Command == \"start\" {\n\t\t\terr = container.ExecuteCode()\n\t\t\tif err != nil {\n\t\t\t\tjsonResponse(w, \"error\", map[string]interface{}{\n\t\t\t\t\t\"message\": err.Error(),\n\t\t\t\t})\n\t\t\t\treturn\n\t\t\t}\n\t\t\tjsonResponse(w, \"success\", map[string]interface{}{\n\t\t\t\t\"message\": \"code started\",\n\t\t\t})\n\t\t\treturn\n\t\t} else if responseStruct.Command == \"kill\" {\n\t\t\terr = container.KillCode()\n\t\t\tif err != nil {\n\t\t\t\tjsonResponse(w, \"error\", map[string]interface{}{\n\t\t\t\t\t\"message\": err.Error(),\n\t\t\t\t})\n\t\t\t\treturn\n\t\t\t}\n\t\t\tjsonResponse(w, \"success\", map[string]interface{}{\n\t\t\t\t\"message\": \"code killed\",\n\t\t\t})\n\t\t\treturn\n\t\t} else {\n\t\t\tjsonResponse(w, \"error\", map[string]interface{}{\n\t\t\t\t\"message\": \"need either kill or start\",\n\t\t\t})\n\t\t\treturn\n\t\t}\n\t}\n}", "func SlashCommandHandler(bots 
map[string][]robots.Robot) func(w http.ResponseWriter, r *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\terr := r.ParseForm()\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\td := schema.NewDecoder()\n\t\tcommand := new(robots.SlashCommand)\n\t\terr = d.Decode(command, r.PostForm)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Couldn't parse post request:\", err)\n\t\t}\n\t\tif command.Command == \"\" || command.Token == \"\" {\n\t\t\tlog.Printf(\"[DEBUG] Ignoring request from unidentified source: %s - %s\", command.Token, r.Host)\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tcommand.Robot = command.Command[1:]\n\n\t\tif token := os.Getenv(fmt.Sprintf(\"%s_SLACK_TOKEN\", strings.ToUpper(command.Robot))); token != \"\" && token != command.Token {\n\t\t\tlog.Printf(\"[DEBUG] Ignoring request from unidentified source: %s - %s\", command.Token, r.Host)\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t}\n\t\trobots := getRobots(bots, command.Robot)\n\t\tif len(robots) == 0 {\n\t\t\tplainResp(w, \"No robot for that command yet :(\")\n\t\t\treturn\n\t\t}\n\t\tresp := \"\"\n\t\tfor _, robot := range robots {\n\t\t\tresp += fmt.Sprintf(\"\\n%s\", robot.Run(&command.Payload))\n\t\t}\n\t\tplainResp(w, strings.TrimSpace(resp))\n\t}\n}", "func Execute(ctx context.Context, query string, vars map[string]interface{}) (map[string]*json.RawMessage, error) {\n\tmediaQuery := graphql.NewRequest(query)\n\tfor k, v := range vars {\n\t\tmediaQuery.Var(k, v)\n\t}\n\n\tvar res map[string]*json.RawMessage\n\tif err := client.Run(ctx, mediaQuery, &res); err != nil {\n\t\treturn map[string]*json.RawMessage{}, err\n\t}\n\treturn res, nil\n}", "func (tpl *Template) Execute(wr io.Writer, data interface{}) error {\n\treturn tpl.Run(wr, data)\n}", "func (x *CtlCommand) Execute(args []string) error {\n\tif len(args) == 0 {\n\t\treturn nil\n\t}\n\n\trpcc := x.createRPCClient()\n\tverb := args[0]\n\n\tswitch verb {\n\n\t////////////////////////////////////////////////////////////////////////////////\n\t// STATUS\n\t////////////////////////////////////////////////////////////////////////////////\n\tcase \"status\":\n\t\tx.status(rpcc, args[1:])\n\n\t\t////////////////////////////////////////////////////////////////////////////////\n\t\t// START or STOP\n\t\t////////////////////////////////////////////////////////////////////////////////\n\tcase \"start\", \"stop\":\n\t\tx.startStopProcesses(rpcc, verb, args[1:])\n\n\t\t////////////////////////////////////////////////////////////////////////////////\n\t\t// SHUTDOWN\n\t\t////////////////////////////////////////////////////////////////////////////////\n\tcase \"shutdown\":\n\t\tx.shutdown(rpcc)\n\tcase \"reload\":\n\t\tx.reload(rpcc)\n\tcase \"signal\":\n\t\tsigName, processes := args[1], args[2:]\n\t\tx.signal(rpcc, sigName, processes)\n\tcase \"pid\":\n\t\tx.getPid(rpcc, args[1])\n\tdefault:\n\t\tfmt.Println(\"unknown command\")\n\t}\n\n\treturn nil\n}", "func Execute(vers string, writeKey string) {\n\tchamberVersion = vers\n\n\tanalyticsWriteKey = writeKey\n\tanalyticsEnabled = analyticsWriteKey != \"\"\n\n\tif cmd, err := RootCmd.ExecuteC(); err != nil {\n\t\tif strings.Contains(err.Error(), \"arg(s)\") || strings.Contains(err.Error(), \"usage\") {\n\t\t\tcmd.Usage()\n\t\t}\n\t\tos.Exit(1)\n\t}\n}", "func (t *Template) Execute(w io.Writer, data interface{}) error {\n\t// TODO\n\treturn nil\n}", "func UseMiddleware(exec Executor, middleware ...ExecutorMiddleware) Executor {\n\t// 
Apply in reverse order.\n\tfor i := len(middleware) - 1; i >= 0; i-- {\n\t\tm := middleware[i]\n\t\texec = m(exec)\n\t}\n\treturn exec\n}", "func Execute() {\r\n\r\n\t// Create a database connection (Don't require DB for now)\r\n\tif err := database.Connect(applicationName); err != nil {\r\n\t\tchalker.Log(chalker.ERROR, fmt.Sprintf(\"Error connecting to database: %s\", err.Error()))\r\n\t} else {\r\n\t\t// Set this flag for caching detection\r\n\t\tdatabaseEnabled = true\r\n\r\n\t\t// Defer the database disconnection\r\n\t\tdefer func() {\r\n\t\t\tdbErr := database.GarbageCollection()\r\n\t\t\tif dbErr != nil {\r\n\t\t\t\tchalker.Log(chalker.ERROR, fmt.Sprintf(\"Error in database GarbageCollection: %s\", dbErr.Error()))\r\n\t\t\t}\r\n\r\n\t\t\tif dbErr = database.Disconnect(); dbErr != nil {\r\n\t\t\t\tchalker.Log(chalker.ERROR, fmt.Sprintf(\"Error in database Disconnect: %s\", dbErr.Error()))\r\n\t\t\t}\r\n\t\t}()\r\n\t}\r\n\r\n\t// Run root command\r\n\ter(rootCmd.Execute())\r\n\r\n\t// Generate documentation from all commands\r\n\tif generateDocs {\r\n\t\tgenerateDocumentation()\r\n\t}\r\n\r\n\t// Flush cache?\r\n\tif flushCache && databaseEnabled {\r\n\t\tif dbErr := database.Flush(); dbErr != nil {\r\n\t\t\tchalker.Log(chalker.ERROR, fmt.Sprintf(\"Error in database Flush: %s\", dbErr.Error()))\r\n\t\t} else {\r\n\t\t\tchalker.Log(chalker.SUCCESS, \"Successfully flushed the local database cache\")\r\n\t\t}\r\n\t}\r\n}", "func (self *Controller) ExecuteCommand(notification interfaces.INotification) {\n\tself.commandMapMutex.RLock()\n\tdefer self.commandMapMutex.RUnlock()\n\n\tvar commandFunc = self.commandMap[notification.Name()]\n\tif commandFunc == nil {\n\t\treturn\n\t}\n\tcommandInstance := commandFunc()\n\tcommandInstance.InitializeNotifier(self.Key)\n\tcommandInstance.Execute(notification)\n}", "func (h *PipelineManager) Handle(ctx context.Context, instruction Instruction) error {\n\thf := wrapMiddleware(h.handle, h.mids...)\n\treturn hf(ctx, instruction)\n}", "func (command *Command) trigger(ctx *Ctx) {\n\t// Check if the first argument matches a sub command\n\tif len(ctx.Arguments.arguments) > 0 {\n\t\targument := ctx.Arguments.Get(0).Raw()\n\t\tsubCommand := command.GetSubCmd(argument)\n\t\tif subCommand != nil {\n\t\t\t// Define the arguments for the sub command\n\t\t\targuments := ParseArguments(\"\")\n\t\t\tif ctx.Arguments.Amount() > 1 {\n\t\t\t\targuments = ParseArguments(strings.Join(strings.Split(ctx.Arguments.Raw(), \" \")[1:], \" \"))\n\t\t\t}\n\n\t\t\t// Trigger the sub command\n\t\t\tsubCommand.trigger(&Ctx{\n\t\t\t\tSession: ctx.Session,\n\t\t\t\tEvent: ctx.Event,\n\t\t\t\tArguments: arguments,\n\t\t\t\tCustomObjects: ctx.CustomObjects,\n\t\t\t\tRouter: ctx.Router,\n\t\t\t\tCommand: subCommand,\n\t\t\t})\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Check if the user is being rate limited\n\tif command.RateLimiter != nil && !command.RateLimiter.NotifyExecution(ctx) {\n\t\treturn\n\t}\n\n\t// Run all middlewares assigned to this command\n\tfor _, flag := range command.Flags {\n\t\tfor _, middleware := range ctx.Router.Middlewares[flag] {\n\t\t\tif !middleware(ctx) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\t// Handle this command if the first argument matched no sub command\n\tcommand.Handler(ctx)\n}", "func execute() {\n\terr := rootCmd.Execute()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(0)\n\t}\n\n\t// viper.Debug()\n\n\tif !runSrv {\n\t\tos.Exit(0)\n\t}\n}", "func Execute(w io.Writer, fmt string, data interface{}, opt ...Option) error {\n\tvar funcs 
map[string]interface{}\n\tfor _, o := range opt {\n\t\tswitch o := o.(type) {\n\t\tcase optFuncMap:\n\t\t\tfuncs = o\n\t\t}\n\t}\n\tt, err := parseTemplate(fmt, funcs)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif ctx, ok := data.(*Context); ok {\n\t\tsort.Strings(ctx.Meta.Tags)\n\t}\n\treturn t.Execute(w, data)\n}", "func (h *FileHandler) Execute(ctx context.Context, args ...string) error {\n\tcmd := exec.CommandContext(ctx, h.file.Name(), args...)\n\tcmd.Env = os.Environ()\n\tcmd.Stdout = os.Stderr\n\tcmd.Stderr = os.Stderr\n\treturn cmd.Run()\n}" ]
[ "0.6153839", "0.60812134", "0.6061275", "0.57693285", "0.56747967", "0.5673223", "0.56219643", "0.562186", "0.5618159", "0.55443805", "0.55078334", "0.5487502", "0.54630804", "0.54364175", "0.5420066", "0.54042244", "0.5403575", "0.54031974", "0.5371576", "0.53653485", "0.5364604", "0.53287834", "0.5316462", "0.5314177", "0.5313698", "0.5285926", "0.5285674", "0.5278307", "0.52646625", "0.5243986", "0.5236011", "0.52292675", "0.5228697", "0.5217903", "0.5211341", "0.5209491", "0.52064544", "0.5203705", "0.51970863", "0.51923037", "0.5184854", "0.5178207", "0.51779795", "0.51763475", "0.51550287", "0.5150796", "0.5138971", "0.5132183", "0.51213354", "0.5116588", "0.5114947", "0.5104314", "0.5104135", "0.5102827", "0.50986683", "0.509414", "0.509401", "0.5091595", "0.508704", "0.508703", "0.50847095", "0.50837266", "0.5083682", "0.5083517", "0.508316", "0.5079821", "0.50741357", "0.50714386", "0.50683296", "0.50676835", "0.50669456", "0.5066822", "0.506503", "0.5051836", "0.5051122", "0.50475454", "0.5042634", "0.50407815", "0.5037203", "0.5035098", "0.5034904", "0.50343233", "0.5032897", "0.50326586", "0.5029456", "0.5026298", "0.5025816", "0.5025786", "0.5024676", "0.5022554", "0.5018112", "0.5016576", "0.5014413", "0.50137204", "0.5009731", "0.5001245", "0.49949968", "0.49926773", "0.49852803", "0.49852407" ]
0.5757006
4
Transaction returns a middleware.Transaction that could execute multiple commands as a transaction
func (r *Repository) Transaction() (middleware.Transaction, error) { return r.Database.Transaction() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Transaction(next http.Handler) http.Handler {\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\tt, ctx := orm.NewTransaction(r.Context())\n\t\tdefer func() {\n\t\t\tif rec := recover(); rec != nil {\n\t\t\t\tt.Rollback()\n\t\t\t\t// Panic to let recoverer handle 500\n\t\t\t\tpanic(rec)\n\t\t\t} else {\n\t\t\t\terr := t.Commit()\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t\tnext.ServeHTTP(w, r.WithContext(ctx))\n\t}\n\treturn http.HandlerFunc(fn)\n}", "func (mcr *MiddlewareClusterRepo) Transaction() (middleware.Transaction, error) {\n\treturn mcr.Database.Transaction()\n}", "func (s *Session) Transaction(f func(*Session) (interface{}, error)) (interface{}, error) {\n\terr := s.Begin()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\td, err := f(s)\n\tif err != nil {\n\t\ts.RollBack()\n\t} else {\n\t\ts.Commit()\n\t}\n\treturn d, err\n}", "func (s *SessionStore) Transaction(callback func(*SessionStore) error) error {\n\tif callback == nil {\n\t\treturn kallax.ErrInvalidTxCallback\n\t}\n\n\treturn s.Store.Transaction(func(store *kallax.Store) error {\n\t\treturn callback(&SessionStore{store})\n\t})\n}", "func (db *DB) Transaction(fc func(db *DB) error) (err error) {\n\tpanicked := true\n\ttx := &DB{db.DB.Begin()}\n\n\tdefer func() {\n\t\t// Make sure to rollback when panic, Block error or Commit error\n\t\tif panicked || err != nil {\n\t\t\ttx.Rollback()\n\t\t}\n\t}()\n\n\terr = fc(tx)\n\tif err == nil {\n\t\terr = tx.DB.Commit().Error\n\t}\n\tpanicked = false\n\treturn\n}", "func TransactionMiddleware() middleware.TransactionMiddleware {\n\tdb := gormer.GetDB()\n\ttxnDataSQL := gormrepo.NewTxnDataSQL(db)\n\ttransactionMiddleware := middleware.NewTransactionMiddleware(txnDataSQL)\n\treturn transactionMiddleware\n}", "func (s *PetStore) Transaction(callback func(*PetStore) error) error {\n\tif callback == nil {\n\t\treturn kallax.ErrInvalidTxCallback\n\t}\n\n\treturn s.Store.Transaction(func(store *kallax.Store) error {\n\t\treturn callback(&PetStore{store})\n\t})\n}", "func (conn Connection) Transaction() (datastore.Transact, error) {\n\tif conn.Config.DbType == config.PGDriver {\n\t\treturn conn.SQLDriver.Transaction()\n\t}\n\treturn conn.Controller.Transaction()\n}", "func (tx *tX) Transaction() (*tX, error) {\n\treturn tx, nil\n}", "func (c *Connection) Transaction(fn func(*Connection) error) error {\n\td := c.C.Begin()\n\tif err := fn(&Connection{C: d, log: c.log}); err != nil {\n\t\td.Rollback()\n\t\treturn err\n\t}\n\td.Commit()\n\treturn nil\n}", "func NewTransaction(commands []common.Command) Transaction {\n\tT := Transaction{\n\t\tTransactionID: xid.New(),\n\t\tcommandsInTransaction: commands,\n\t\tTransactionState: Queued,\n\t\tCurrentComand: \"None in excecution yet.\",\n\t}\n\n\tfor _, command := range commands {\n\t\tT.TransactionQueries = append(T.TransactionQueries, command.String())\n\t}\n\n\treturn T\n}", "func Transaction(ctx context.Context, db *sql.DB, f func(tx *sql.Tx) error) (err error) {\n\tfinish := func(tx *sql.Tx) {\n\t\tif err != nil {\n\t\t\tif err2 := tx.Rollback(); err2 != nil {\n\t\t\t\terr = multierror.Append(err, err2)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\terr = tx.Commit()\n\t}\n\n\tspan, ctx := opentracing.StartSpanFromContext(ctx, \"Transaction\")\n\tdefer func() {\n\t\tif err != nil {\n\t\t\text.Error.Set(span, true)\n\t\t\tspan.SetTag(\"err\", err.Error())\n\t\t}\n\t\tspan.Finish()\n\t}()\n\n\ttx, err := db.BeginTx(ctx, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer finish(tx)\n\treturn f(tx)\n}", 
"func (dao *PagesDao) Transaction(ctx context.Context, f func(ctx context.Context, tx *gdb.TX) error) (err error) {\n\treturn dao.Ctx(ctx).Transaction(ctx, f)\n}", "func (dao *ConfigAuditProcessDao) Transaction(ctx context.Context, f func(ctx context.Context, tx *gdb.TX) error) (err error) {\n\treturn dao.Ctx(ctx).Transaction(ctx, f)\n}", "func (dao *InfoDao) Transaction(ctx context.Context, f func(ctx context.Context, tx *gdb.TX) error) (err error) {\n\treturn dao.Ctx(ctx).Transaction(ctx, f)\n}", "func (d DB) Transaction(f func(DB) error) error {\n\tif _, ok := d.dbProxy.(*sql.Tx); ok {\n\t\t// Already in a nested transaction\n\t\treturn f(d)\n\t}\n\n\ttx, err := d.dbProxy.(*sql.DB).Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = f(DB{\n\t\tdbProxy: tx,\n\t\tStatementBuilderType: statementBuilder(tx),\n\t})\n\tif err != nil {\n\t\t// Rollback error is ignored as we already have one in progress\n\t\tif err2 := tx.Rollback(); err2 != nil {\n\t\t\tlevel.Warn(util_log.Logger).Log(\"msg\", \"transaction rollback error (ignored)\", \"err\", err2)\n\t\t}\n\t\treturn err\n\t}\n\treturn tx.Commit()\n}", "func (s *PollStore) Transaction(callback func(*PollStore) error) error {\n\tif callback == nil {\n\t\treturn kallax.ErrInvalidTxCallback\n\t}\n\n\treturn s.Store.Transaction(func(store *kallax.Store) error {\n\t\treturn callback(&PollStore{store})\n\t})\n}", "func (dao *SysConfigDao) Transaction(ctx context.Context, f func(ctx context.Context, tx gdb.TX) error) (err error) {\n\treturn dao.Ctx(ctx).Transaction(ctx, f)\n}", "func (s *Session) Transaction() *Transaction {\n\t// acquire lock\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\n\treturn s.txn\n}", "func (s *UserStore) Transaction(callback func(*UserStore) error) error {\n\tif callback == nil {\n\t\treturn kallax.ErrInvalidTxCallback\n\t}\n\n\treturn s.Store.Transaction(func(store *kallax.Store) error {\n\t\treturn callback(&UserStore{store})\n\t})\n}", "func (s *PollVoteStore) Transaction(callback func(*PollVoteStore) error) error {\n\tif callback == nil {\n\t\treturn kallax.ErrInvalidTxCallback\n\t}\n\n\treturn s.Store.Transaction(func(store *kallax.Store) error {\n\t\treturn callback(&PollVoteStore{store})\n\t})\n}", "func (s *PersonStore) Transaction(callback func(*PersonStore) error) error {\n\tif callback == nil {\n\t\treturn kallax.ErrInvalidTxCallback\n\t}\n\n\treturn s.Store.Transaction(func(store *kallax.Store) error {\n\t\treturn callback(&PersonStore{store})\n\t})\n}", "func (s *PollOptionStore) Transaction(callback func(*PollOptionStore) error) error {\n\tif callback == nil {\n\t\treturn kallax.ErrInvalidTxCallback\n\t}\n\n\treturn s.Store.Transaction(func(store *kallax.Store) error {\n\t\treturn callback(&PollOptionStore{store})\n\t})\n}", "func (c *Conn) Transaction(fn func(*Conn) error) error {\r\n\tvar (\r\n\t\ttx = c.Begin()\r\n\t\tconn = &Conn{}\r\n\t)\r\n\tcopier.Copy(conn, c)\r\n\tconn.DB = tx\r\n\tif err := fn(conn); err != nil {\r\n\t\ttx.Rollback()\r\n\t\treturn err\r\n\t}\r\n\ttx.Commit()\r\n\treturn nil\r\n}", "func (m Middleware) Tx(db *sql.DB) TxFunc {\n\treturn func(f func(tx daos.Transaction, w http.ResponseWriter, r *http.Request) error) http.HandlerFunc {\n\t\treturn func(w http.ResponseWriter, r *http.Request) {\n\n\t\t\tt, err := db.Begin()\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\n\t\t\tdefer func() {\n\t\t\t\tl := m.log.WithRequest(r)\n\t\t\t\tif p := recover(); p != nil {\n\t\t\t\t\tt.Rollback()\n\t\t\t\t\tl.Info(\"transaction rollbacked\")\n\t\t\t\t\tpanic(p)\n\t\t\t\t} else if err 
!= nil {\n\t\t\t\t\tt.Rollback()\n\t\t\t\t\tl.Info(\"transaction rollbacked\")\n\t\t\t\t\tpanic(err)\n\t\t\t\t} else {\n\t\t\t\t\terr = t.Commit()\n\t\t\t\t\tl.Info(\"transaction commited\")\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\terr = f(t, w, r)\n\t\t}\n\t}\n}", "func (ingest *Ingestion) Transaction(\n\tid int64,\n\ttx *core.Transaction,\n\tfee *core.TransactionFee,\n) error {\n\n\tsql := ingest.transactionInsertBuilder(id, tx, fee)\n\t_, err := ingest.DB.Exec(sql)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func Transaction(db *sql.DB, fns ...func(DB) error) error {\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, fn := range fns {\n\t\terr := fn(tx)\n\t\tif err != nil {\n\t\t\ttx.Rollback()\n\t\t\treturn err\n\t\t}\n\t}\n\n\terr = tx.Commit()\n\terr = interpretScanError(err)\n\treturn err\n}", "func (c *Conn) Transaction(t TransactionType, f func(c *Conn) error) error {\n\tvar err error\n\tif c.nTransaction == 0 {\n\t\terr = c.BeginTransaction(t)\n\t} else {\n\t\terr = c.Savepoint(strconv.Itoa(int(c.nTransaction)))\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.nTransaction++\n\tdefer func() {\n\t\tc.nTransaction--\n\t\tif err != nil {\n\t\t\t_, ko := err.(*ConnError)\n\t\t\tif c.nTransaction == 0 || ko {\n\t\t\t\t_ = c.Rollback()\n\t\t\t} else {\n\t\t\t\tif rerr := c.RollbackSavepoint(strconv.Itoa(int(c.nTransaction))); rerr != nil {\n\t\t\t\t\tLog(-1, rerr.Error())\n\t\t\t\t} else if rerr := c.ReleaseSavepoint(strconv.Itoa(int(c.nTransaction))); rerr != nil {\n\t\t\t\t\tLog(-1, rerr.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tif c.nTransaction == 0 {\n\t\t\t\terr = c.Commit()\n\t\t\t} else {\n\t\t\t\terr = c.ReleaseSavepoint(strconv.Itoa(int(c.nTransaction)))\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\t_ = c.Rollback()\n\t\t\t}\n\t\t}\n\t}()\n\terr = f(c)\n\treturn err\n}", "func (context *Context) Transaction(req map[string]interface{}) (rsp map[string]interface{}, err error) {\n\t// Handle the special case where we are just processing a response\n\tvar reqJSON []byte\n\tif req == nil {\n\t\treqJSON = []byte(\"\")\n\t} else {\n\n\t\t// Marshal the request to JSON\n\t\treqJSON, err = note.JSONMarshal(req)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"error marshaling request for module: %s\", err)\n\t\t\treturn\n\t\t}\n\n\t}\n\n\t// Perform the transaction\n\trspJSON, err2 := context.TransactionJSON(reqJSON)\n\tif err2 != nil {\n\t\terr = fmt.Errorf(\"error from TransactionJSON: %s\", err2)\n\t\treturn\n\t}\n\n\t// Unmarshal for convenience of the caller\n\terr = note.JSONUnmarshal(rspJSON, &rsp)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"error unmarshaling reply from module: %s %s: %s\", err, note.ErrCardIo, rspJSON)\n\t\treturn\n\t}\n\n\t// Done\n\treturn\n}", "func (s *StatsPeriodStore) Transaction(callback func(*StatsPeriodStore) error) error {\n\tif callback == nil {\n\t\treturn kallax.ErrInvalidTxCallback\n\t}\n\n\treturn s.Store.Transaction(func(store *kallax.Store) error {\n\t\treturn callback(&StatsPeriodStore{store})\n\t})\n}", "func (db *DB) Transaction(ctx context.Context, fn TxHandlerFunc) error {\n\tdb.mu.Lock()\n\tdefer db.mu.Unlock()\n\n\torigin, err := db.master.BeginTx(ctx, nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to begin transaction: %v\", err)\n\t}\n\ttx := &Tx{origin}\n\n\tif err := fn(ctx, tx); err != nil {\n\t\tif re := tx.parent.Rollback(); re != nil {\n\t\t\tif re.Error() != sql.ErrTxDone.Error() {\n\t\t\t\treturn fmt.Errorf(\"fialed to rollback: %v\", err)\n\t\t\t}\n\t\t}\n\t\treturn 
fmt.Errorf(\"failed to execcute transaction: %v\", err)\n\t}\n\treturn tx.parent.Commit()\n}", "func (t *Transaction) Transaction() <-chan *interfaces.TxWithBlock {\n\treturn t.sendTxFound\n}", "func Transaction(ctx context.Context, driver Driver, opts *TxOptions,\n\thandler func(driver Driver) error) error {\n\n\tif driver == nil {\n\t\treturn errors.Wrap(ErrInvalidDriver, \"makroud: cannot create a transaction\")\n\t}\n\n\ttx, err := driver.Begin(ctx, opts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = handler(tx)\n\tif err != nil {\n\n\t\tthr := tx.Rollback()\n\t\tif thr != nil && driver.HasObserver() {\n\t\t\tthr = errors.Wrap(thr, \"makroud: trying to rollback transaction\")\n\t\t\tdriver.Observer().OnRollback(thr, nil)\n\t\t}\n\n\t\treturn err\n\t}\n\n\terr = tx.Commit()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func Transaction(db *sql.DB, f func()) {\n\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\terr := tx.Rollback()\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tpanic(r)\n\t\t} else {\n\t\t\terr = tx.Commit()\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}\n\t}()\n\tf()\n}", "func (o *Customer) Transaction(mods ...qm.QueryMod) transactionQuery {\n\tqueryMods := []qm.QueryMod{\n\t\tqm.Where(\"id=?\", o.TransactionID),\n\t}\n\n\tqueryMods = append(queryMods, mods...)\n\n\tquery := Transactions(queryMods...)\n\tqueries.SetFrom(query.Query, \"\\\"transactions\\\"\")\n\n\treturn query\n}", "func Transactions(exec boil.Executor, mods ...qm.QueryMod) transactionQuery {\n\tmods = append(mods, qm.From(\"`transaction`\"))\n\treturn transactionQuery{NewQuery(exec, mods...)}\n}", "func (c *Client) Transaction() <-chan *interfaces.TxWithBlock {\n\treturn c.transactions\n}", "func NewTransaction(r *http.Request) *Transaction {\n\treturn &Transaction{r, nil, bodyprocessor.NewBodyProcessor(r), nil, time.Now(), make(map[string]interface{})}\n}", "func NewTransaction(opts *Opts, log logging.Logger) *Transaction {\n\tt := Transaction{\n\t\tattempts: opts.MaxTransactionAttempts,\n\t}\n\n\tt.Transaction.Starter = &t\n\tt.Transaction.Stopper = &t\n\treturn &t\n}", "func (tx *txDriver) Tx(context.Context) (dialect.Tx, error) { return tx, nil }", "func (tx *txDriver) Tx(context.Context) (dialect.Tx, error) { return tx, nil }", "func NewTransaction(writeable bool, db *DB) *Tx {\n\treturn &Tx{\n\t\tid: time.Now().UnixNano(),\n\t\tdb: db,\n\t\twrite: writeable,\n\t\trollbacks: make(map[string]*rollbackInfo),\n\t\trollbackBuckets: make(map[string]*bucket),\n\t\tcommits: make(map[string]*Item),\n\t\thooks: make([]func(), 0),\n\t}\n}", "func (m *Mongodb) ActionTransaction() {\n\t// 获取mongo的上下文适配对象\n\tmongo := m.GetObj(adapter.NewMongodb(\"test\", \"test\")).(*adapter.Mongodb)\n\tret,err:=mongo.DoTransaction(context.Background(), func(sessCtx context.Context) (i interface{}, e error) {\n\t\t// 通过map插入\n\t\tdoc1 := map[string]interface{}{\"f1\": \"val1\", \"f2\": true, \"f3\": 99}\n\t\tret,err := mongo.InsertOneCtx(sessCtx,doc1)\n\t\tif err !=nil {\n\t\t\treturn ret,err\n\t\t}\n\t\tfmt.Println(\"insert one doc1 id:\",ret.InsertedID,\"err\", err)\n\t\t// 通过bson.M插入\n\n\t\tdoc2 := bson.M{\"f1\": \"val2\", \"f2\": false, \"f3\": 10}\n\t\tret,err = mongo.InsertOneCtx(sessCtx,doc2)\n\t\tfmt.Println(\"insert one doc2 id:\",ret.InsertedID,\"err\", err)\n\t\treturn nil,nil\n\t})\n\n\tfmt.Println(\"itransaction:\",ret,\"err\", err)\n}", "func (T *Transaction) excecuteTransaction() 
{\n\tT.TransactionState = InProgress\n\n\tMutexesMap := new(sync.Map)\n\n\t//ADD A RWMUTEX FOR EACH TABLE INVOLVED IN THIS TRANSACTION EXCECUTION QUEUE.\n\tfor i := 0; i < len(T.commandsInTransaction); i++ {\n\t\tMutexesMap.Store(T.commandsInTransaction[i].TableName(), &sync.RWMutex{})\n\t}\n\n\t//INSERT LOCKS IN-BETWEEN COMMANDS.\n\tXSLOCKEDTRANSACTION := make([]interface{}, 0)\n\n\tfor i := 0; i < len(T.commandsInTransaction); i++ {\n\t\tif T.commandsInTransaction[i].InstructionType == 1 {\n\t\t\tXSLOCKEDTRANSACTION = append(XSLOCKEDTRANSACTION, \"RLOCK\")\n\t\t\tXSLOCKEDTRANSACTION = append(XSLOCKEDTRANSACTION, T.commandsInTransaction[i])\n\t\t\tXSLOCKEDTRANSACTION = append(XSLOCKEDTRANSACTION, \"RUNLOCK\")\n\t\t} else {\n\t\t\tXSLOCKEDTRANSACTION = append(XSLOCKEDTRANSACTION, \"LOCK\")\n\t\t\tXSLOCKEDTRANSACTION = append(XSLOCKEDTRANSACTION, T.commandsInTransaction[i])\n\t\t\tXSLOCKEDTRANSACTION = append(XSLOCKEDTRANSACTION, \"UNLOCK\")\n\t\t}\n\t}\n\n\t//EXCECUTE TRANSACTION EXCECUTION QUEUE.\n\tfor i := 0; i < len(XSLOCKEDTRANSACTION); i++ {\n\t\tswitch C := XSLOCKEDTRANSACTION[i].(type) {\n\t\tcase string:\n\t\t\tif C == \"RLOCK\" {\n\t\t\t\tLock, _ := MutexesMap.Load((XSLOCKEDTRANSACTION[i+1].(common.Command)).TableName())\n\t\t\t\t(Lock.(*sync.RWMutex)).RLock()\n\t\t\t} else if C == \"RUNLOCK\" {\n\t\t\t\tLock, _ := MutexesMap.Load((XSLOCKEDTRANSACTION[i-1].(common.Command)).TableName())\n\t\t\t\t(Lock.(*sync.RWMutex)).RUnlock()\n\t\t\t} else if C == \"LOCK\" {\n\t\t\t\tLock, _ := MutexesMap.Load((XSLOCKEDTRANSACTION[i+1].(common.Command)).TableName())\n\t\t\t\t(Lock.(*sync.RWMutex)).Lock()\n\t\t\t} else if C == \"UNLOCK\" {\n\t\t\t\tLock, _ := MutexesMap.Load((XSLOCKEDTRANSACTION[i-1].(common.Command)).TableName())\n\t\t\t\t(Lock.(*sync.RWMutex)).Unlock()\n\t\t\t}\n\t\tcase common.Command:\n\t\t\tT.CurrentComand = C.String()\n\t\t\tC.Instruction()\n\t\t\tDelay, _ := config.CommandsDelay.Int64()\n\t\t\ttime.Sleep(time.Second * time.Duration(Delay))\n\t\t\tT.CurrentComand = \"Waiting...\"\n\t\tdefault:\n\t\t\tlog.Println(\"Transaction Manager: Expected string or common.Command object\")\n\t\t}\n\n\t}\n\n\tT.TransactionState = Done\n\tT.CurrentComand = \"Transaction Finished.\"\n\texecWaitGroup.Done()\n\n}", "func (_ReserveSpenderMultiSig *ReserveSpenderMultiSigCaller) Transactions(opts *bind.CallOpts, arg0 *big.Int) (struct {\n\tDestination common.Address\n\tValue *big.Int\n\tData []byte\n\tExecuted bool\n}, error) {\n\tret := new(struct {\n\t\tDestination common.Address\n\t\tValue *big.Int\n\t\tData []byte\n\t\tExecuted bool\n\t})\n\tout := ret\n\terr := _ReserveSpenderMultiSig.contract.Call(opts, out, \"transactions\", arg0)\n\treturn *ret, err\n}", "func (s *Session) TransactionTx(f func(*Session) (interface{}, error), opts *sql.TxOptions) (interface{}, error) {\n\terr := s.BeginTx(opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\td, err := f(s)\n\tif err != nil {\n\t\ts.RollBack()\n\t} else {\n\t\ts.Commit()\n\t}\n\treturn d, err\n}", "func (_ReserveSpenderMultiSig *ReserveSpenderMultiSigSession) Transactions(arg0 *big.Int) (struct {\n\tDestination common.Address\n\tValue *big.Int\n\tData []byte\n\tExecuted bool\n}, error) {\n\treturn _ReserveSpenderMultiSig.Contract.Transactions(&_ReserveSpenderMultiSig.CallOpts, arg0)\n}", "func (cctx *MinterDefinitionTransaction) Transaction() types.Transaction {\n\treturn types.Transaction{\n\t\tVersion: TransactionVersionMinterDefinition,\n\t\tMinerFees: cctx.MinerFees,\n\t\tArbitraryData: cctx.ArbitraryData,\n\t\tExtension: 
&MinterDefinitionTransactionExtension{\n\t\t\tNonce: cctx.Nonce,\n\t\t\tMintFulfillment: cctx.MintFulfillment,\n\t\t\tMintCondition: cctx.MintCondition,\n\t\t},\n\t}\n}", "func (cctx *MinterDefinitionTransaction) Transaction() types.Transaction {\n\treturn types.Transaction{\n\t\tVersion: TransactionVersionMinterDefinition,\n\t\tMinerFees: cctx.MinerFees,\n\t\tArbitraryData: cctx.ArbitraryData,\n\t\tExtension: &MinterDefinitionTransactionExtension{\n\t\t\tNonce: cctx.Nonce,\n\t\t\tMintFulfillment: cctx.MintFulfillment,\n\t\t\tMintCondition: cctx.MintCondition,\n\t\t},\n\t}\n}", "func (t *TaskStore) applyTransaction(transaction []updateDiff) error {\n\tif err := t.journalAppend(transaction); err != nil {\n\t\treturn err\n\t}\n\tt.playTransaction(transaction, t.snapshotting)\n\treturn nil\n}", "func Transact(db *gorm.DB, tf func(tx *gorm.DB) error) (err error) {\n\tif commonDB, ok := db.CommonDB().(sqlTx); ok && commonDB != nil {\n\t\t// If the db is already in a transaction, just execute tf\n\t\t// and let the outer transaction handle Rollback and Commit.\n\t\treturn tf(db)\n\t}\n\n\ttx := db.Begin()\n\tif tx.Error != nil {\n\t\treturn fmt.Errorf(\"could not start transaction. %s\", err)\n\t}\n\tdefer func() {\n\t\tif p := recover(); p != nil {\n\t\t\ttx.Rollback()\n\t\t\tpanic(p)\n\t\t}\n\t\tif err != nil {\n\t\t\ttx.Rollback()\n\t\t} else {\n\t\t\terr = tx.Commit().Error\n\t\t}\n\t}()\n\treturn tf(tx)\n}", "func TransactionCreate(c *gin.Context) {\n\tvar t models.Transaction\n\tbuffer, err := ioutil.ReadAll(c.Request.Body)\n\tif err != nil {\n\t\tc.AbortWithError(http.StatusNotAcceptable, err)\n\t}\n\n\terr2 := jsonapi.Unmarshal(buffer, &t)\n\n\tif err2 != nil {\n\t\tparseFail := appError.JSONParseFailure\n\t\tparseFail.Detail = err2.Error()\n\t\tc.AbortWithError(http.StatusMethodNotAllowed, err2).\n\t\t\tSetMeta(parseFail)\n\t\treturn\n\t}\n\n\tt.CreatorID = c.Keys[\"CurrentUserID\"].(uint)\n\n\t// Validate our new transaction\n\tisValid, errApp := t.Validate()\n\n\tif isValid == false {\n\t\tc.AbortWithError(errApp.Status, errApp).\n\t\t\tSetMeta(errApp)\n\t\treturn\n\t}\n\n\tdatabase.DBCon.Create(&t)\n\n\tdatabase.DBCon.First(&t.Recipient, t.RecipientID)\n\tdatabase.DBCon.First(&t.Sender, t.SenderID)\n\tdatabase.DBCon.First(&t.Creator, t.CreatorID)\n\n\tdata, err := jsonapi.Marshal(&t)\n\n\tif err != nil {\n\t\tc.AbortWithError(http.StatusInternalServerError, err).\n\t\t\tSetMeta(appError.JSONParseFailure)\n\t\treturn\n\t}\n\n\tc.Data(http.StatusCreated, \"application/vnd.api+json\", data)\n}", "func sendTransaction(t *Transaction) {\n\ttransactionToSend := &Message{}\n\ttransactionToSend.Transaction = t\n\tfor _, conn := range connections{\n\t\tgo send(transactionToSend, conn)\n\t}\n\ttransactionIsUsed[t.Id] = true\n}", "func createTransaction(\n\tctx context.Context,\n\tdb storage.Database,\n\tappserviceID string,\n) (\n\ttransactionJSON []byte,\n\ttxnID, maxID int,\n\teventsRemaining bool,\n\terr error,\n) {\n\t// Retrieve the latest events from the DB (will return old events if they weren't successfully sent)\n\ttxnID, maxID, events, eventsRemaining, err := db.GetEventsWithAppServiceID(ctx, appserviceID, transactionBatchSize)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"appservice\": appserviceID,\n\t\t}).WithError(err).Fatalf(\"appservice worker unable to read queued events from DB\")\n\n\t\treturn\n\t}\n\n\t// Check if these events do not already have a transaction ID\n\tif txnID == -1 {\n\t\t// If not, grab next available ID from the DB\n\t\ttxnID, err = 
db.GetLatestTxnID(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, 0, 0, false, err\n\t\t}\n\n\t\t// Mark new events with current transactionID\n\t\tif err = db.UpdateTxnIDForEvents(ctx, appserviceID, maxID, txnID); err != nil {\n\t\t\treturn nil, 0, 0, false, err\n\t\t}\n\t}\n\n\tvar ev []*gomatrixserverlib.HeaderedEvent\n\tfor i := range events {\n\t\tev = append(ev, &events[i])\n\t}\n\n\t// Create a transaction and store the events inside\n\ttransaction := gomatrixserverlib.ApplicationServiceTransaction{\n\t\tEvents: gomatrixserverlib.HeaderedToClientEvents(ev, gomatrixserverlib.FormatAll),\n\t}\n\n\ttransactionJSON, err = json.Marshal(transaction)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}", "func (r *InMemorySourceReader) Transaction() *Transaction {\n\treturn NewTransaction(r)\n}", "func (tangle *Tangle) Transaction(transactionID transaction.ID) *transaction.CachedTransaction {\n\treturn &transaction.CachedTransaction{CachedObject: tangle.transactionStorage.Load(transactionID.Bytes())}\n}", "func withTransaction(db *sql.DB, fn func(txn *sql.Tx) error) (err error) {\n\ttxn, err := db.Begin()\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\ttxn.Rollback()\n\t\t\tpanic(r)\n\t\t} else if err != nil {\n\t\t\ttxn.Rollback()\n\t\t} else {\n\t\t\terr = txn.Commit()\n\t\t}\n\t}()\n\terr = fn(txn)\n\treturn\n}", "func NewTransaction(opts *Opts, log logging.Logger, nestedCatalogDirs ...string) *Transaction {\n\tt := Transaction{\n\t\tRepo: opts.NightlyRepo,\n\t\tBinary: opts.Binary,\n\t\tNode: opts.ReleaseManager,\n\t\tattempts: opts.MaxTransactionAttempts,\n\t\tcatalogDirs: nestedCatalogDirs,\n\t}\n\n\tt.Transaction.Starter = &t\n\tt.Transaction.Stopper = &t\n\treturn &t\n}", "func (brtx *BotRegistrationTransaction) Transaction(oneCoin types.Currency) types.Transaction {\n\ttx := types.Transaction{\n\t\tVersion: TransactionVersionBotRegistration,\n\t\tCoinInputs: brtx.CoinInputs,\n\t\tMinerFees: []types.Currency{brtx.TransactionFee},\n\t\tExtension: &BotRegistrationTransactionExtension{\n\t\t\tAddresses: brtx.Addresses,\n\t\t\tNames: brtx.Names,\n\t\t\tNrOfMonths: brtx.NrOfMonths,\n\t\t\tIdentification: brtx.Identification,\n\t\t},\n\t}\n\tif brtx.RefundCoinOutput != nil {\n\t\ttx.CoinOutputs = append(tx.CoinOutputs, *brtx.RefundCoinOutput)\n\t}\n\treturn tx\n}", "func (s *searcher) Transaction(resp http.ResponseWriter, req *http.Request) {\n\tsearchTerms := mux.Vars(req)\n\n\ttransactionID := searchTerms[\"transaction_id\"]\n\tif len(transactionID) == 0 {\n\t\tresp.WriteHeader(http.StatusBadRequest)\n\t\tresp.Write([]byte(\"transaction ID is empty\"))\n\t\treturn\n\t}\n\n\tif len(transactionID) != 64 {\n\t\tresp.WriteHeader(http.StatusBadRequest)\n\t\tresp.Write([]byte(\"transaction ID is not 64 characters\"))\n\t\treturn\n\t}\n\n\tfileName, transactionIndex, err := s.searchIndex.GetTransactionPathByID(transactionID)\n\tif err != nil {\n\t\tresp.WriteHeader(http.StatusInternalServerError)\n\t\tresp.Write([]byte(fmt.Sprintf(\"error finding transaction: %s\", err.Error())))\n\t\treturn\n\t}\n\n\ttransactions, err := s.searchIndex.GetTransactionsFromSingleFile(fileName, []int{transactionIndex})\n\tif err != nil {\n\t\tresp.WriteHeader(http.StatusInternalServerError)\n\t\tresp.Write([]byte(fmt.Sprintf(\"error finding transaction: %s\", err.Error())))\n\t\treturn\n\t}\n\n\tresultBytes, err := json.Marshal(transactions)\n\tif err != nil {\n\t\tresp.WriteHeader(http.StatusInternalServerError)\n\t\tresp.Write([]byte(fmt.Sprintf(\"error marshallig 
transaction to json: %s\", err.Error())))\n\t\treturn\n\t}\n\n\tresp.WriteHeader(http.StatusOK)\n\tresp.Write(resultBytes)\n}", "func (_ReserveSpenderMultiSig *ReserveSpenderMultiSigCallerSession) Transactions(arg0 *big.Int) (struct {\n\tDestination common.Address\n\tValue *big.Int\n\tData []byte\n\tExecuted bool\n}, error) {\n\treturn _ReserveSpenderMultiSig.Contract.Transactions(&_ReserveSpenderMultiSig.CallOpts, arg0)\n}", "func (dm *Snapshotter) withTransaction(ctx context.Context, writable bool, fn func(ctx context.Context) error) error {\n\tctx, trans, err := dm.store.TransactionContext(ctx, writable)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar result *multierror.Error\n\n\terr = fn(ctx)\n\tif err != nil {\n\t\tresult = multierror.Append(result, err)\n\t}\n\n\t// Always rollback if transaction is not writable\n\tif err != nil || !writable {\n\t\tif terr := trans.Rollback(); terr != nil {\n\t\t\tlog.G(ctx).WithError(terr).Error(\"failed to rollback transaction\")\n\t\t\tresult = multierror.Append(result, errors.Wrap(terr, \"rollback failed\"))\n\t\t}\n\t} else {\n\t\tif terr := trans.Commit(); terr != nil {\n\t\t\tlog.G(ctx).WithError(terr).Error(\"failed to commit transaction\")\n\t\t\tresult = multierror.Append(result, errors.Wrap(terr, \"commit failed\"))\n\t\t}\n\t}\n\n\tif err := result.ErrorOrNil(); err != nil {\n\t\tlog.G(ctx).WithError(err).Debug(\"snapshotter error\")\n\n\t\t// Unwrap if just one error\n\t\tif result.Len() == 1 {\n\t\t\treturn result.Errors[0]\n\t\t}\n\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func NewDBTransactionMiddleware(\r\n\thandler infrastructure.Router,\r\n\tlogger infrastructure.Logger,\r\n\tdb infrastructure.Database,\r\n) DBTransactionMiddleware {\r\n\treturn DBTransactionMiddleware{\r\n\t\thandler: handler,\r\n\t\tlogger: logger,\r\n\t\tdb: db,\r\n\t}\r\n}", "func (m ResourceMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (adapter *GORMAdapter) DoInTransaction(fc func(tx orm.ORM) error) (err error) {\n\tgormTxFunc := func(tx *gorm.DB) error {\n\t\treturn fc(NewGORM(tx))\n\t}\n\n\treturn adapter.db.Transaction(gormTxFunc)\n}", "func Transactions(exec boil.Executor, mods ...qm.QueryMod) transactionQuery {\n\tmods = append(mods, qm.From(\"`transactions`\"))\n\treturn transactionQuery{NewQuery(exec, mods...)}\n}", "func Example_transactions() {\n\tdb, _ := dbx.Open(\"mysql\", \"user:pass@/example\")\n\n\tdb.Transactional(func(tx *dbx.Tx) error {\n\t\t_, err := tx.Insert(\"user\", dbx.Params{\n\t\t\t\"name\": \"user1\",\n\t\t}).Execute()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = tx.Insert(\"user\", dbx.Params{\n\t\t\t\"name\": \"user2\",\n\t\t}).Execute()\n\t\treturn err\n\t})\n}", "func (m OperativerecordMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (_ReserveSpenderMultiSig *ReserveSpenderMultiSigTransactorSession) ExecuteTransaction(transactionId *big.Int) (*types.Transaction, error) {\n\treturn _ReserveSpenderMultiSig.Contract.ExecuteTransaction(&_ReserveSpenderMultiSig.TransactOpts, transactionId)\n}", "func (rt *RecordingTracer) WithTransaction(f func(ctx context.Context)) (model.Transaction, []model.Span, []model.Error) {\n\treturn 
rt.WithTransactionOptions(apm.TransactionOptions{}, f)\n}", "func (m *Manager) RunInTransaction(ctx *context.Context, f func(tctx *context.Context) error) error {\n\ttx, err := m.db.Beginx()\n\tif err != nil {\n\t\ttx.Rollback()\n\t\treturn fmt.Errorf(\"error when creating transction: %v\", err)\n\t}\n\n\tctx = NewContext(ctx, tx)\n\terr = m.acknowledgeService.Prepare(ctx)\n\tif err != nil {\n\t\tfmt.Printf(\"\\n[Commerce-Kit - RunInTransaction - Prepare] Error: %v\\n\", err)\n\t}\n\terr = f(ctx)\n\tif err != nil {\n\t\ttx.Rollback()\n\t\tm.acknowledgeService.Acknowledge(ctx, \"rollback\", err.Error())\n\t\treturn err\n\t}\n\n\terr = tx.Commit()\n\tif err != nil {\n\t\tm.acknowledgeService.Acknowledge(ctx, \"rollback\", fmt.Sprintf(\"Error when commiting: %s\", err.Error()))\n\t\treturn fmt.Errorf(\"error when committing transaction: %v\", err)\n\t}\n\tm.acknowledgeService.Acknowledge(ctx, \"commit\", \"\")\n\tm.publishQueryModelEvents(ctx)\n\n\treturn nil\n}", "func (m PostMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (bnttx *BotNameTransferTransaction) Transaction(oneCoin types.Currency) types.Transaction {\n\ttx := types.Transaction{\n\t\tVersion: TransactionVersionBotNameTransfer,\n\t\tCoinInputs: bnttx.CoinInputs,\n\t\tMinerFees: []types.Currency{bnttx.TransactionFee},\n\t\tExtension: &BotNameTransferTransactionExtension{\n\t\t\tSender: bnttx.Sender,\n\t\t\tReceiver: bnttx.Receiver,\n\t\t\tNames: bnttx.Names,\n\t\t},\n\t}\n\tif bnttx.RefundCoinOutput != nil {\n\t\ttx.CoinOutputs = append(tx.CoinOutputs, *bnttx.RefundCoinOutput)\n\t}\n\treturn tx\n}", "func (m ManagerMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (_ReserveSpenderMultiSig *ReserveSpenderMultiSigSession) ExecuteTransaction(transactionId *big.Int) (*types.Transaction, error) {\n\treturn _ReserveSpenderMultiSig.Contract.ExecuteTransaction(&_ReserveSpenderMultiSig.TransactOpts, transactionId)\n}", "func (m CleaningroomMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func NewTransaction() *Transaction {\n\treturn NewTransactionN(10)\n}", "func (m PermissionMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func NewTransaction(p *requestParams) {\n\tw, r, c, u := p.w, p.r, p.c, p.u\n\n\td := json.NewDecoder(r.Body)\n\tvar request TransactionRequest\n\tif err := d.Decode(&request); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif len(request.Amounts) != len(request.Accounts) {\n\t\thttp.Error(w, \"Amounts and accounts of different lengths\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tdate, err := time.Parse(dateStringFormat, request.Date)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tuserKey := userKey(c, u)\n\ttransactionId := uuid.NewRandom().String()\n\taccountKeys := make([]*datastore.Key, len(request.Accounts))\n\tsplitKeys 
:= make([]*datastore.Key, len(request.Accounts))\n\tsplits := make([]*transaction.Split, len(request.Accounts))\n\n\tfor i := range request.Accounts {\n\t\taccountKeys[i] = datastore.NewKey(c, \"Account\", \"\", request.Accounts[i], userKey)\n\t\tsplitKeys[i] = datastore.NewKey(c, \"Split\", transactionId, 0, accountKeys[i])\n\t\tsplits[i] = &transaction.Split{\n\t\t\tAmount: request.Amounts[i],\n\t\t\tAccount: request.Accounts[i],\n\t\t\tMemo: request.Memo,\n\t\t\tDate: date,\n\t\t}\n\t}\n\n\tx := transaction.NewTransaction()\n\tx.AddSplits(splits)\n\n\tif err := x.ValidateAmount(); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\terr = datastore.RunInTransaction(c, func(c appengine.Context) error {\n\t\taccounts := make([]transaction.Account, len(accountKeys))\n\t\tif err := datastore.GetMulti(c, accountKeys, accounts); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor i := range accounts {\n\t\t\tx.AddAccount(&accounts[i], accountKeys[i].IntID())\n\t\t}\n\n\t\tif err := x.Commit(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tputStatus := make(chan error)\n\n\t\tgo func() {\n\t\t\t_, err := datastore.PutMulti(c, accountKeys, accounts)\n\t\t\tputStatus <- err\n\t\t}()\n\t\tgo func() {\n\t\t\t_, err := datastore.PutMulti(c, splitKeys, splits)\n\t\t\tputStatus <- err\n\t\t}()\n\n\t\terr := <-putStatus\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn <-putStatus\n\t}, nil)\n\tif err != nil {\n\t\t// TODO(cjc25): This might not be a 400: if e.g. datastore failed it should\n\t\t// be a 500. Interpret err and return the right thing.\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n}", "func (m UrgentMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (p *Postgres) Tx(ctx context.Context, txFunc store.TxFunc) (err error) {\n\ttx := shared.GetTx(ctx)\n\n\tif tx != nil {\n\t\treturn txFunc(ctx)\n\t}\n\n\ttx, err = p.BeginTx(ctx, nil)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"begin tx failed\")\n\t}\n\n\tctx = shared.WithTx(ctx, tx)\n\n\t//nolint:gocritic\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tif err := tx.Rollback(); err != nil {\n\t\t\t\tlog.Warn(ctx, \"tx rollback failed\", \"err\", err)\n\t\t\t}\n\t\t\tpanic(r)\n\t\t} else if err != nil {\n\t\t\tif err := tx.Rollback(); err != nil {\n\t\t\t\tlog.Warn(ctx, \"tx rollback failed\", \"err\", err)\n\t\t\t}\n\t\t} else {\n\t\t\terr = tx.Commit()\n\t\t}\n\t}()\n\n\terr = txFunc(ctx)\n\n\treturn err\n}", "func (m PeopleMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (brutx *BotRecordUpdateTransaction) Transaction(oneCoin types.Currency) types.Transaction {\n\ttx := types.Transaction{\n\t\tVersion: TransactionVersionBotRecordUpdate,\n\t\tCoinInputs: brutx.CoinInputs,\n\t\tMinerFees: []types.Currency{brutx.TransactionFee},\n\t\tExtension: &BotRecordUpdateTransactionExtension{\n\t\t\tIdentifier: brutx.Identifier,\n\t\t\tSignature: brutx.Signature,\n\t\t\tAddressUpdate: brutx.Addresses,\n\t\t\tNameUpdate: brutx.Names,\n\t\t\tNrOfMonths: brutx.NrOfMonths,\n\t\t},\n\t}\n\tif brutx.RefundCoinOutput != nil {\n\t\ttx.CoinOutputs = append(tx.CoinOutputs, *brutx.RefundCoinOutput)\n\t}\n\treturn tx\n}", "func 
(pg *Postgres) Tx(ctx context.Context, txFunc store.TxFunc) (err error) {\n\ttx := shared.GetTx(ctx)\n\n\tif tx != nil {\n\t\treturn txFunc(ctx)\n\t}\n\n\ttx, err = pg.BeginTx(ctx, nil)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"begin tx failed\")\n\t}\n\n\tctx = shared.WithTx(ctx, tx)\n\n\t//nolint:gocritic\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tif err := tx.Rollback(); err != nil {\n\t\t\t\tlog.Warn(ctx, \"tx rollback failed\", \"err\", err)\n\t\t\t}\n\t\t\tpanic(r)\n\t\t} else if err != nil {\n\t\t\tif err := tx.Rollback(); err != nil {\n\t\t\t\tlog.Warn(ctx, \"tx rollback failed\", \"err\", err)\n\t\t\t}\n\t\t} else {\n\t\t\terr = tx.Commit()\n\t\t}\n\t}()\n\n\terr = txFunc(ctx)\n\n\treturn err\n}", "func Transaction(c *gin.Context) {\n\n\tt_type,_ := strconv.Atoi(c.PostForm(\"transaction_type\")) // 1 : sales , 2 : missing products (hilang)\n\tstatus := 1\n\tmessage := \"Success\"\n var responseTransaction ResponseTransaction\n\tvar newstocks int\n\tvar products Products\n\tvar products_arr []Products\n\tvar stock_ins_arr []Stock_Ins\n\tvar stock_outs Stock_Outs\n\tvar stock_ins Stock_Ins\n\tvar note string\n\ttransaction_id := \"\"\n\tsellPrice,_ := strconv.Atoi(c.PostForm(\"sell_price\"))\n\tvar buyPrice int\n\tqtY,_ := strconv.Atoi(c.PostForm(\"qty\"))\n\tcurrentdatetime := time.Now().Format(\"2006-01-02 15:04:05\")\n\tdb := InitDb() //db intiate\n\t//get data products\n\tdb.Where(\"sku = ?\", c.PostForm(\"sku\")).First(&products).Limit(1).Scan(&products_arr)\n\n\t//check if the sku is exist?\n\tif(len(products_arr) > 0) {\n\t\ttx := db.Begin()\n\n\t\t/**\n\t * Identify product is gone / transaction by sales\n\t */\n\n\t\tif (t_type == 1) {\n\n\t\t\ttransaction_id = generateTransactionID()\n\n\t\t\t//get data products\n\t\t\tdb.Where(\"sku = ?\", c.PostForm(\"sku\")).First(&stock_ins).Limit(1).Scan(&stock_ins_arr)\n\n\t\t\t// get the data stock after transaction\n\t\t\tfor i,element := range stock_ins_arr{\n\t\t\t\tif (i == 0) {\n\t\t\t\t\tbuyPrice = element.Buy_Price\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tnote = \"Pesanan \"+transaction_id\n\t\t\ttransactions := Transactions{Id:transaction_id,Buy_Price:buyPrice,Sell_Price:sellPrice,Qty:qtY,Sku:c.PostForm(\"sku\"),Created_Date:currentdatetime}\n\t\t\tif err := tx.Create(&transactions).Error; err != nil {\n\t\t\t\ttx.Rollback()\n\t\t\t\tstatus = 0\n\t\t\t\tmessage = \"failed to insert data transaction\"\n\t\t\t}\n\n\n\t\t} else if (t_type == 2) {\n\n\t\t\tnote = \"Barang Hilang\"\n\n\t\t}\n\t\t//insert data to stock_outs\n\t\tstock_outs = Stock_Outs{Sku:c.PostForm(\"sku\"),Created_Date:currentdatetime,Qty:qtY,Note:note,Transaction_Id:transaction_id}\n\t\tif err := tx.Create(&stock_outs).Error; err != nil {\n\t\t\ttx.Rollback()\n\t\t\tstatus = 0\n\t\t\tmessage = \"failed to insert data stocks_outs\"\n\t\t}\n\n\t\t// get the data stock after transaction\n\t\tfor i,element := range products_arr{\n\t\t\tif (i == 0) {\n\t\t\t\tnewstocks = element.Stocks - qtY\n\t\t\t}\n\t\t}\n\n\t\t//update product stocks in table products\n\t\tif err := tx.Model(&products).Where(\"sku = ?\", c.PostForm(\"sku\")).Update(\"stocks\", newstocks).Error; err != nil {\n\t\t\ttx.Rollback()\n\t\t\tstatus = 0\n\t\t\tmessage = \"failed to update data products\"\n\t\t}\n\n\n\t\t//transaction commit\n\t\ttx.Commit()\n\t}else{\n\t\tstatus = 0\n\t\tmessage = \"SKU Not found!\"\n\t}\n\n\tif status == 1{\n\t\tresponseTransaction = 
ResponseTransaction{Status:status,Message:message,Data:DataTransaction{Sku:c.PostForm(\"sku\"),Buy_Price:buyPrice,Sell_Price:sellPrice,Created_Date:currentdatetime,Product_name:c.PostForm(\"product_name\"),Stocks:newstocks,Transaction_Id:transaction_id}}\n\t}else{\n\t\tresponseTransaction = ResponseTransaction{Status:status,Message:message}\n\t}\n\n\t// Close connection database\n\tdefer db.Close()\n\tc.JSON(200, responseTransaction)\n}", "func (m OperativeMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (_ReserveSpenderMultiSig *ReserveSpenderMultiSigTransactor) ExecuteTransaction(opts *bind.TransactOpts, transactionId *big.Int) (*types.Transaction, error) {\n\treturn _ReserveSpenderMultiSig.contract.Transact(opts, \"executeTransaction\", transactionId)\n}", "func (m RobberMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (m InspectionResultMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func NewTransaction(streamer interfaces.Streamer, txID common.Hash, confirmationBlocks ...uint64) *Transaction {\n\tvar confirmBlocks uint64\n\t// check for confirmation blocks\n\tif len(confirmationBlocks) != 0 && confirmationBlocks[0] != 0 {\n\t\tconfirmBlocks = confirmationBlocks[0]\n\t}\n\t// instantiate instance\n\tt := &Transaction{\n\t\tstreamer: streamer,\n\t\tmu: sync.Mutex{},\n\t\talive: true,\n\t\ttxID: txID,\n\t\ttxFound: false,\n\t\ttx: nil,\n\t\tconfirmationBlocks: confirmBlocks,\n\t\tsendTxFound: make(chan *interfaces.TxWithBlock),\n\t\tshutdown: make(chan struct{}),\n\t\tshutdownOnce: sync.Once{},\n\t\tsignalClosed: make(chan struct{}),\n\t\terrs: make(chan error, 1),\n\t\tsendErrOnce: sync.Once{},\n\t}\n\t// start the loop\n\tgo t.loop()\n\t// return the instance\n\treturn t\n}", "func (m StatusdMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func TransactionIndex(c *gin.Context) {\n\trelatedObjectID := c.Query(\"relatedObjectId\")\n\trelatedObjectType := c.Query(\"relatedObjectType\")\n\tisSettledQuery := c.Query(\"isSettled\")\n\tstatusQuery := c.Query(\"status\")\n\tcurUserID := c.Keys[\"CurrentUserID\"]\n\n\tvar transactions []models.Transaction\n\n\tquery := database.DBCon\n\n\tisSettled, err := strconv.ParseBool(isSettledQuery)\n\tif isSettledQuery != \"\" && err == nil {\n\t\tquery = query.Where(\"is_settled = ?\", isSettled)\n\t}\n\n\t// TODO: Check that statusQuery is a valid status\n\tif statusQuery != \"\" {\n\t\tquery = query.Where(\"status = ?\", statusQuery)\n\t}\n\n\tif relatedObjectID != \"\" && relatedObjectType != \"\" {\n\t\tquery.\n\t\t\tWhere(\"related_object_id = ? 
AND related_object_type = ?\", relatedObjectID, relatedObjectType).\n\t\t\tOrder(\"created_at desc\").\n\t\t\tFind(&transactions)\n\t} else {\n\t\tquery.\n\t\t\tWhere(\"creator_id = ?\", curUserID).\n\t\t\tFind(&transactions)\n\t}\n\n\t// Get creator and relatedUser\n\t// TODO: n + 1 query problem here, so we'll figure this out later\n\tfor i := range transactions {\n\t\tdatabase.DBCon.First(&transactions[i].Recipient, transactions[i].RecipientID)\n\t\tdatabase.DBCon.First(&transactions[i].Sender, transactions[i].SenderID)\n\t\tdatabase.DBCon.First(&transactions[i].Creator, transactions[i].CreatorID)\n\t}\n\n\tdata, err := jsonapi.Marshal(transactions)\n\n\tif err != nil {\n\t\tc.AbortWithError(http.StatusInternalServerError, err).\n\t\t\tSetMeta(appError.JSONParseFailure)\n\t\treturn\n\t}\n\n\tc.Data(http.StatusOK, \"application/vnd.api+json\", data)\n}", "func (m EventMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func MakeTransactionHandlers(r *mux.Router, n negroni.Negroni, service transaction.UseCase) {\n\tr.Handle(\"/v1/transaction\", n.With(\n\t\tnegroni.Wrap(listTransactions(service)),\n\t)).Methods(\"GET\", \"OPTIONS\").Name(\"listTransactions\")\n\n\tr.Handle(\"/v1/transaction\", n.With(\n\t\tnegroni.Wrap(createTransaction(service)),\n\t)).Methods(\"POST\", \"OPTIONS\").Name(\"createTransaction\")\n\n\tr.Handle(\"/v1/transaction/{id}\", n.With(\n\t\tnegroni.Wrap(getTransaction(service)),\n\t)).Methods(\"GET\", \"OPTIONS\").Name(\"getTransaction\")\n\n\tr.Handle(\"/v1/transaction/{id}\", n.With(\n\t\tnegroni.Wrap(deleteTransaction(service)),\n\t)).Methods(\"DELETE\", \"OPTIONS\").Name(\"deleteTransaction\")\n}", "func getTransactions(r *http.Request) ([]Transaction, context.Context, error) {\n\tvar transactions []Transaction\n\tvar transaction Transaction\n\n\t// Context has not been set\n\tif r.Context().Value(transactionsKey) == nil {\n\t\t// Read request body\n\t\tjsonBytes, _ := ioutil.ReadAll(r.Body)\n\t\tr.Body = ioutil.NopCloser(bytes.NewBuffer(jsonBytes))\n\n\t\t// Determine if JSON is a single object or an array of objects\n\t\tbody := strings.TrimSpace(string(jsonBytes))\n\n\t\tif strings.HasPrefix(body, \"{\") {\n\t\t\t// Single Object\n\t\t\terr := json.Unmarshal(jsonBytes, &transaction)\n\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, errors.New(\"PARSE_ERROR\")\n\t\t\t}\n\n\t\t\ttransactions = append(transactions, transaction)\n\t\t} else if strings.HasPrefix(body, \"[\") {\n\t\t\t// Array of Objects\n\t\t\terr := json.Unmarshal(jsonBytes, &transactions)\n\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, errors.New(\"PARSE_ERROR\")\n\t\t\t}\n\t\t}\n\n\t\t// Add transactions to request context so subsequent middleware does not have to parse the transactions again\n\t\tctx := context.WithValue(r.Context(), transactionsKey, transactions)\n\t\treturn transactions, ctx, nil\n\t}\n\n\t// Context already exists\n\ttransactions = r.Context().Value(transactionsKey).([]Transaction)\n\treturn transactions, r.Context(), nil\n}", "func (m HarborMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (m JobHistoryMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is 
not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (m PatientrightsMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (m TodoItemMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}", "func (m TokenMutation) Tx() (*Tx, error) {\n\tif _, ok := m.driver.(*txDriver); !ok {\n\t\treturn nil, fmt.Errorf(\"ent: mutation is not running in a transaction\")\n\t}\n\ttx := &Tx{config: m.config}\n\ttx.init()\n\treturn tx, nil\n}" ]
[ "0.6934194", "0.68263537", "0.66666955", "0.65281916", "0.65220726", "0.6500732", "0.64666903", "0.6418999", "0.6384218", "0.63415635", "0.6333283", "0.63124186", "0.630846", "0.6259748", "0.62529105", "0.6215971", "0.62130564", "0.6209439", "0.6205622", "0.6169113", "0.61445117", "0.6143163", "0.61317056", "0.61285836", "0.6120265", "0.61160135", "0.61045533", "0.6102975", "0.6039606", "0.60345", "0.5941336", "0.5935513", "0.5893517", "0.5865774", "0.58516777", "0.58383876", "0.5831686", "0.5815624", "0.5790342", "0.575954", "0.575954", "0.56943244", "0.5683902", "0.5665777", "0.56625384", "0.5618403", "0.56157625", "0.56094503", "0.56094503", "0.5606474", "0.5570343", "0.55683833", "0.55620384", "0.55395144", "0.55241805", "0.5512781", "0.55032843", "0.5503279", "0.55004483", "0.5498078", "0.54920334", "0.54890126", "0.5481556", "0.54776025", "0.5475548", "0.5472296", "0.5465032", "0.5456667", "0.5450667", "0.54468405", "0.544512", "0.54378825", "0.54352385", "0.5416196", "0.5414523", "0.54137594", "0.5412928", "0.54005903", "0.5393679", "0.53907615", "0.53901243", "0.53638303", "0.53594196", "0.5356895", "0.5349184", "0.5347611", "0.53443354", "0.5340938", "0.53370655", "0.5334417", "0.53337264", "0.53331405", "0.53329283", "0.53310674", "0.533042", "0.532471", "0.5318156", "0.53131926", "0.53115356", "0.5310187" ]
0.7005496
0
Save saves sql tuning advice into the middleware
func (r *Repository) Save(url string, toAddr, ccAddr []string, content string, status int) error { return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (aps *ApiServer) setMiddleware() {\n\t/*\n\t\taps.Engine.Use(func(c *gin.Context) {\n\t\t\tstart := time.Now()\n\t\t\tc.Next()\n\t\t\tend := time.Now()\n\t\t\tlatency := end.Sub(start)\n\t\t\tpath := c.Request.URL.Path\n\t\t\tclientIP := c.ClientIP()\n\t\t\tmethod := c.Request.Method\n\t\t\tstatusCode := c.Writer.Status()\n\t\t\tlogger.Info(\"api request\",\n\t\t\t\tzap.Int(\"status_code\", statusCode),\n\t\t\t\tzap.Duration(\"latency\", latency),\n\t\t\t\tzap.String(\"client_ip\", clientIP),\n\t\t\t\tzap.String(\"method\", method),\n\t\t\t\tzap.String(\"path\", path),\n\t\t\t)\n\t\t})\n\t*/\n\taps.Engine.Use(gin.Recovery())\n}", "func (srv *server) save() error {\n\ts, err := sql.Open(\"postgres\", srv.redshiftConfig())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := s.Query(srv.query); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (session *Session) saveLastSQL(sql string, args ...interface{}) {\n\tsession.lastSQL = sql\n\tsession.lastSQLArgs = args\n\tsession.Engine.logSQL(sql, args...)\n}", "func (s *BasePlSqlParserListener) EnterSavepoint_statement(ctx *Savepoint_statementContext) {}", "func Save() {\n\tenforcer.SavePolicy()\n}", "func Middleware(db Datastore) func(http.Handler) http.Handler {\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tctx := context.WithValue(r.Context(), dbCtxKey, db)\n\n\t\t\t// and call the next with our new context\n\t\t\tr = r.WithContext(ctx)\n\t\t\tnext.ServeHTTP(w, r)\n\t\t})\n\t}\n}", "func Save(r *http.Request, w http.ResponseWriter) error {\n\n}", "func (spy *StoreSpy) Save(r *http.Request, w http.ResponseWriter, s *sessions.Session) error {\n\tspy.SaveCalls++\n\treturn nil\n}", "func (cm *commonMiddlware) traceDB(ctx context.Context) trace.Span {\n\tif cm.ot == nil {\n\t\treturn nil\n\t}\n\tif span := trace.SpanFromContext(ctx); span != nil {\n\t\t_, sp := cm.ot.Start(ctx, \"Postgres Database Call\")\n\t\treturn sp\n\t}\n\t_, sp := cm.ot.Start(ctx, \"Asynchronous Postgres Database Call\")\n\treturn sp\n}", "func (s *siteData) save() error {\n\tvar err error\n\tif err = db.open(); err != nil {\n\t\treturn err\n\t}\n\tdefer db.close()\n\n\tsiteConf := []string{\"site\"}\n\tif err = db.bolt.SetValue(siteConf, \"title\", s.Title); err != nil {\n\t\treturn err\n\t}\n\tif err = db.bolt.SetInt(siteConf, \"port\", s.Port); err != nil {\n\t\treturn err\n\t}\n\tif err = db.bolt.SetValue(siteConf, \"session-name\", s.SessionName); err != nil {\n\t\treturn err\n\t}\n\treturn db.bolt.SetValue(siteConf, \"server-dir\", s.ServerDir)\n}", "func (self *botStats) save(t db.Table, index int) error {\n\tkey := fmt.Sprintf(\"%s-%2d\",botStatsRecordKey,index)\n\treturn t.Put(key,self)\n}", "func (d *Driver) Save(\n\tctx *sql.Context,\n\ti sql.Index,\n\titer sql.IndexKeyValueIter,\n) (err error) {\n\tvar colID uint64\n\tstart := time.Now()\n\n\tidx, ok := i.(*pilosaIndex)\n\tif !ok {\n\t\treturn errInvalidIndexType.New(i)\n\t}\n\n\tprocessingFile := d.processingFilePath(idx.Database(), idx.Table(), idx.ID())\n\tif err = index.CreateProcessingFile(processingFile); err != nil {\n\t\treturn err\n\t}\n\n\t// Retrieve the pilosa schema\n\tschema, err := d.client.Schema()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Create a pilosa index and frame objects in memory\n\tpilosaIndex, err := schema.Index(indexName(idx.Database(), idx.Table()))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td.frames = make([]*pilosa.Frame, len(idx.Expressions()))\n\tfor i, e := range 
idx.Expressions() {\n\t\tfrm, err := pilosaIndex.Frame(frameName(idx.ID(), e))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// make sure we delete the index in every run before inserting, since there may\n\t\t// be previous data\n\t\tif err = d.client.DeleteFrame(frm); err != nil {\n\t\t\treturn errDeletePilosaFrame.New(frm.Name(), err)\n\t\t}\n\n\t\td.frames[i] = frm\n\t}\n\n\t// Make sure the index and frames exists on the server\n\terr = d.client.SyncSchema(schema)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Open mapping in create mode. After finishing the transaction is rolled\n\t// back unless all goes well and rollback value is changed.\n\trollback := true\n\tidx.mapping.openCreate(true)\n\tdefer func() {\n\t\tif rollback {\n\t\t\tidx.mapping.rollback()\n\t\t} else {\n\t\t\te := d.saveMapping(ctx, idx.mapping, colID, false)\n\t\t\tif e != nil && err == nil {\n\t\t\t\terr = e\n\t\t\t}\n\t\t}\n\n\t\tidx.mapping.close()\n\t}()\n\n\td.bitBatches = make([]*bitBatch, len(d.frames))\n\tfor i := range d.bitBatches {\n\t\td.bitBatches[i] = newBitBatch(sql.IndexBatchSize)\n\t}\n\n\tfor colID = uint64(0); err == nil; colID++ {\n\t\t// commit each batch of objects (pilosa and boltdb)\n\t\tif colID%sql.IndexBatchSize == 0 && colID != 0 {\n\t\t\td.saveBatch(ctx, idx.mapping, colID)\n\t\t}\n\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\n\t\tdefault:\n\t\t\tvar (\n\t\t\t\tvalues []interface{}\n\t\t\t\tlocation []byte\n\t\t\t)\n\t\t\tvalues, location, err = iter.Next()\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tfor i, frm := range d.frames {\n\t\t\t\tif values[i] == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\trowID, err := idx.mapping.getRowID(frm.Name(), values[i])\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\td.bitBatches[i].Add(rowID, colID)\n\t\t\t}\n\t\t\terr = idx.mapping.putLocation(pilosaIndex.Name(), colID, location)\n\t\t}\n\t}\n\n\tif err != nil && err != io.EOF {\n\t\treturn err\n\t}\n\n\trollback = false\n\n\terr = d.savePilosa(ctx, colID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"duration\": time.Since(start),\n\t\t\"pilosa\": d.timePilosa,\n\t\t\"mapping\": d.timeMapping,\n\t\t\"rows\": colID,\n\t\t\"id\": i.ID(),\n\t}).Debugf(\"finished pilosa indexing\")\n\n\treturn index.RemoveProcessingFile(processingFile)\n}", "func Save() {\n\tgo db.save()\n}", "func (w *Wrapper) saveTrace(err error, query string, startedAt time.Time) {\n\tvar stacks []map[string]interface{}\n\tfor skip := 0; ; skip++ {\n\t\tpc, file, line, ok := runtime.Caller(skip)\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tstacks = append(stacks, map[string]interface{}{\n\t\t\t\"Skip\": skip,\n\t\t\t\"PC\": pc,\n\t\t\t\"File\": file,\n\t\t\t\"Line\": line,\n\t\t})\n\t}\n\tw.Traces = append(w.Traces, Trace{\n\t\tQuery: query,\n\t\tDuration: time.Since(startedAt),\n\t\tStacks: stacks,\n\t\tError: err,\n\t})\n}", "func (l LiveAgent) Save() error {\n\n\tvar db = data.GetDB()\n\n\tquery := fmt.Sprintf(`\n\t\tINSERT INTO vicidial_live_agents \n\t\t\t(live_agent_id, user, server_ip, conf_exten, extension, status, lead_id, campaign_id, uniqueid, callerid, channel, random_id, \n\t\t\tlast_call_time, last_update_time, last_call_finish, closer_campaigns, call_server_ip, user_level, \n\t\t\tcomments, campaign_weight, calls_today, external_hangup, external_status, external_pause, external_dial, \n\t\t\texternal_ingroups, external_blended, external_igb_set_user, external_update_fields, external_update_fields_data, 
\n\t\t\texternal_timer_action, external_timer_action_message, external_timer_action_seconds, agent_log_id, last_state_change, \n\t\t\tagent_territories, outbound_autodial, manager_ingroup_set, ra_user, ra_extension, external_dtmf, external_transferconf, \n\t\t\texternal_park, external_timer_action_destination, on_hook_agent, on_hook_ring_time, ring_callerid, last_inbound_call_time, \n\t\t\tlast_inbound_call_finish, campaign_grade, external_recording, external_pause_code, pause_code, preview_lead_id, external_lead_id, \n\t\t\tlast_inbound_call_time_filtered, last_inbound_call_finish_filtered) \n\t\tVALUES \n\t\t\t(NULL, 'duser2', '172.16.10.209', '8600051', 'SIP/102', 'PAUSED', '0', 'DCAMP', '', '', '', '11036487', \n\t\t\t'2020-08-19 16:29:09', '2020-08-19 16:30:03', '2020-08-19 16:29:09', '-', NULL, '1', \n\t\t\tNULL, '0', '0', '', '', '', '', \n\t\t\tNULL, '0', '', '0', '', \n\t\t\t'', '', '-1', '196', '2020-08-19 16:29:14', NULL, 'N', 'N', '', '', '', '', '', '', 'N', '60', '', '2020-08-19 16:29:09', '2020-08-19 16:29:09', '10', '', '', 'LOGIN', '0', '0', '2020-08-19 16:29:09', '2020-08-19 16:29:09')\n\t`)\n\n\treturn db.Exec(query).Error\n}", "func (self *BenchDatabase) Save(docs ...SqlInserter) error {\n\tif self.db == nil {\n\t\terr := self.Connect()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\ttx, err := self.db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, doc := range docs {\n\t\tif err = doc.SqlInsert(tx); err != nil {\n\t\t\tif rb_err := tx.Rollback(); rb_err != nil {\n\t\t\t\treturn rb_err\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t}\n\tif err = tx.Commit(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (s *BasePlSqlParserListener) EnterSupplemental_db_logging(ctx *Supplemental_db_loggingContext) {}", "func (aws *ActionWithState) Save() error {\n\tdb, err := sqlconn.GetConnection()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer db.Close()\n\tupdate, err := db.Query(\"\" +\n\t\t\"UPDATE `action` \" +\n\t\t\"\tSET `state` = ?, \" +\n\t\t\"\t\t`processed` = ? 
\" +\n\t\t\"\tWHERE `id` = ?\", aws.FSM.Current(), aws.Processed, aws.id)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer update.Close()\n\treturn nil\n}", "func (s *Server) setupMiddleware() {\n\ts.engine.Use(gin.Recovery())\n\ts.engine.Use(MetricMiddleware())\n\ts.engine.Use(ErrorReportingMiddleware())\n}", "func writeToStore() {\n\tf, err := os.OpenFile(DBFile, os.O_APPEND|os.O_WRONLY, 0600)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer f.Close()\n\tif _, err := f.WriteString(strings.Join(passGenerator(), \"\") + \",\" + createSecrets() + \"\\n\"); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func addDatabaseMiddleware(r *gin.Engine) {\n\tdb := initDB()\n\t// Add database to our context\n\tr.Use(func(c *gin.Context) {\n\t\tc.Set(ContextKeyDB, db)\n\t})\n}", "func DBMiddlware(db *gorm.DB) func(c *gin.Context) {\n\treturn func(c *gin.Context) {\n\t\tc.Set(\"db\", db)\n\t\tc.Next()\n\t}\n\n}", "func (a *adapter) SavePolicy(model model.Model) error {\n\tif a.filtered {\n\t\treturn errors.New(\"cannot save a filtered policy\")\n\t}\n\tif err := a.dropTable(); err != nil {\n\t\treturn err\n\t}\n\n\tvar lines []interface{}\n\n\tfor ptype, ast := range model[\"p\"] {\n\t\tfor _, rule := range ast.Policy {\n\t\t\tline := savePolicyLine(ptype, rule)\n\t\t\tlines = append(lines, &line)\n\t\t}\n\t}\n\n\tfor ptype, ast := range model[\"g\"] {\n\t\tfor _, rule := range ast.Policy {\n\t\t\tline := savePolicyLine(ptype, rule)\n\t\t\tlines = append(lines, &line)\n\t\t}\n\t}\n\n\tctx := context.TODO()\n\t_, err := a.collection.InsertMany(ctx, lines)\n\treturn err\n}", "func storeRecordedHints(shard Shard, hints recoverylog.FSMHints, etcd *clientv3.Client) error {\n\tvar val, err = json.Marshal(hints)\n\tif err != nil {\n\t\treturn extendErr(err, \"marshal FSMHints\")\n\t}\n\tvar asn = shard.Assignment()\n\n\t_, err = etcd.Txn(shard.Context()).\n\t\t// Verify our Assignment is still in effect (eg, we're still primary), then write |hints| to HintPrimaryKey.\n\t\t// Compare CreateRevision to allow for a raced ReplicaState update.\n\t\tIf(clientv3.Compare(clientv3.CreateRevision(string(asn.Raw.Key)), \"=\", asn.Raw.CreateRevision)).\n\t\tThen(clientv3.OpPut(shard.Spec().HintPrimaryKey(), string(val))).\n\t\tCommit()\n\n\tif etcdErr, ok := err.(rpctypes.EtcdError); ok && etcdErr.Code() == codes.Unavailable {\n\t\t// Recorded hints are advisory and can generally tolerate omitted\n\t\t// updates. It's also annoying for temporary Etcd partitions to abort\n\t\t// an otherwise-fine shard primary. 
So, log but allow shard processing\n\t\t// to continue; we'll retry on the next hints flush interval.\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"key\": shard.Spec().HintPrimaryKey(),\n\t\t\t\"err\": err,\n\t\t}).Warn(\"failed to store recorded FSMHints (will retry)\")\n\n\t} else if err != nil {\n\t\treturn extendErr(err, \"storing recorded FSMHints\")\n\t}\n\treturn nil\n}", "func statementCtx(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tstID := chi.URLParam(r, \"stID\")\n\t\tvar stmt model.Statement\n\t\terr := driver.DoOne(&stmt, stID, driver.GetOne)\n\t\tif err != nil {\n\t\t\thttp.Error(w, http.StatusText(404), 404)\n\t\t\treturn\n\t\t}\n\t\tctx := context.WithValue(r.Context(), statementContext, &stmt)\n\t\tlog.Printf(\"Data from DB: %+v with ID: %v\", stmt, stID)\n\n\t\tnext.ServeHTTP(w, r.WithContext(ctx))\n\t})\n}", "func (w work) SavingDB(ctx context.Context) {\n\tlog.Printf(\"save %s\", w.sth)\n\n\tstmt := \"select name from db.table\"\n\tdb := sql.DB{}\n\tconn, _ := db.Conn(ctx)\n\trows, err := conn.QueryContext(ctx, stmt)\n\tif err != nil {\n\t\tif err == context.DeadlineExceeded {\n\t\t\t// context canceled\n\t\t}\n\t\treturn\n\t}\n\n\tvar name string\n\tfor rows.Next() {\n\t\tif err := rows.Scan(&name); err != nil {\n\t\t\tif err == context.DeadlineExceeded {\n\t\t\t\tlog.Println(\"scan canceled\")\n\t\t\t}\n\t\t}\n\t}\n}", "func (m *AppVulnerabilityTask) SetInsights(value *string)() {\n err := m.GetBackingStore().Set(\"insights\", value)\n if err != nil {\n panic(err)\n }\n}", "func WithMiddleware(c *router.Context, next router.Handler) {\n\tdatabase, err := getDatabaseConnection(c.Context)\n\tif err != nil {\n\t\tlogging.Errorf(c.Context, \"Failed to retrieve a database connection: %s\", err.Error())\n\t\tc.Writer.Header().Set(\"Content-Type\", \"text/plain\")\n\t\tc.Writer.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\tc.Context = With(c.Context, database)\n\tnext(c)\n}", "func (da *DefaultAdvisor) adviseWithDefault(dbID int, sqlText string) (string, string, error) {\n\tuser := viper.GetString(config.DBSoarMySQLUserKey)\n\tpass := viper.GetString(config.DBSoarMySQLPassKey)\n\n\treturn da.advise(dbID, sqlText, user, pass)\n}", "func (store *middlewareStore) ApplyMiddleware(mw interface{}) error {\n\tswitch mw := mw.(type) {\n\tcase BytesReceived:\n\t\tif mw.Func == nil {\n\t\t\treturn errors.New(\"middleware function is nil\")\n\t\t}\n\t\tstore.bytesReceived = append(store.bytesReceived, mw)\n\t\tmiddleware.Sort(store.bytesReceived)\n\tcase LocalNodeWillStart:\n\t\tif mw.Func == nil {\n\t\t\treturn errors.New(\"middleware function is nil\")\n\t\t}\n\t\tstore.localNodeWillStart = append(store.localNodeWillStart, mw)\n\t\tmiddleware.Sort(store.localNodeWillStart)\n\tcase LocalNodeStarted:\n\t\tif mw.Func == nil {\n\t\t\treturn errors.New(\"middleware function is nil\")\n\t\t}\n\t\tstore.localNodeStarted = append(store.localNodeStarted, mw)\n\t\tmiddleware.Sort(store.localNodeStarted)\n\tcase LocalNodeWillStop:\n\t\tif mw.Func == nil {\n\t\t\treturn errors.New(\"middleware function is nil\")\n\t\t}\n\t\tstore.localNodeWillStop = append(store.localNodeWillStop, mw)\n\t\tmiddleware.Sort(store.localNodeWillStop)\n\tcase LocalNodeStopped:\n\t\tif mw.Func == nil {\n\t\t\treturn errors.New(\"middleware function is nil\")\n\t\t}\n\t\tstore.localNodeStopped = append(store.localNodeStopped, mw)\n\t\tmiddleware.Sort(store.localNodeStopped)\n\tcase WillConnectToNode:\n\t\tif mw.Func == nil {\n\t\t\treturn 
errors.New(\"middleware function is nil\")\n\t\t}\n\t\tstore.willConnectToNode = append(store.willConnectToNode, mw)\n\t\tmiddleware.Sort(store.willConnectToNode)\n\tcase ConnectionAccepted:\n\t\tif mw.Func == nil {\n\t\t\treturn errors.New(\"middleware function is nil\")\n\t\t}\n\t\tstore.connectionAccepted = append(store.connectionAccepted, mw)\n\t\tmiddleware.Sort(store.connectionAccepted)\n\tcase RemoteNodeConnected:\n\t\tif mw.Func == nil {\n\t\t\treturn errors.New(\"middleware function is nil\")\n\t\t}\n\t\tstore.remoteNodeConnected = append(store.remoteNodeConnected, mw)\n\t\tmiddleware.Sort(store.remoteNodeConnected)\n\tcase RemoteNodeReady:\n\t\tif mw.Func == nil {\n\t\t\treturn errors.New(\"middleware function is nil\")\n\t\t}\n\t\tstore.remoteNodeReady = append(store.remoteNodeReady, mw)\n\t\tmiddleware.Sort(store.remoteNodeReady)\n\tcase RemoteNodeDisconnected:\n\t\tif mw.Func == nil {\n\t\t\treturn errors.New(\"middleware function is nil\")\n\t\t}\n\t\tstore.remoteNodeDisconnected = append(store.remoteNodeDisconnected, mw)\n\t\tmiddleware.Sort(store.remoteNodeDisconnected)\n\tcase MessageEncoded:\n\t\tif mw.Func == nil {\n\t\t\treturn errors.New(\"middleware function is nil\")\n\t\t}\n\t\tstore.messageEncoded = append(store.messageEncoded, mw)\n\t\tmiddleware.Sort(store.messageEncoded)\n\tcase MessageWillDecode:\n\t\tif mw.Func == nil {\n\t\t\treturn errors.New(\"middleware function is nil\")\n\t\t}\n\t\tstore.messageWillDecode = append(store.messageWillDecode, mw)\n\t\tmiddleware.Sort(store.messageWillDecode)\n\tdefault:\n\t\treturn errors.New(\"unknown middleware type\")\n\t}\n\n\treturn nil\n}", "func (a *adapter) SavePolicy(model model.Model) error {\n\ta.open()\n\ta.dropTable()\n\tvar lines []policy\n\n\tfor PTYPE, ast := range model[\"p\"] {\n\t\tfor _, rule := range ast.Policy {\n\t\t\tline := a.writeTableLine(PTYPE, rule)\n\t\t\tlines = append(lines, line)\n\t\t}\n\t}\n\n\tfor PTYPE, ast := range model[\"g\"] {\n\t\tfor _, rule := range ast.Policy {\n\t\t\tline := a.writeTableLine(PTYPE, rule)\n\t\t\tlines = append(lines, line)\n\t\t}\n\t}\n\t_, err := r.DB(a.database).Table(a.table).Insert(lines).Run(a.session)\n\tif err != nil {\n\t\tfmt.Printf(\"Error: %v\\n\", err)\n\t\treturn err\n\t}\n\treturn nil\n}", "func Save(stmts []Stmt, enc Encoder) error {\n\treturn enc.Encode(stmts)\n}", "func (n *mockAgent) save() (s persistapi.AgentState) {\n\treturn\n}", "func (a *adapter) SavePolicy(model model.Model) error {\n\tif a.filtered {\n\t\treturn errors.New(\"cannot save a filtered policy\")\n\t}\n\tif err := a.collection.DropCollection(context.Background()); err != nil {\n\t\treturn err\n\t}\n\n\tvar lines []interface{}\n\n\tfor ptype, ast := range model[\"p\"] {\n\t\tfor _, rule := range ast.Policy {\n\t\t\tline := savePolicyLine(ptype, rule)\n\t\t\tlines = append(lines, &line)\n\t\t}\n\t}\n\n\tfor ptype, ast := range model[\"g\"] {\n\t\tfor _, rule := range ast.Policy {\n\t\t\tline := savePolicyLine(ptype, rule)\n\t\t\tlines = append(lines, &line)\n\t\t}\n\t}\n\n\tif _, err := a.collection.InsertMany(context.Background(), lines); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (o *ORM) Save(model interface{}) error {\n\tt := time.Now()\n\t_, clauses := o.clauses(model, true)\n\n\tstmt, args, err := o.builder.Insert(\"articles\").\n\t\tSetMap(clauses).\n\t\tSuffix(\"RETURNING *\").\n\t\tToSql()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer o.log(t, stmt, args)\n\n\treturn o.conn.QueryRowx(stmt, args...).StructScan(model)\n}", "func 
handleDBPostGeneratetokenized(w http.ResponseWriter, r *http.Request) {\n\tdefer func() {\n\t\tdb.Connection.Close(nil)\n\t}()\n var requestData modelito.RequestTokenized\n var errorGeneral string\n var errorGeneralNbr string\n \n errorGeneral=\"\"\n\n\n requestData,errorGeneral =obtainPostParmsGeneratetokenized(r,errorGeneral) //logicrequest_post.go\n\n\n\n\t////////////////////////////////////////////////validate parms\n\t/// START\n \n if errorGeneral==\"\" {\n\n\t\terrorGeneral,errorGeneralNbr= ProcessGeneratetokenized(w , requestData)\n\t}\n\n if errorGeneral!=\"\"{\n \t//send error response if any\n \t//prepare an error JSON Response, if any\n\t\tlog.Print(\"CZ STEP Get the ERROR response JSON ready\")\n\t\t\n\t\t\t/// START\n\t\tfieldDataBytesJson,err := getJsonResponseError(errorGeneral, errorGeneralNbr)\n\t\t////////// write the response (ERROR)\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.Write(fieldDataBytesJson)\t\n\t\tif(err!=nil){\n\t\t\t\n\t\t}\n\t\n } \n\t\t\t\t\t\n}", "func saveSecret(w http.ResponseWriter, r *http.Request) {\n\tdefer traceTime(here())()\n\n\tvar req StorePostRequest\n\terr := json.NewDecoder(r.Body).Decode(&req)\n\tif err != nil {\n\t\tTLog.Printf(ERROR_CONTENT, \"\", err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\terr = api.secretEngine.StoreSecretString(req.Key, req.Value)\n\tif err != nil {\n\t\tTLog.Printf(ERROR_CONTENT, \"\", err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\tTResult.Printf(VALUE_STORED, req.Key, req.Value)\n\tw.WriteHeader(http.StatusCreated)\n}", "func (a *Adapter) SavePolicy(model model.Model) error {\n\n\ta.dropTable()\n\ta.createTable()\n\n\tfor ptype, ast := range model[\"p\"] {\n\t\tfor _, rule := range ast.Policy {\n\t\t\tline := savePolicyLine(ptype, rule)\n\t\t\t_, err := a.db.Exec(sqlInsertPolicy, line.PType, line.V0, line.V1, line.V2, line.V3, line.V4, line.V5)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tfor ptype, ast := range model[\"g\"] {\n\t\tfor _, rule := range ast.Policy {\n\t\t\tline := savePolicyLine(ptype, rule)\n\t\t\t_, err := a.db.Exec(sqlInsertPolicy, line.PType, line.V0, line.V1, line.V2, line.V3, line.V4, line.V5)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func SaveQueryTextSearch(q *QueryTextSearch) {\n\tsession, err := mgo.Dial(\"mongodb://localhost\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer session.Close()\n\n\tcollQueriesTextSearch := common.GetCollection(session, \"queries.textsearch\")\n\n\terr = collQueriesTextSearch.Insert(q)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func withDB(s *mgo.Session, h http.Handler) http.Handler {\n\treturn &dbwrapper{dbSession: s, h: h}\n}", "func (analytics *Analytics) Save() {\n\tDB.Set(\"Analytics\", analytics.UserID, analytics)\n}", "func (driver *SQLDriver) Save(paste *pastes.Paste) error {\n\t// Execute an INSERT statement to create the paste\n\t_, err := driver.database.Exec(\"INSERT INTO ? 
(?, ?, ?, ?, ?, ?)\", driver.table, paste.ID, paste.Content, paste.SuggestedSyntaxType, paste.DeletionToken, paste.Created, paste.AutoDelete)\n\treturn err\n}", "func (db *wrapDB) afterCallback(scope *gorm.Scope) {\n\tv, ok := scope.Get(gormCtx)\n\tif !ok {\n\t\treturn\n\t}\n\tctx := v.(context.Context)\n\n\tspan := opentracing.SpanFromContext(ctx)\n\tif span != nil {\n\t\tmethod := strings.Split(strings.TrimSpace(scope.SQL), \" \")[0]\n\n\t\t// scope.SQL is only exist after query is executed.\n\t\text.DBStatement.Set(span, strings.ToUpper(scope.SQL))\n\t\tspan.SetTag(\"db.table\", scope.TableName())\n\t\tspan.SetTag(\"db.method\", method)\n\n\t\t// if exec is raised an error\n\t\tif scope.DB().Error != nil {\n\t\t\text.Error.Set(span, true)\n\t\t\tspan.LogFields(log.Error(scope.DB().Error))\n\t\t}\n\t\tspan.SetTag(\"db.rows_affected\", scope.DB().RowsAffected)\n\t\tspan.Finish()\n\t}\n}", "func (s *BasePlSqlParserListener) EnterMaximize_standby_db_clause(ctx *Maximize_standby_db_clauseContext) {\n}", "func (this *DBHandler) StoreRun(run *Run) {\n\ttx, err := this.db.Begin()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tstmt, err := tx.Prepare(\"insert into runs(id_wpa, id_wordlist, result, time, started, session, status) values(?, ?, ?, ?, ?, ?, ?)\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tdefer stmt.Close()\n\t_, err = stmt.Exec(run.id_wpa, run.id_wordlist, \"\", 0, run.started, run.session, RUNSTATUS_NOTSTARTED)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\ttx.Commit()\n}", "func ADDB(imr, amr operand.Op) { ctx.ADDB(imr, amr) }", "func writeToDB(exchange,queue,bindingKey,msg,errInfo string) {\n\t//fmt.Println(\"in writeToDB(),111\")\n\tstmt, err := SqlDB.Prepare(\"insert into failed_message(exchange,queue,binding_key,message,error) values(?,?,?,?,?)\")\n\tFailOnError(err, \"prepare error\")\n\t_,err = stmt.Exec(exchange, queue, bindingKey, msg, errInfo)\n\tFailOnError(err, \"insert error\")\n}", "func Save(db DB, table string, src interface{}) error {\n\treturn SaveContext(context.Background(), db, table, src)\n}", "func (f *freeClientPool) saveToDb() {\n\tnow := f.clock.Now()\n\tstorage := freeClientPoolStorage{\n\t\tLogOffset: uint64(f.logOffset(now)),\n\t\tList: make([]*freeClientPoolEntry, len(f.addressMap)),\n\t}\n\ti := 0\n\tfor _, e := range f.addressMap {\n\t\tif e.connected {\n\t\t\tf.calcLogUsage(e, now)\n\t\t}\n\t\tstorage.List[i] = e\n\t\ti++\n\t}\n\tenc, err := rlp.EncodeToBytes(storage)\n\tif err != nil {\n\t\tlog.Error(\"Failed to encode client list\", \"err\", err)\n\t} else {\n\t\tf.db.Put([]byte(\"freeClientPool\"), enc)\n\t}\n}", "func (s *TattooStorage) Dump() {\n\ts.MetadataDB.SaveIndex()\n\ts.ArticleDB.SaveIndex()\n\ts.ArticleHTMLDB.SaveIndex()\n}", "func (da *DefaultAdvisor) advise(dbID int, sqlText, user, pass string) (string, string, error) {\n\tdsn, err := da.getOnlineDSN(dbID, user, pass)\n\tif err != nil {\n\t\treturn constant.EmptyString, constant.EmptyString, err\n\t}\n\n\tcommand := fmt.Sprintf(`%s -config=%s -online-dsn=%s -query=\"%s\"`, da.soarBin, da.configFile, dsn, sqlText)\n\n\tresult, err := linux.ExecuteCommand(command)\n\tif err != nil {\n\t\treturn constant.EmptyString, constant.EmptyString, err\n\t}\n\n\treturn da.parseResult(result)\n}", "func (db *DB) writeDB(mappings map[string]string) error {\n\tvar data []byte\n\tvar err error\n\tif len(mappings) > 0 {\n\t\tdata, err = yaml.Marshal(mappings)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\ttmpfile := db.DBPath + \".tmp\"\n\terr = ioutil.WriteFile(tmpfile, data, 
0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Rename(tmpfile, db.DBPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (s *DataStore) write(query string, args ...interface{}) error {\n\ttx, stmt, err := s.writeOperation(query)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\"sql\": query, \"error\": err}).Error(\"error in sql\")\n\t\treturn err\n\t}\n\tdefer tx.Commit()\n\tdefer stmt.Close()\n\n\treturn s.execute(stmt, args...)\n}", "func SaveQueryTextWordMatch(q *QueryTextWordMatch) {\n\tsession, err := mgo.Dial(\"mongodb://localhost\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer session.Close()\n\n\tcollQueriesTextWordMatch := common.GetCollection(session, \"queries.textwordmatch\")\n\n\terr = collQueriesTextWordMatch.Insert(q)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func handleDBGeneratetokenized(w http.ResponseWriter, r *http.Request) {\n\tdefer func() {\n\t\tdb.Connection.Close(nil)\n\t}()\n var requestData modelito.RequestTokenized\n var errorGeneral string\n var errorGeneralNbr string\n \n errorGeneral=\"\"\n requestData,errorGeneral =obtainParmsGeneratetokenized(r,errorGeneral)\n\n\n\t////////////////////////////////////////////////validate parms\n\t/// START\n \n if errorGeneral==\"\" {\n\n\t\terrorGeneral,errorGeneralNbr= ProcessGeneratetokenized(w , requestData)\n\t}\n\n if errorGeneral!=\"\"{\n \t//send error response if any\n \t//prepare an error JSON Response, if any\n\t\tlog.Print(\"CZ STEP Get the ERROR response JSON ready\")\n\t\t\n\t\t\t/// START\n\t\tfieldDataBytesJson,err := getJsonResponseError(errorGeneral, errorGeneralNbr)\n\t\t////////// write the response (ERROR)\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.Write(fieldDataBytesJson)\t\n\t\tif(err!=nil){\n\t\t\t\n\t\t}\n\t\n } \n\t\t\t\t\t\n}", "func saveVote(w http.ResponseWriter, r *http.Request, app *app) error {\n\tvar vote Vote\n\tif err := json.NewDecoder(r.Body).Decode(&vote); err != nil {\n\t\treturn fmt.Errorf(\"JSON DECODE: %v\", err)\n\t}\n\n\t// [START cloud_sql_postgres_databasesql_connection]\n\tsqlInsert := \"INSERT INTO votes(artist_name,video_id,user_name,tag_id)VALUES($1, $2, $3, $4)\"\n\n\tif _, err := app.db.Exec(sqlInsert, vote.Artist, vote.VideoID, vote.UserName, vote.TagID); err != nil {\n\t\tfmt.Fprintf(w, \"unable to save vote: %s\", err)\n\t\treturn fmt.Errorf(\"DB.Exec: %v\", err)\n\t}\n\n\tfmt.Fprintf(w, \"Vote successfully cast for %s!\\n\", vote.Artist)\n\treturn nil\n\t// [END cloud_sql_postgres_databasesql_connection]\n}", "func (hr *HelpRequest) Save(db XODB) error {\n\tif hr.Exists() {\n\t\treturn hr.Update(db)\n\t}\n\n\treturn hr.Insert(db)\n}", "func (s *server) serveReqLog(w http.ResponseWriter, r *http.Request) {\n\trecievedAt := s.now().UTC()\n\tvar u url.URL\n\tu = *r.URL\n\tu.Host = r.Host\n\tu.Scheme = \"http\"\n\tif r.TLS != nil {\n\t\tu.Scheme = \"https\"\n\t}\n\turl := u.String()\n\tmethod := r.Method\n\tremote := r.RemoteAddr\n\theaders := fmt.Sprint(r.Header)\n\tlength := 0\n\tif r.Body != nil {\n\t\tb, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"serveReqLog: got error reading request body: %v\", err)\n\t\t}\n\t\tlength = len(b)\n\t}\n\tprotocol := r.Proto\n\n\t_, err := s.db.ExecContext(r.Context(),\n\t\t`insert into request (rat, url, method, remote, headers, length, protocol)\n\t\tvalues (?, ?, ?, ?, ?, ?, ?)`,\n\t\trecievedAt, url, method, remote, headers, length, protocol,\n\t)\n\tif err != nil {\n\t\tlog.Printf(\"serveReqLog: got error writing to DB: %v\", 
err)\n\t\tjsonError(w, fmt.Sprintf(\"internal error: %v\", err), 500)\n\t\treturn\n\t}\n}", "func (cxa *CasbinXormAdapter) SavePolicy(model model.Model) (err error) {\n\t//policy_definition\n\tfor ptype, ast := range model[\"p\"] {\n\t\tfor _, policy := range ast.Policy {\n\t\t\tif err = cxa.mergePolicy(\"p\", ptype, policy); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\t//role_definition\n\tfor ptype, ast := range model[\"g\"] {\n\t\tfor _, policy := range ast.Policy {\n\t\t\tif err = cxa.mergePolicy(\"g\", ptype, policy); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func PostAutoMigrate(db *gorm.DB) error {\n\t// These types don't apply for sqlite -- just mysql.\n\tif db.Dialect().GetName() == mySQLDialect {\n\t\tdb.Model(&Invocation{}).ModifyColumn(\"pattern\", \"text\")\n\t\tdb.Model(&Execution{}).ModifyColumn(\"serialized_operation\", \"text\")\n\t}\n\treturn nil\n}", "func (xsml *XfileServiceMetricLog) Save(db XODB) error {\n\tif xsml.Exists() {\n\t\treturn xsml.Update(db)\n\t}\n\n\treturn xsml.Replace(db)\n}", "func (au *AntenatalinformationUpdate) SetAdvice(s string) *AntenatalinformationUpdate {\n\tau.mutation.SetAdvice(s)\n\treturn au\n}", "func withConfig(getDB func() *sql.DB, handle func(getDB func() *sql.DB, w http.ResponseWriter, r *http.Request, ps httprouter.Params)) httprouter.Handle {\n\treturn httprouter.Handle(func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t\thandle(getDB, w, r, ps)\n\t},\n\t)\n}", "func (s *BasePlSqlParserListener) ExitSavepoint_statement(ctx *Savepoint_statementContext) {}", "func (s *State) Middleware(h middleware.Handler) middleware.Handler {\n\treturn func(c context.Context, rw http.ResponseWriter, r *http.Request, p httprouter.Params) {\n\t\tstate, settings := s.checkSettings(c)\n\t\th(c, rw, r, p)\n\t\tif settings.Enabled {\n\t\t\ts.flushIfNeeded(c, state, settings)\n\t\t}\n\t}\n}", "func (db *Database) SaveWriter(w io.Writer, passphrase string) error {\n\t// new random values\n\tsalt, err := utils.SecureRandBytes(saltLen)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb1, err := utils.SecureRandBytes(b1Len)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb2, err := utils.SecureRandBytes(b2Len)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb3, err := utils.SecureRandBytes(b3Len)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb4, err := utils.SecureRandBytes(b4Len)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tiv, err := utils.SecureRandBytes(ivLen)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// create a digest of all the record data.\n\thm := hmac.New(sha256.New, append(b3, b4...))\n\n\tvar records bytes.Buffer\n\tif err = appendFields(hm, &records, db.header.fields); err != nil {\n\t\treturn IOError.Wrap(err)\n\t}\n\tfor _, record := range db.records {\n\t\tif err = appendFields(hm, &records, record.fields); err != nil {\n\t\t\treturn IOError.Wrap(err)\n\t\t}\n\t}\n\n\t// generate encryption key\n\tpkey, phash := makeKey(passphrase, salt, hashIterations)\n\n\t// encrypt keys\n\tkey_cipher, err := twofish.NewCipher(pkey)\n\tif err != nil {\n\t\treturn Error.New(\"unable to create key cipher: %s\", err)\n\t}\n\n\t// encrypt records\n\trecords_cipher, err := twofish.NewCipher(append(b1, b2...))\n\tif err != nil {\n\t\treturn Error.New(\"unable to create records cipher: %s\", err)\n\t}\n\trecords_encrypter := cipher.NewCBCEncrypter(records_cipher, iv)\n\n\traw_records := records.Bytes()\n\trecords_encrypter.CryptBlocks(raw_records, raw_records)\n\n\tkey_cipher.Encrypt(b1, b1)\n\tkey_cipher.Encrypt(b2, 
b2)\n\tkey_cipher.Encrypt(b3, b3)\n\tkey_cipher.Encrypt(b4, b4)\n\n\t// write it all out\n\t_, err = w.Write([]byte(v3Tag))\n\tif err != nil {\n\t\treturn IOError.Wrap(err)\n\t}\n\n\t_, err = w.Write(salt)\n\tif err != nil {\n\t\treturn IOError.Wrap(err)\n\t}\n\n\terr = binary.Write(w, binary.LittleEndian, hashIterations)\n\tif err != nil {\n\t\treturn IOError.Wrap(err)\n\t}\n\n\t_, err = w.Write(phash)\n\tif err != nil {\n\t\treturn IOError.Wrap(err)\n\t}\n\n\t_, err = w.Write(b1)\n\tif err != nil {\n\t\treturn IOError.Wrap(err)\n\t}\n\n\t_, err = w.Write(b2)\n\tif err != nil {\n\t\treturn IOError.Wrap(err)\n\t}\n\n\t_, err = w.Write(b3)\n\tif err != nil {\n\t\treturn IOError.Wrap(err)\n\t}\n\n\t_, err = w.Write(b4)\n\tif err != nil {\n\t\treturn IOError.Wrap(err)\n\t}\n\n\t_, err = w.Write(iv)\n\tif err != nil {\n\t\treturn IOError.Wrap(err)\n\t}\n\n\t_, err = w.Write(raw_records)\n\tif err != nil {\n\t\treturn IOError.Wrap(err)\n\t}\n\n\t_, err = w.Write([]byte(v3EOF))\n\tif err != nil {\n\t\treturn IOError.Wrap(err)\n\t}\n\n\t_, err = w.Write(hm.Sum(nil))\n\tif err != nil {\n\t\treturn IOError.Wrap(err)\n\t}\n\n\treturn nil\n}", "func init() {\n\taspect.RegisterDialect(\"postgres\", &PostGres{})\n}", "func (s *BasePlSqlParserListener) EnterSql_statement_shortcut(ctx *Sql_statement_shortcutContext) {}", "func (a *Adapter) SavePolicy(model model.Model) error {\n\treturn errors.New(\"not implemented\")\n}", "func SaveIndex(target string, source QueryList, verbose bool) {\n\tlogm(\"INFO\", fmt.Sprintf(\"saving index to %s...\", target), verbose)\n\tfile, err := os.Create(target)\n\tcheckResult(err)\n\tdefer file.Close()\n\n\tgr := gzip.NewWriter(file)\n\tdefer gr.Close()\n\n\tencoder := gob.NewEncoder(gr)\n\n\terr = encoder.Encode(source.Names)\n\tcheckResult(err)\n\tlogm(\"INFO\", fmt.Sprintf(\"%v sequence names saved\", len(source.Names)), verbose)\n\n\terr = encoder.Encode(source.SeedSize)\n\tcheckResult(err)\n\n\terr = encoder.Encode(source.Cgst)\n\tcheckResult(err)\n\n\t// save the index, but go has a size limit\n\tindexSize := len(source.Index)\n\terr = encoder.Encode(indexSize)\n\tcheckResult(err)\n\tlogm(\"INFO\", fmt.Sprintf(\"%v queries to save...\", indexSize), verbose)\n\n\tcount := 0\n\tfor key, value := range source.Index {\n\t\terr = encoder.Encode(key)\n\t\tcheckResult(err)\n\t\terr = encoder.Encode(value)\n\t\tcheckResult(err)\n\t\tcount++\n\t\tif count%10000 == 0 {\n\t\t\tlogm(\"INFO\", fmt.Sprintf(\"processing: saved %v items\", count), false)\n\t\t}\n\t}\n\n\tlogm(\"INFO\", fmt.Sprintf(\"saving index to %s: done\", target), verbose)\n}", "func (mu *MannerUpdate) Save(ctx context.Context) (int, error) {\n\tif v, ok := mu.mutation.MannerName(); ok {\n\t\tif err := manner.MannerNameValidator(v); err != nil {\n\t\t\treturn 0, &ValidationError{Name: \"Manner_Name\", err: fmt.Errorf(\"ent: validator failed for field \\\"Manner_Name\\\": %w\", err)}\n\t\t}\n\t}\n\n\tvar (\n\t\terr error\n\t\taffected int\n\t)\n\tif len(mu.hooks) == 0 {\n\t\taffected, err = mu.sqlSave(ctx)\n\t} else {\n\t\tvar mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {\n\t\t\tmutation, ok := m.(*MannerMutation)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"unexpected mutation type %T\", m)\n\t\t\t}\n\t\t\tmu.mutation = mutation\n\t\t\taffected, err = mu.sqlSave(ctx)\n\t\t\tmutation.done = true\n\t\t\treturn affected, err\n\t\t})\n\t\tfor i := len(mu.hooks) - 1; i >= 0; i-- {\n\t\t\tmut = mu.hooks[i](mut)\n\t\t}\n\t\tif _, err := mut.Mutate(ctx, mu.mutation); err != nil 
{\n\t\t\treturn 0, err\n\t\t}\n\t}\n\treturn affected, err\n}", "func (ds *DatabaseSession) AttachMiddleware() Middleware {\n\treturn func(next http.HandlerFunc) http.HandlerFunc {\n\t\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t\tconnection := session.session.Clone()\n\n\t\t\tctx := context.WithValue(r.Context(), sessionKey, connection)\n\n\t\t\tgo func() {\n\t\t\t\tselect {\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\tconnection.Close()\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tnext.ServeHTTP(w, r.WithContext(ctx))\n\t\t}\n\t}\n}", "func (auo *AntenatalinformationUpdateOne) SetAdvice(s string) *AntenatalinformationUpdateOne {\n\tauo.mutation.SetAdvice(s)\n\treturn auo\n}", "func convertDBtoSQL(fileName string) {\n\tdb, err := LoadDBFile(fileName)\n\tif err != nil {\n panic(err)\n }\n defer db.Close()\n\t\n\ttailHeight := lastBlockHeight(db)\n\tdata := convert(db, tailHeight)\n\n\t//fmt.Println(data)\n\n\t// //create new file and write data in the file\n\tfile, err := os.Create(\"dappleyweb.sql\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer file.Close()\n\n\t_, err = file.Write([]byte(data))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func (db *MemoryStorage) EnableBatch() {\n}", "func (requestHandler *RequestHandler) handler(request events.APIGatewayProxyRequest) {\n\t//Initialize DB if requestHandler.Db = nil\n\tif errResponse := requestHandler.InitializeDB(); errResponse != (structs.ErrorResponse{}) {\n\t\tlog.Fatalf(\"Could not connect to DB when creating AOD/AODICE/QOD/QODICE\")\n\t}\n\tyear, month, day := time.Now().Date()\n\ttoday := fmt.Sprintf(\"%d-%d-%d\", year, month, day)\n\n\tvar wg sync.WaitGroup\n\twg.Add(5)\n\tgo func() { defer wg.Done(); requestHandler.insertEnglishQOD(today) }()\n\tgo func() { defer wg.Done(); requestHandler.insertIcelandicQOD(today) }()\n\tgo func() { defer wg.Done(); requestHandler.insertEnglishAOD(today) }()\n\tgo func() { defer wg.Done(); requestHandler.insertIcelandicAOD(today) }()\n\tgo func() { defer wg.Done(); requestHandler.insertTopicsQOD(today) }()\n\twg.Wait()\n}", "func (dao *SensitiveWordSqlDao) Create(word *SensitiveWord, sqlLike SqlLike) (rowID int64, err error) {\n\t// insert to sensitive word table\n\tif word == nil {\n\t\treturn\n\t}\n\tinsertStr, values := getSensitiveWordInsertSQL([]SensitiveWord{*word})\n\n\tresult, err := sqlLike.Exec(insertStr, values...)\n\tif err != nil {\n\t\tlogger.Error.Printf(\"insert sensitive word failed, sql: %s\\n\", insertStr)\n\t\tlogger.Error.Printf(\"values: %+v\\n\", values)\n\t\terr = fmt.Errorf(\"insert sensitive word failed in dao.Create, err: %s\", err.Error())\n\t\treturn\n\t}\n\n\t// get row id\n\trowID, err = result.LastInsertId()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"get row id failed in dao.Create, err: %s\", err.Error())\n\t\tlogger.Error.Printf(err.Error())\n\t\treturn\n\t}\n\tword.ID = rowID\n\n\t// insert relation\n\tinsertStr, values = getSensitiveWordRelationInsertSQL([]SensitiveWord{*word})\n\tif len(values) > 0 {\n\t\t_, err = sqlLike.Exec(insertStr, values...)\n\t\tif err != nil {\n\t\t\tlogger.Error.Printf(\"insert sensitive word relation, sql: %s\\n\", insertStr)\n\t\t\tlogger.Error.Printf(\"values: %+v\\n\", values)\n\t\t\terr = fmt.Errorf(\"insert sensitive word sentence relation in dao.Create, err: %s\", err.Error())\n\t\t}\n\t}\n\n\t// update redis\n\tnames, ierr := dao.Names(sqlLike, true)\n\tif ierr != nil {\n\t\tlogger.Error.Printf(\"get sensitive names failed, err: %s\", ierr.Error())\n\t\treturn\n\t}\n\n\tif !general.IsNil(dao.Redis) {\n\t\tierr = 
dao.Redis.Do(radix.Cmd(nil, \"DEL\", redisKey))\n\t\tif ierr != nil {\n\t\t\tlogger.Error.Print(ierr)\n\t\t\treturn\n\t\t}\n\n\t\tcmds := append([]string{redisKey}, names...)\n\t\tierr = dao.Redis.Do(radix.Cmd(nil, \"LPUSH\", cmds...))\n\t\tif ierr != nil {\n\t\t\tlogger.Error.Print(ierr)\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}", "func (su *SkillUpdate) Exec(ctx context.Context) error {\n\t_, err := su.Save(ctx)\n\treturn err\n}", "func (s *SessionStore) Save(r *http.Request, w http.ResponseWriter) {\n\ts.session.Save(r, w)\n}", "func (d *Driver) Save(\n\tctx *sql.Context,\n\ti sql.Index,\n\titer sql.PartitionIndexKeyValueIter,\n) (err error) {\n\tstart := time.Now()\n\n\tidx, ok := i.(*pilosaIndex)\n\tif !ok {\n\t\treturn errInvalidIndexType.New(i)\n\t}\n\n\tif err := idx.index.Open(); err != nil {\n\t\treturn err\n\t}\n\tdefer idx.index.Close()\n\n\tidx.wg.Add(1)\n\tdefer idx.wg.Done()\n\n\tvar b = batch{\n\t\tfields: make([]*pilosa.Field, len(idx.Expressions())),\n\t\tbitBatches: make([]*bitBatch, len(idx.Expressions())),\n\t}\n\n\tctx.Context, idx.cancel = context.WithCancel(ctx.Context)\n\tprocessingFile := d.processingFilePath(i.Database(), i.Table(), i.ID())\n\tif err := index.WriteProcessingFile(\n\t\tprocessingFile,\n\t\t[]byte{processingFileOnSave},\n\t); err != nil {\n\t\treturn err\n\t}\n\n\tpilosaIndex := idx.index\n\tvar rows uint64\n\tfor {\n\t\tp, kviter, err := iter.Next()\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\n\t\tnumRows, err := d.savePartition(ctx, p, kviter, idx, pilosaIndex, rows, &b)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\trows += numRows\n\t}\n\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"duration\": time.Since(start),\n\t\t\"pilosa\": b.timePilosa,\n\t\t\"mapping\": b.timeMapping,\n\t\t\"rows\": rows,\n\t\t\"id\": i.ID(),\n\t}).Debugf(\"finished pilosa indexing\")\n\n\treturn index.RemoveProcessingFile(processingFile)\n}", "func (s *Server) sqlHandler(w http.ResponseWriter, req *http.Request) {\n if(s.block) {\n time.Sleep(1000000* time.Second)\n }\n\n\tquery, err := ioutil.ReadAll(req.Body)\n\tif err != nil {\n\t\tlog.Printf(\"Couldn't read body: %s\", err)\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t}\n\n\tif s.leader != s.listen {\n\n\t\tcs, errLeader := transport.Encode(s.leader)\n\t\t\n\t\tif errLeader != nil {\n\t\t\thttp.Error(w, \"Only the primary can service queries, but this is a secondary\", http.StatusBadRequest)\t\n\t\t\tlog.Printf(\"Leader ain't present?: %s\", errLeader)\n\t\t\treturn\n\t\t}\n\n\t\t//_, errLeaderHealthCheck := s.client.SafeGet(cs, \"/healthcheck\") \n\n //if errLeaderHealthCheck != nil {\n // http.Error(w, \"Primary is down\", http.StatusBadRequest)\t\n // return\n //}\n\n\t\tbody, errLResp := s.client.SafePost(cs, \"/sql\", bytes.NewBufferString(string(query)))\n\t\tif errLResp != nil {\n s.block = true\n http.Error(w, \"Can't forward request to primary, gotta block now\", http.StatusBadRequest)\t\n return \n\t//\t log.Printf(\"Didn't get reply from leader: %s\", errLResp)\n\t\t}\n\n formatted := fmt.Sprintf(\"%s\", body)\n resp := []byte(formatted)\n\n\t\tw.Write(resp)\n\t\treturn\n\n\t} else {\n\n\t\tlog.Debugf(\"Primary Received query: %#v\", string(query))\n\t\tresp, err := s.execute(query)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t}\n\n\t\tw.Write(resp)\n\t\treturn\n\t}\n}", "func (application *Application) ApplyDatabase(c *web.C, h http.Handler) http.Handler {\n\tfn := func(w http.ResponseWriter, r 
*http.Request) {\n\t\tc.Env[\"DBSession\"] = application.DBSession\n\t\tc.Env[\"Config\"] = application.Configuration\n\n\t\th.ServeHTTP(w, r)\n\t}\n\treturn http.HandlerFunc(fn)\n}", "func (tu *TypetreatmentUpdate) Save(ctx context.Context) (int, error) {\n\tif v, ok := tu.mutation.Typetreatment(); ok {\n\t\tif err := typetreatment.TypetreatmentValidator(v); err != nil {\n\t\t\treturn 0, &ValidationError{Name: \"Typetreatment\", err: fmt.Errorf(\"ent: validator failed for field \\\"Typetreatment\\\": %w\", err)}\n\t\t}\n\t}\n\n\tvar (\n\t\terr error\n\t\taffected int\n\t)\n\tif len(tu.hooks) == 0 {\n\t\taffected, err = tu.sqlSave(ctx)\n\t} else {\n\t\tvar mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {\n\t\t\tmutation, ok := m.(*TypetreatmentMutation)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"unexpected mutation type %T\", m)\n\t\t\t}\n\t\t\ttu.mutation = mutation\n\t\t\taffected, err = tu.sqlSave(ctx)\n\t\t\tmutation.done = true\n\t\t\treturn affected, err\n\t\t})\n\t\tfor i := len(tu.hooks) - 1; i >= 0; i-- {\n\t\t\tmut = tu.hooks[i](mut)\n\t\t}\n\t\tif _, err := mut.Mutate(ctx, tu.mutation); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\treturn affected, err\n}", "func Madvise(b []byte, advice int) (err error) {\n\treturn\n}", "func (em *entityManager) Use(mw ...MiddlewareFunc) {\n\tem.mwStack = append(em.mwStack, mw...)\n}", "func SaveToDB(g *Game) error {\n\n db, err := GetDBConnection(g.databaseURL)\n if err != nil {\n return err\n }\n defer db.Close()\n\n for _, hero := range g.heroes {\n stmt, err := db.Prepare(\"INSERT INTO hero \" +\n \"(player_name, player_lastname, hero_name, email, twitter, hclass, hero_online, token, hero_level, race, title, ttl, xpos, ypos, \" +\n \" ring, amulet, charm, weapon, helm, tunic, gloves, shield, leggings, boots \" +\n \") \" +\n \"VALUES( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? 
) \" +\n \"ON DUPLICATE KEY UPDATE \" +\n \"hero_online=VALUES(hero_online), hero_level=VALUES(hero_level), ttl=VALUES(ttl), xpos=VALUES(xpos), ypos=VALUES(ypos), \" +\n \"ring=VALUES(ring), amulet=VALUES(amulet), charm=VALUES(charm), weapon=VALUES(weapon), \" +\n \"helm=VALUES(helm), tunic=VALUES(tunic), gloves=VALUES(gloves), shield=VALUES(shield), \" +\n \"leggings=VALUES(leggings), boots=VALUES(boots);\")\n if err != nil {\n log.Error(err)\n }\n\n ttl := int(hero.nextLevelAt.Sub(time.Now()).Seconds())\n res, err := stmt.Exec(hero.FirstName, hero.LastName, hero.HeroName, hero.Email, hero.Twitter, hero.HeroClass, hero.Enabled, hero.token,\n hero.Level, hero.HeroRace, hero.HeroTitle, ttl, hero.Xpos, hero.Ypos,\n hero.Equipment.Ring, hero.Equipment.Amulet, hero.Equipment.Charm, hero.Equipment.Weapon, hero.Equipment.Helm, hero.Equipment.Tunic, hero.Equipment.Gloves, hero.Equipment.Shield, hero.Equipment.Leggings, hero.Equipment.Boots)\n if err != nil {\n log.Error(err)\n }\n\n lastID, err := res.LastInsertId()\n if err != nil {\n log.Error(err)\n } else {\n hero.id = lastID\n }\n }\n\n return nil\n}", "func Write(ctx *gin.Context) {\n\tbegin := time.Now()\n\tlog.Logger.WithFields(logrus.Fields{\n\t\tPath: WritePath,\n\t}).Info(\"receive request from prometheus\")\n\t//解析request\n\trequest := &prompb.WriteRequest{}\n\tif err := prometheus.Unmarshal(request, ctx.Request); err != nil {\n\t\tlog.Logger.WithError(err).WithFields(logrus.Fields{\n\t\t\tPath: WritePath,\n\t\t}).Info(\"unmarshal request error\")\n\t\tctx.AbortWithError(http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\t//打印request相关信息\n\tlog.Logger.WithFields(logrus.Fields{\n\t\tPath: WritePath,\n\t}).Debug(\"request is \" + request.String())\n\tlog.Logger.Info(\"len of timeSeries is \" + strconv.Itoa(len(request.Timeseries)))\n\t//存储数据\n\tif err := Storage.Write(request.Timeseries); err != nil {\n\t\tlog.Logger.WithError(err).WithFields(logrus.Fields{\n\t\t\tPath: WritePath,\n\t\t}).Error(\"write error\")\n\t\tctx.AbortWithError(http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\t//打印消费时间\n\tconsume := time.Since(begin).Seconds()\n\tlog.Logger.WithFields(logrus.Fields{\n\t\tPath: WritePath,\n\t}).Info(\"consume time \" + strconv.FormatFloat(consume, 'f', 3, 64))\n}", "func Middleware(ce *casbin.Enforcer, sc DataSource) echo.MiddlewareFunc {\n\tc := DefaultConfig\n\tc.Enforcer = ce\n\tc.Source = sc\n\treturn MiddlewareWithConfig(c)\n}", "func (t *Title) Save() error {\n\tvar op = dbi.DB.Operation()\n\top.Dbg = dbi.Debug\n\treturn t.SaveOp(op)\n}", "func (s *BasePlSqlParserListener) EnterExplain_statement(ctx *Explain_statementContext) {}", "func (s store) Save() {\n\ts.writeToDisk()\n}", "func StorePostedSurvey(rw http.ResponseWriter, r *http.Request) {\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tlog.Printf(`event=\"Failed to read posted data\" error=\"%v\"`, err)\n\t\tapi.WriteProblemResponse(api.Problem{\n\t\t\tTitle: \"Failed to read posted data\",\n\t\t\tStatus: http.StatusBadRequest,\n\t\t}, rw)\n\t\treturn\n\t}\n\n\tvar survey Survey\n\tif err = json.Unmarshal(body, &survey); err != nil {\n\t\tlog.Printf(`event=\"Failed to parse posted data\" error=\"%v\"`, err)\n\t\tapi.WriteProblemResponse(api.Problem{\n\t\t\tTitle: \"Failed to parse posted data\",\n\t\t\tStatus: http.StatusBadRequest,\n\t\t}, rw)\n\t\treturn\n\t}\n\n\tlog.Printf(`event=\"Would be attempting to store survey\" tx_id=\"%s\"`, survey.TxID)\n\n\trw.WriteHeader(http.StatusOK)\n}", "func LogMiddleware(next http.Handler) http.Handler 
{\n\treturn http.HandlerFunc(func(responseWriter http.ResponseWriter, request *http.Request) {\n\t\tTemplateInput := getNewTemplateInput(responseWriter, request)\n\t\tlogging.WriteLog(logging.LogLevelVerbose, \"loggingmiddleware/LogMiddleware\", TemplateInput.UserInformation.GetCompositeID(), logging.ResultInfo, []string{request.RequestURI})\n\t\t//Save template input to context\n\t\trequest = request.WithContext(context.WithValue(request.Context(), TemplateInputKeyID, TemplateInput))\n\t\tnext.ServeHTTP(responseWriter, request) // call ServeHTTP on the original handler\n\t})\n}", "func (hu *HeartbeatUpdate) Save(ctx context.Context) (int, error) {\n\tvar (\n\t\terr error\n\t\taffected int\n\t)\n\tif len(hu.hooks) == 0 {\n\t\tif err = hu.check(); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\taffected, err = hu.sqlSave(ctx)\n\t} else {\n\t\tvar mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {\n\t\t\tmutation, ok := m.(*HeartbeatMutation)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"unexpected mutation type %T\", m)\n\t\t\t}\n\t\t\tif err = hu.check(); err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\thu.mutation = mutation\n\t\t\taffected, err = hu.sqlSave(ctx)\n\t\t\tmutation.done = true\n\t\t\treturn affected, err\n\t\t})\n\t\tfor i := len(hu.hooks) - 1; i >= 0; i-- {\n\t\t\tmut = hu.hooks[i](mut)\n\t\t}\n\t\tif _, err := mut.Mutate(ctx, hu.mutation); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\treturn affected, err\n}", "func ConfigureStatements(unpreparedStmts map[string]string) {\n // User Repositories.\n AddCreateUserRepositoryStatement(unpreparedStmts)\n AddGetAllUserRepositoriesStatements(unpreparedStmts)\n AddUpdateUserRepositoryStatement(unpreparedStmts)\n AddDeleteUserRepositoryStatement(unpreparedStmts)\n\n // Repositories\n AddCreateRepositoryStatement(unpreparedStmts)\n AddGetRepositoryStatement(unpreparedStmts)\n AddUpdateRepositoryStatement(unpreparedStmts)\n AddDeleteRepositoryStatement(unpreparedStmts)\n}", "func (ktu *KqiTargetUpdate) Save(ctx context.Context) (int, error) {\n\tvar (\n\t\terr error\n\t\taffected int\n\t)\n\tktu.defaults()\n\tif len(ktu.hooks) == 0 {\n\t\tif err = ktu.check(); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\taffected, err = ktu.sqlSave(ctx)\n\t} else {\n\t\tvar mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {\n\t\t\tmutation, ok := m.(*KqiTargetMutation)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"unexpected mutation type %T\", m)\n\t\t\t}\n\t\t\tif err = ktu.check(); err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\tktu.mutation = mutation\n\t\t\taffected, err = ktu.sqlSave(ctx)\n\t\t\tmutation.done = true\n\t\t\treturn affected, err\n\t\t})\n\t\tfor i := len(ktu.hooks) - 1; i >= 0; i-- {\n\t\t\tmut = ktu.hooks[i](mut)\n\t\t}\n\t\tif _, err := mut.Mutate(ctx, ktu.mutation); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\treturn affected, err\n}", "func (w *Witness) setSTH(tx *sql.Tx, logID string, sth []byte) error {\n\tif _, err := tx.Exec(`INSERT OR REPLACE INTO sths (logID, sth) VALUES (?, ?)`, logID, sth); err != nil {\n\t\treturn fmt.Errorf(\"failed to update STH; %v\", err)\n\t}\n\treturn tx.Commit()\n}", "func (s *server) middleware(n httprouter.Handle) httprouter.Handle {\n\treturn func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t\tnow := time.Now()\n\n\t\t// Set the Tarmac server response header\n\t\tw.Header().Set(\"Server\", \"tarmac\")\n\n\t\t// Log the basics\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"method\": 
r.Method,\n\t\t\t\"remote-addr\": r.RemoteAddr,\n\t\t\t\"http-protocol\": r.Proto,\n\t\t\t\"headers\": r.Header,\n\t\t\t\"content-length\": r.ContentLength,\n\t\t}).Debugf(\"HTTP Request to %s\", r.URL)\n\n\t\t// Verify if PProf\n\t\tif isPProf.MatchString(r.URL.Path) && !cfg.GetBool(\"enable_pprof\") {\n\t\t\tlog.WithFields(logrus.Fields{\n\t\t\t\t\"method\": r.Method,\n\t\t\t\t\"remote-addr\": r.RemoteAddr,\n\t\t\t\t\"http-protocol\": r.Proto,\n\t\t\t\t\"headers\": r.Header,\n\t\t\t\t\"content-length\": r.ContentLength,\n\t\t\t}).Debugf(\"Request to PProf Address failed, PProf disabled\")\n\t\t\tw.WriteHeader(http.StatusForbidden)\n\n\t\t\tstats.srv.WithLabelValues(r.URL.Path).Observe(time.Since(now).Seconds())\n\t\t\treturn\n\t\t}\n\n\t\t// Call registered handler\n\t\tn(w, r, ps)\n\t\tstats.srv.WithLabelValues(r.URL.Path).Observe(time.Since(now).Seconds())\n\t}\n}", "func (tu *TimingUpdate) Save(ctx context.Context) (int, error) {\n\n\tvar (\n\t\terr error\n\t\taffected int\n\t)\n\tif len(tu.hooks) == 0 {\n\t\taffected, err = tu.sqlSave(ctx)\n\t} else {\n\t\tvar mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {\n\t\t\tmutation, ok := m.(*TimingMutation)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"unexpected mutation type %T\", m)\n\t\t\t}\n\t\t\ttu.mutation = mutation\n\t\t\taffected, err = tu.sqlSave(ctx)\n\t\t\tmutation.done = true\n\t\t\treturn affected, err\n\t\t})\n\t\tfor i := len(tu.hooks) - 1; i >= 0; i-- {\n\t\t\tmut = tu.hooks[i](mut)\n\t\t}\n\t\tif _, err := mut.Mutate(ctx, tu.mutation); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\treturn affected, err\n}", "func (c *Conn) Savepoint(name string) error {\n\treturn c.FastExec(Mprintf(\"SAVEPOINT %Q\", name))\n}", "func sqlMigration(db *IndexerDb, state *MigrationState, sqlLines []string) error {\n\tdb.accountingLock.Lock()\n\tdefer db.accountingLock.Unlock()\n\n\tnextState := *state\n\tnextState.NextMigration++\n\n\tf := func(ctx context.Context, tx *sql.Tx) error {\n\t\tdefer tx.Rollback()\n\n\t\tfor _, cmd := range sqlLines {\n\t\t\t_, err := tx.Exec(cmd)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\n\t\t\t\t\t\"migration %d exec cmd: \\\"%s\\\" err: %w\", state.NextMigration, cmd, err)\n\t\t\t}\n\t\t}\n\t\tmigrationStateJSON := encoding.EncodeJSON(nextState)\n\t\t_, err := tx.Exec(setMetastateUpsert, migrationMetastateKey, migrationStateJSON)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"migration %d exec metastate err: %w\", state.NextMigration, err)\n\t\t}\n\t\treturn tx.Commit()\n\t}\n\terr := db.txWithRetry(context.Background(), serializable, f)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"migration %d commit err: %w\", state.NextMigration, err)\n\t}\n\n\t*state = nextState\n\treturn nil\n}" ]
[ "0.531296", "0.50333303", "0.4953532", "0.48831928", "0.48323447", "0.47431743", "0.46773672", "0.4666421", "0.46413967", "0.460012", "0.45993534", "0.45916364", "0.45906082", "0.45842862", "0.45842552", "0.45771828", "0.4524724", "0.45106146", "0.45050916", "0.45030513", "0.44853088", "0.4447671", "0.44431067", "0.44356436", "0.44301268", "0.44279352", "0.44243166", "0.4422328", "0.4420635", "0.4407059", "0.43997893", "0.43961972", "0.43638173", "0.43367425", "0.43198815", "0.43153283", "0.43122658", "0.4292737", "0.42799515", "0.42752948", "0.42699817", "0.42682993", "0.42586547", "0.42580166", "0.42560786", "0.42518643", "0.4251573", "0.42476818", "0.4244661", "0.42370078", "0.42268297", "0.42239615", "0.42236054", "0.42208433", "0.42192358", "0.42158914", "0.42098483", "0.42069882", "0.41997963", "0.4197854", "0.41843837", "0.41834214", "0.41746807", "0.416752", "0.41535205", "0.4153212", "0.4147024", "0.41370076", "0.41363463", "0.41344097", "0.412962", "0.41294116", "0.41283983", "0.41176498", "0.41114268", "0.41109762", "0.41054845", "0.41036746", "0.409923", "0.4098957", "0.40968004", "0.40952018", "0.4092259", "0.4091882", "0.40902698", "0.4087087", "0.4083731", "0.40804428", "0.40741366", "0.40693218", "0.40681404", "0.40625829", "0.40612838", "0.40601626", "0.4059566", "0.4058642", "0.40582293", "0.40577608", "0.40532714", "0.40527213", "0.40525222" ]
0.0
-1
a,b integer values, x 8/56 fixed point value
func incomplete(a, b, x int64) Fixed { // Iₓ(a,b) = (xᵃ*(1-x)ᵇ)/(a*B(a,b)) * (1/(1+(d₁/(1+(d₂/(1+...)))))) // (xᵃ*(1-x)ᵇ)/B(a,b) = exp(lgamma(a+b) - lgamma(a) - lgamma(b) + a*log(x) + b*log(1-x)) // d_{2m+1} = -(a+m)(a+b+m)x/((a+2m)(a+2m+1)) // d_{2m} = m(b-m)x/((a+2m-1)(a+2m)) if a > int64(1)<<30 || b > int64(1)<<30 { panic(ErrOverflow) } bt := fixed(0) if 0 < x && x < oneValue { bt = exp(addx(subx(lgamma(a+b), lgamma(a), lgamma(b)), alogx(x, a), alogx(oneValue-x, b))) } else if x < 0 || x > oneValue { panic(ErrOverflow) } bcfx := func() Fixed { if bt.iszero() { return bt } h := bcf(x, a, b) return div(mul(bt, h), fixed(a)) } if x > div(fixed(a+1), fixed(a+b+2)).fixed56() { // symmetry transform // 1 - bt/b*bcf(1-x,b,a) x, a, b = oneValue-x, b, a return sub(fixedOne, bcfx()) } return bcfx() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func HexAxpy(c, b []float64, s float64, ci, bi int)", "func PMINSBm128int8(X1 []int8, X2 []int8)", "func HexadecAxpy(c, b []float64, s float64, ci, bi int)", "func Changebytetoint(b []byte) (x int64) {\n\tfor i, val := range b {\n\t\tif i == 0 {\n\t\t\tx = x + int64(val)\n\t\t} else {\n\t\t\tx = x + int64(2<<7*int64(i)*int64(val))\n\t\t}\n\t}\n\treturn\n}", "func MaxInt16x8(a, b Int16x8) Int16x8", "func f ( a int , a float64) int {\r\n\r\n}", "func PMAXSBm128int8(X1 []int8, X2 []int8)", "func (difficulty *Difficulty) internalSetPdiff(f float64) float64 {\n\tif f <= 1.0 {\n\t\tdifficulty.internalSetToUnity()\n\t\treturn 1.0\n\t}\n\tdifficulty.pdiff = f\n\n\tintPart := math.Trunc(f)\n\tfracPart := math.Trunc((f - intPart) * 10 * constScale)\n\n\tq := new(big.Int)\n\tr := new(big.Int)\n\n\tq.SetUint64(uint64(intPart))\n\tr.SetUint64(uint64(fracPart))\n\tq.Mul(&scale, q)\n\tq.Add(q, r)\n\n\tq.DivMod(&one, q, r) // can get divide by zero error\n\n\tq.Mul(&scale, q)\n\tq.Add(q, r)\n\tdifficulty.big.Set(q)\n\n\tbuffer := q.Bytes()\n\tfor i, b := range buffer {\n\t\tif 0 != 0x80&b {\n\t\t\te := uint32(len(buffer) - i + 1)\n\t\t\tu := e<<24 | uint32(b)<<8\n\t\t\tif i+1 < len(buffer) {\n\t\t\t\tu |= uint32(buffer[i+1])\n\t\t\t}\n\t\t\tif i+2 < len(buffer) && 0 != 0x80&buffer[i+2] {\n\t\t\t\tif 0 == 0x00ff000&(u+1) {\n\t\t\t\t\tu += 1\n\t\t\t\t}\n\t\t\t}\n\t\t\tdifficulty.bits = u\n\t\t\tbreak\n\t\t} else if 0 != b {\n\t\t\te := uint32(len(buffer) - i)\n\t\t\tu := e<<24 | uint32(b)<<16\n\t\t\tif i+1 < len(buffer) {\n\t\t\t\tu |= uint32(buffer[i+1]) << 8\n\t\t\t}\n\t\t\tif i+2 < len(buffer) {\n\t\t\t\tu |= uint32(buffer[i+2])\n\t\t\t}\n\t\t\tif i+3 < len(buffer) && 0 != 0x80&buffer[i+3] {\n\t\t\t\tif 0 == 0x00800000&(u+1) {\n\t\t\t\t\tu += 1\n\t\t\t\t}\n\t\t\t}\n\t\t\tdifficulty.bits = u\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn difficulty.pdiff\n}", "func A(l, r, y Num) Num {\n\tin, b := Concat(y, l, r), make([]byte, 64)\n\tnumb := cdiv(int(in.Domain()), 8)\n\tin.ToBlakeBytes(b)\n\tsum := blake3.Sum256(b[:numb])\n\treturn NewNum(new(big.Int).SetBytes(sum[:]), 256)\n}", "func New_point(x big.Int,y big.Int) (Point) {\n\t\n\n\t// if !(0 <= x && float64(x)<FIELD_MODULUS) || !(0 <= y && float64(y)< FIELD_MODULUS){\n\t// \treturn Point{value: val},errors.New(\"Invalid value\")\n\t// }\n\t// y_packed = y | ((x & 1) << 255)\n\ttemp_x := x\n\ttemp_y := y\n\ty_packed := new(big.Int).Or(&temp_y,new(big.Int).Mul(new(big.Int).And(&temp_x,big.NewInt(1)),new(big.Int).Exp(big.NewInt(2), big.NewInt(255), nil)))\n\tval := y_packed.Bytes()\n\tfor i, j := 0, len(val)-1; i < j; i, j = i+1, j-1 { // reversal of bytes for little endian rep.\n\t\tval[i], val[j] = val[j], val[i]\n\t}\n\tfor len(val)<32 {\n\t\tval = append(val,0)\n\t}\n\treturn Point{x,y,val}\n\t// TODO: Error Checking\n\t\n\t\n}", "func WideAxpy(c, b []float64, s float64, ci, bi int)", "func main(){\n\ta := 40\n\tb := float64(a)\n\tc := uint(b)\n\n\tfmt.Println(a,b,c)\n\n\tfmt.Println(i,f,u)\n\n}", "func fiat_p448_from_bytes(out1 *[8]uint64, arg1 *[56]uint8) {\n var x1 uint64 = (uint64((arg1[55])) << 48)\n var x2 uint64 = (uint64((arg1[54])) << 40)\n var x3 uint64 = (uint64((arg1[53])) << 32)\n var x4 uint64 = (uint64((arg1[52])) << 24)\n var x5 uint64 = (uint64((arg1[51])) << 16)\n var x6 uint64 = (uint64((arg1[50])) << 8)\n var x7 uint8 = (arg1[49])\n var x8 uint64 = (uint64((arg1[48])) << 48)\n var x9 uint64 = (uint64((arg1[47])) << 40)\n var x10 uint64 = (uint64((arg1[46])) << 32)\n var x11 uint64 = (uint64((arg1[45])) << 24)\n var x12 uint64 = 
(uint64((arg1[44])) << 16)\n var x13 uint64 = (uint64((arg1[43])) << 8)\n var x14 uint8 = (arg1[42])\n var x15 uint64 = (uint64((arg1[41])) << 48)\n var x16 uint64 = (uint64((arg1[40])) << 40)\n var x17 uint64 = (uint64((arg1[39])) << 32)\n var x18 uint64 = (uint64((arg1[38])) << 24)\n var x19 uint64 = (uint64((arg1[37])) << 16)\n var x20 uint64 = (uint64((arg1[36])) << 8)\n var x21 uint8 = (arg1[35])\n var x22 uint64 = (uint64((arg1[34])) << 48)\n var x23 uint64 = (uint64((arg1[33])) << 40)\n var x24 uint64 = (uint64((arg1[32])) << 32)\n var x25 uint64 = (uint64((arg1[31])) << 24)\n var x26 uint64 = (uint64((arg1[30])) << 16)\n var x27 uint64 = (uint64((arg1[29])) << 8)\n var x28 uint8 = (arg1[28])\n var x29 uint64 = (uint64((arg1[27])) << 48)\n var x30 uint64 = (uint64((arg1[26])) << 40)\n var x31 uint64 = (uint64((arg1[25])) << 32)\n var x32 uint64 = (uint64((arg1[24])) << 24)\n var x33 uint64 = (uint64((arg1[23])) << 16)\n var x34 uint64 = (uint64((arg1[22])) << 8)\n var x35 uint8 = (arg1[21])\n var x36 uint64 = (uint64((arg1[20])) << 48)\n var x37 uint64 = (uint64((arg1[19])) << 40)\n var x38 uint64 = (uint64((arg1[18])) << 32)\n var x39 uint64 = (uint64((arg1[17])) << 24)\n var x40 uint64 = (uint64((arg1[16])) << 16)\n var x41 uint64 = (uint64((arg1[15])) << 8)\n var x42 uint8 = (arg1[14])\n var x43 uint64 = (uint64((arg1[13])) << 48)\n var x44 uint64 = (uint64((arg1[12])) << 40)\n var x45 uint64 = (uint64((arg1[11])) << 32)\n var x46 uint64 = (uint64((arg1[10])) << 24)\n var x47 uint64 = (uint64((arg1[9])) << 16)\n var x48 uint64 = (uint64((arg1[8])) << 8)\n var x49 uint8 = (arg1[7])\n var x50 uint64 = (uint64((arg1[6])) << 48)\n var x51 uint64 = (uint64((arg1[5])) << 40)\n var x52 uint64 = (uint64((arg1[4])) << 32)\n var x53 uint64 = (uint64((arg1[3])) << 24)\n var x54 uint64 = (uint64((arg1[2])) << 16)\n var x55 uint64 = (uint64((arg1[1])) << 8)\n var x56 uint8 = (arg1[0])\n var x57 uint64 = (x55 + uint64(x56))\n var x58 uint64 = (x54 + x57)\n var x59 uint64 = (x53 + x58)\n var x60 uint64 = (x52 + x59)\n var x61 uint64 = (x51 + x60)\n var x62 uint64 = (x50 + x61)\n var x63 uint64 = (x48 + uint64(x49))\n var x64 uint64 = (x47 + x63)\n var x65 uint64 = (x46 + x64)\n var x66 uint64 = (x45 + x65)\n var x67 uint64 = (x44 + x66)\n var x68 uint64 = (x43 + x67)\n var x69 uint64 = (x41 + uint64(x42))\n var x70 uint64 = (x40 + x69)\n var x71 uint64 = (x39 + x70)\n var x72 uint64 = (x38 + x71)\n var x73 uint64 = (x37 + x72)\n var x74 uint64 = (x36 + x73)\n var x75 uint64 = (x34 + uint64(x35))\n var x76 uint64 = (x33 + x75)\n var x77 uint64 = (x32 + x76)\n var x78 uint64 = (x31 + x77)\n var x79 uint64 = (x30 + x78)\n var x80 uint64 = (x29 + x79)\n var x81 uint64 = (x27 + uint64(x28))\n var x82 uint64 = (x26 + x81)\n var x83 uint64 = (x25 + x82)\n var x84 uint64 = (x24 + x83)\n var x85 uint64 = (x23 + x84)\n var x86 uint64 = (x22 + x85)\n var x87 uint64 = (x20 + uint64(x21))\n var x88 uint64 = (x19 + x87)\n var x89 uint64 = (x18 + x88)\n var x90 uint64 = (x17 + x89)\n var x91 uint64 = (x16 + x90)\n var x92 uint64 = (x15 + x91)\n var x93 uint64 = (x13 + uint64(x14))\n var x94 uint64 = (x12 + x93)\n var x95 uint64 = (x11 + x94)\n var x96 uint64 = (x10 + x95)\n var x97 uint64 = (x9 + x96)\n var x98 uint64 = (x8 + x97)\n var x99 uint64 = (x6 + uint64(x7))\n var x100 uint64 = (x5 + x99)\n var x101 uint64 = (x4 + x100)\n var x102 uint64 = (x3 + x101)\n var x103 uint64 = (x2 + x102)\n var x104 uint64 = (x1 + x103)\n out1[0] = x62\n out1[1] = x68\n out1[2] = x74\n out1[3] = x80\n out1[4] = x86\n 
out1[5] = x92\n out1[6] = x98\n out1[7] = x104\n}", "func fiat_p448_sat_from_bytes(out1 *[8]uint64, arg1 *[56]uint8) {\n var x1 uint64 = (uint64((arg1[55])) << 56)\n var x2 uint64 = (uint64((arg1[54])) << 48)\n var x3 uint64 = (uint64((arg1[53])) << 40)\n var x4 uint64 = (uint64((arg1[52])) << 32)\n var x5 uint64 = (uint64((arg1[51])) << 24)\n var x6 uint64 = (uint64((arg1[50])) << 16)\n var x7 uint64 = (uint64((arg1[49])) << 8)\n var x8 uint8 = (arg1[48])\n var x9 uint64 = (uint64((arg1[47])) << 56)\n var x10 uint64 = (uint64((arg1[46])) << 48)\n var x11 uint64 = (uint64((arg1[45])) << 40)\n var x12 uint64 = (uint64((arg1[44])) << 32)\n var x13 uint64 = (uint64((arg1[43])) << 24)\n var x14 uint64 = (uint64((arg1[42])) << 16)\n var x15 uint64 = (uint64((arg1[41])) << 8)\n var x16 uint8 = (arg1[40])\n var x17 uint64 = (uint64((arg1[39])) << 56)\n var x18 uint64 = (uint64((arg1[38])) << 48)\n var x19 uint64 = (uint64((arg1[37])) << 40)\n var x20 uint64 = (uint64((arg1[36])) << 32)\n var x21 uint64 = (uint64((arg1[35])) << 24)\n var x22 uint64 = (uint64((arg1[34])) << 16)\n var x23 uint64 = (uint64((arg1[33])) << 8)\n var x24 uint8 = (arg1[32])\n var x25 uint64 = (uint64((arg1[31])) << 56)\n var x26 uint64 = (uint64((arg1[30])) << 48)\n var x27 uint64 = (uint64((arg1[29])) << 40)\n var x28 uint64 = (uint64((arg1[28])) << 32)\n var x29 uint64 = (uint64((arg1[27])) << 24)\n var x30 uint64 = (uint64((arg1[26])) << 16)\n var x31 uint64 = (uint64((arg1[25])) << 8)\n var x32 uint8 = (arg1[24])\n var x33 uint64 = (uint64((arg1[23])) << 56)\n var x34 uint64 = (uint64((arg1[22])) << 48)\n var x35 uint64 = (uint64((arg1[21])) << 40)\n var x36 uint64 = (uint64((arg1[20])) << 32)\n var x37 uint64 = (uint64((arg1[19])) << 24)\n var x38 uint64 = (uint64((arg1[18])) << 16)\n var x39 uint64 = (uint64((arg1[17])) << 8)\n var x40 uint8 = (arg1[16])\n var x41 uint64 = (uint64((arg1[15])) << 56)\n var x42 uint64 = (uint64((arg1[14])) << 48)\n var x43 uint64 = (uint64((arg1[13])) << 40)\n var x44 uint64 = (uint64((arg1[12])) << 32)\n var x45 uint64 = (uint64((arg1[11])) << 24)\n var x46 uint64 = (uint64((arg1[10])) << 16)\n var x47 uint64 = (uint64((arg1[9])) << 8)\n var x48 uint8 = (arg1[8])\n var x49 uint64 = (uint64((arg1[7])) << 56)\n var x50 uint64 = (uint64((arg1[6])) << 48)\n var x51 uint64 = (uint64((arg1[5])) << 40)\n var x52 uint64 = (uint64((arg1[4])) << 32)\n var x53 uint64 = (uint64((arg1[3])) << 24)\n var x54 uint64 = (uint64((arg1[2])) << 16)\n var x55 uint64 = (uint64((arg1[1])) << 8)\n var x56 uint8 = (arg1[0])\n var x57 uint64 = (x55 + uint64(x56))\n var x58 uint64 = (x54 + x57)\n var x59 uint64 = (x53 + x58)\n var x60 uint64 = (x52 + x59)\n var x61 uint64 = (x51 + x60)\n var x62 uint64 = (x50 + x61)\n var x63 uint64 = (x49 + x62)\n var x64 uint64 = (x47 + uint64(x48))\n var x65 uint64 = (x46 + x64)\n var x66 uint64 = (x45 + x65)\n var x67 uint64 = (x44 + x66)\n var x68 uint64 = (x43 + x67)\n var x69 uint64 = (x42 + x68)\n var x70 uint64 = (x41 + x69)\n var x71 uint64 = (x39 + uint64(x40))\n var x72 uint64 = (x38 + x71)\n var x73 uint64 = (x37 + x72)\n var x74 uint64 = (x36 + x73)\n var x75 uint64 = (x35 + x74)\n var x76 uint64 = (x34 + x75)\n var x77 uint64 = (x33 + x76)\n var x78 uint64 = (x31 + uint64(x32))\n var x79 uint64 = (x30 + x78)\n var x80 uint64 = (x29 + x79)\n var x81 uint64 = (x28 + x80)\n var x82 uint64 = (x27 + x81)\n var x83 uint64 = (x26 + x82)\n var x84 uint64 = (x25 + x83)\n var x85 uint64 = (x23 + uint64(x24))\n var x86 uint64 = (x22 + x85)\n var x87 uint64 = (x21 + x86)\n 
var x88 uint64 = (x20 + x87)\n var x89 uint64 = (x19 + x88)\n var x90 uint64 = (x18 + x89)\n var x91 uint64 = (x17 + x90)\n var x92 uint64 = (x15 + uint64(x16))\n var x93 uint64 = (x14 + x92)\n var x94 uint64 = (x13 + x93)\n var x95 uint64 = (x12 + x94)\n var x96 uint64 = (x11 + x95)\n var x97 uint64 = (x10 + x96)\n var x98 uint64 = (x9 + x97)\n var x99 uint64 = (x7 + uint64(x8))\n var x100 uint64 = (x6 + x99)\n var x101 uint64 = (x5 + x100)\n var x102 uint64 = (x4 + x101)\n var x103 uint64 = (x3 + x102)\n var x104 uint64 = (x2 + x103)\n var x105 uint64 = (x1 + x104)\n out1[0] = x63\n out1[1] = x70\n out1[2] = x77\n out1[3] = x84\n out1[4] = x91\n out1[5] = x98\n out1[6] = x105\n out1[7] = uint64(0x0)\n}", "func Minf(a, b float32) float32", "func Int8s(b0, b1 []int8) int {\n\td := 0\n\tfor i, x := range b0 {\n\t\td += Int8(x, b1[i])\n\t}\n\treturn d\n}", "func RtoB(r int) uint64", "func wrap2(a,b any,f func(float64,float64)float64)any{\n var t string = fmt.Sprintf(\"%T\", a)\n var t2 string = fmt.Sprintf(\"%T\", b)\n var b2 float64\n switch t2 {\n case \"int\": b2 = float64(b.(int))\n case \"int8\": b2 = float64(b.(int8))\n case \"int16\": b2 = float64(b.(int16))\n case \"int32\": b2 = float64(b.(int32))\n case \"int64\": b2 = float64(b.(int64))\n case \"uint\": b2 = float64(b.(uint))\n case \"uint8\": b2 = float64(b.(uint8))\n case \"uint16\": b2 = float64(b.(uint16))\n case \"uint32\": b2 = float64(b.(uint32))\n case \"uint64\": b2 = float64(b.(uint64))\n case \"float32\": b2 = float64(b.(float32))\n case \"float64\": b2 = b.(float64)\n default: fmt.Println(\"Invalid type\")\n }\n switch t {\n case \"int\": return f(float64(a.(int)),b2)\n case \"int8\": return f(float64(a.(int8)),b2)\n case \"int16\": return f(float64(a.(int16)),b2)\n case \"int32\": return f(float64(a.(int32)),b2)\n case \"int64\": return f(float64(a.(int64)),b2)\n case \"uint\": return f(float64(a.(uint)),b2)\n case \"uint8\": return f(float64(a.(uint8)),b2)\n case \"uint16\": return f(float64(a.(uint16)),b2)\n case \"uint32\": return f(float64(a.(uint32)),b2)\n case \"uint64\": return f(float64(a.(uint64)),b2)\n case \"float32\": return f(float64(a.(float32)),b2)\n case \"float64\": return f(a.(float64),b2)\n default: fmt.Println(\"Invalid type\")\n }\n return nil\n}", "func main() {\n\tnum := \"0.00035485293981043127901785714285714285714285714285714285714285714285714285714285714285714285714285714285714285714285714285714285714\"\n\tsteps := 5120\n\tdelta := \"0.00000000000000000001089913504464285714285714285714285714285714285714285714285714285714285714285714285714285714285714286\"\n\tfinal := \"0.0003548529398104870825892857142857142857142857142857142857142857142857142857142857142857142857142857143\"\n\tfinalNeg := \"-0.0003548529398104870825892857142857142857142857142857142857142857142857142857142857142857142857142857143\"\n\n\tbits := uint(96)\n\n\t{ // big float\n\t\tx, _, err := big.ParseFloat(num, 10, bits, big.ToNearestEven)\n\t\tg.Die(err)\n\t\tdelta_, _, err := big.ParseFloat(delta, 10, bits, big.ToNearestEven)\n\t\tg.Die(err)\n\t\tfinal_, _, err := big.ParseFloat(final, 10, bits, big.ToNearestEven)\n\t\tg.Die(err)\n\n\t\tfor i := 0; i < steps; i++ {\n\t\t\tx.Add(x, delta_)\n\t\t}\n\t\tx.Sub(x, final_)\n\t\tfmt.Println(x.Text('f', int(bits)))\n\t}\n\n\t{ // fixnum\n\t\tx, err := naive_fixnum.FromString(num)\n\t\tg.Die(err)\n\t\tdelta_, err := naive_fixnum.FromString(delta)\n\t\tg.Die(err)\n\t\t//final_, err := fixnum.FromString(final)\n\t\t//g.Die(err)\n\t\tfinalNeg_, err := 
naive_fixnum.FromString(finalNeg)\n\t\tg.Die(err)\n\n\t\tfor i := 0; i < steps; i++ {\n\t\t\tx.Add(x, delta_)\n\t\t}\n\t\tx.Add(x, finalNeg_)\n\t\tfmt.Println(x)\n\t}\n}", "func FloatInt(x *big.Float, z *big.Int,) (*big.Int, big.Accuracy,)", "func sm2P256Square(b, a *sm2P256FieldElement) {\n\tvar tmp sm2P256LargeFieldElement\n\n\ttmp[0] = uint64(a[0]) * uint64(a[0])\n\ttmp[1] = uint64(a[0]) * (uint64(a[1]) << 1)\n\ttmp[2] = uint64(a[0])*(uint64(a[2])<<1) +\n\t\tuint64(a[1])*(uint64(a[1])<<1)\n\ttmp[3] = uint64(a[0])*(uint64(a[3])<<1) +\n\t\tuint64(a[1])*(uint64(a[2])<<1)\n\ttmp[4] = uint64(a[0])*(uint64(a[4])<<1) +\n\t\tuint64(a[1])*(uint64(a[3])<<2) +\n\t\tuint64(a[2])*uint64(a[2])\n\ttmp[5] = uint64(a[0])*(uint64(a[5])<<1) +\n\t\tuint64(a[1])*(uint64(a[4])<<1) +\n\t\tuint64(a[2])*(uint64(a[3])<<1)\n\ttmp[6] = uint64(a[0])*(uint64(a[6])<<1) +\n\t\tuint64(a[1])*(uint64(a[5])<<2) +\n\t\tuint64(a[2])*(uint64(a[4])<<1) +\n\t\tuint64(a[3])*(uint64(a[3])<<1)\n\ttmp[7] = uint64(a[0])*(uint64(a[7])<<1) +\n\t\tuint64(a[1])*(uint64(a[6])<<1) +\n\t\tuint64(a[2])*(uint64(a[5])<<1) +\n\t\tuint64(a[3])*(uint64(a[4])<<1)\n\t// tmp[8] has the greatest value of 2**61 + 2**60 + 2**61 + 2**60 + 2**60,\n\t// which is < 2**64 as required.\n\ttmp[8] = uint64(a[0])*(uint64(a[8])<<1) +\n\t\tuint64(a[1])*(uint64(a[7])<<2) +\n\t\tuint64(a[2])*(uint64(a[6])<<1) +\n\t\tuint64(a[3])*(uint64(a[5])<<2) +\n\t\tuint64(a[4])*uint64(a[4])\n\ttmp[9] = uint64(a[1])*(uint64(a[8])<<1) +\n\t\tuint64(a[2])*(uint64(a[7])<<1) +\n\t\tuint64(a[3])*(uint64(a[6])<<1) +\n\t\tuint64(a[4])*(uint64(a[5])<<1)\n\ttmp[10] = uint64(a[2])*(uint64(a[8])<<1) +\n\t\tuint64(a[3])*(uint64(a[7])<<2) +\n\t\tuint64(a[4])*(uint64(a[6])<<1) +\n\t\tuint64(a[5])*(uint64(a[5])<<1)\n\ttmp[11] = uint64(a[3])*(uint64(a[8])<<1) +\n\t\tuint64(a[4])*(uint64(a[7])<<1) +\n\t\tuint64(a[5])*(uint64(a[6])<<1)\n\ttmp[12] = uint64(a[4])*(uint64(a[8])<<1) +\n\t\tuint64(a[5])*(uint64(a[7])<<2) +\n\t\tuint64(a[6])*uint64(a[6])\n\ttmp[13] = uint64(a[5])*(uint64(a[8])<<1) +\n\t\tuint64(a[6])*(uint64(a[7])<<1)\n\ttmp[14] = uint64(a[6])*(uint64(a[8])<<1) +\n\t\tuint64(a[7])*(uint64(a[7])<<1)\n\ttmp[15] = uint64(a[7]) * (uint64(a[8]) << 1)\n\ttmp[16] = uint64(a[8]) * uint64(a[8])\n\tsm2P256ReduceDegree(b, &tmp)\n}", "func main() {\n\n\ta := \"3141592653589793238462643383279502884197169399375105820974944592\"\n\tb := \"2718281828459045235360287471352662497757247093699959574966967627\"\n\n\tx := make([]int, len(a))\n\ty := make([]int, len(b))\n\tfor i, v := range a {\n\t\tx[i], _ = strconv.Atoi(string(v))\n\t}\n\tfor i, v := range b {\n\t\ty[i], _ = strconv.Atoi(string(v))\n\t}\n\n\tout := multiplication(x, y)\n\tvar str string\n\t//remove padding zero\n\tfor _, v := range removePaddingZero(out) {\n\t\tstr += fmt.Sprint(v)\n\t}\n\n\tfmt.Println(str)\n\t//output: 8539734222673567065463550869546574495034888535765114961879601127067743044893204848617875072216249073013374895871952806582723184\n\n}", "func float642Uints(val float64) (bool, int, uint64, uint64) {\n\t//dst := make([]byte, 0, 24)\n\tvar bits uint64\n\tvar flt *floatInfo\n\tbits = math.Float64bits(val)\n\tflt = &float64info\n\n\tneg := bits>>(flt.expbits+flt.mantbits) != 0\n\texp := int(bits>>flt.mantbits) & (1<<flt.expbits - 1)\n\tmant := bits & (uint64(1)<<flt.mantbits - 1)\n\n\tswitch exp {\n\tcase 1<<flt.expbits - 1:\n\t\treturn neg, 0, 0, 0\n\tcase 0:\n\t\t// denormalized\n\t\texp++\n\tdefault:\n\t\t// add implicit top bit\n\t\tmant |= uint64(1) << flt.mantbits\n\t}\n\n\texp += flt.bias\n\n\tvar prec int\n\tvar digs 
decimalSlice\n\tok := false\n\t// Try Grisu3 algorithm.\n\tf := new(extFloat)\n\tlower, upper := f.AssignComputeBounds(mant, exp, neg, flt)\n\tvar buf [32]byte\n\tdigs.d = buf[:]\n\tok = f.ShortestDecimal(&digs, &lower, &upper)\n\tif !ok {\n\t\td := new(decimal)\n\t\td.Assign(mant)\n\t\td.Shift(exp - int(flt.mantbits))\n\t\tvar digs decimalSlice\n\t\troundShortest(d, mant, exp, flt)\n\t\tdigs = decimalSlice{d: d.d[:], nd: d.nd, dp: d.dp}\n\t\t// Precision for shortest representation mode.\n\t\tprec = max(digs.nd-digs.dp, 0)\n\t} else {\n\t\tprec = max(digs.nd-digs.dp, 0)\n\t}\n\t//\n\tvar integer, fraction uint64\n\n\t// integer, padded with zeros as needed.\n\tif digs.dp > 0 {\n\t\tm := min(digs.nd, digs.dp)\n\t\tfor i := 0; i < m; i++ {\n\t\t\tinteger *= 10\n\t\t\tinteger += uint64(digs.d[i]-'0')\n\t\t}\n\t\tfor ; m < digs.dp; m++ {\n\t\t\tinteger *= 10\n\t\t}\n\t}\n\n\t// fraction\n\tif prec > 0 {\n\t\tfor i := 0; i < prec; i++ {\n\t\t\tch := uint64(0)\n\t\t\tif j := digs.dp + i; 0 <= j && j < digs.nd {\n\t\t\t\tch = uint64(digs.d[j]-'0')\n\t\t\t}\n\t\t\tfraction *= 10\n\t\t\tfraction += ch\n\t\t}\n\t}\n\n\treturn neg, prec, integer, fraction\n}", "func getUint64BE(b *[8]byte) uint64 {\n\tvar v uint64\n\tv += uint64(b[0]) << 56\n\tv += uint64(b[1]) << 48\n\tv += uint64(b[2]) << 40\n\tv += uint64(b[3]) << 32\n\tv += uint64(b[4]) << 24\n\tv += uint64(b[5]) << 16\n\tv += uint64(b[6]) << 8\n\tv += uint64(b[7])\n\treturn v\n}", "func FloatSetInt(z *big.Float, x *big.Int,) *big.Float", "func PMINSBm128byte(X1 []byte, X2 []byte)", "func Int8(a, b interface{}) int {\n\ti1, _ := a.(int8)\n\ti2, _ := b.(int8)\n\tswitch {\n\tcase i1 < i2:\n\t\treturn -1\n\tcase i1 > i2:\n\t\treturn 1\n\tdefault:\n\t\treturn 0\n\t}\n}", "func fiat_p448_carry_square(out1 *[8]uint64, arg1 *[8]uint64) {\n var x1 uint64 = (arg1[7])\n var x2 uint64 = (arg1[7])\n var x3 uint64 = (x1 * 0x2)\n var x4 uint64 = (x2 * 0x2)\n var x5 uint64 = ((arg1[7]) * 0x2)\n var x6 uint64 = (arg1[6])\n var x7 uint64 = (arg1[6])\n var x8 uint64 = (x6 * 0x2)\n var x9 uint64 = (x7 * 0x2)\n var x10 uint64 = ((arg1[6]) * 0x2)\n var x11 uint64 = (arg1[5])\n var x12 uint64 = (arg1[5])\n var x13 uint64 = (x11 * 0x2)\n var x14 uint64 = (x12 * 0x2)\n var x15 uint64 = ((arg1[5]) * 0x2)\n var x16 uint64 = (arg1[4])\n var x17 uint64 = (arg1[4])\n var x18 uint64 = ((arg1[4]) * 0x2)\n var x19 uint64 = ((arg1[3]) * 0x2)\n var x20 uint64 = ((arg1[2]) * 0x2)\n var x21 uint64 = ((arg1[1]) * 0x2)\n var x22 uint64\n var x23 uint64\n x23, x22 = bits.Mul64((arg1[7]), x1)\n var x24 uint64\n var x25 uint64\n x25, x24 = bits.Mul64((arg1[6]), x3)\n var x26 uint64\n var x27 uint64\n x27, x26 = bits.Mul64((arg1[6]), x6)\n var x28 uint64\n var x29 uint64\n x29, x28 = bits.Mul64((arg1[5]), x3)\n var x30 uint64\n var x31 uint64\n x31, x30 = bits.Mul64((arg1[7]), x1)\n var x32 uint64\n var x33 uint64\n x33, x32 = bits.Mul64((arg1[6]), x3)\n var x34 uint64\n var x35 uint64\n x35, x34 = bits.Mul64((arg1[6]), x6)\n var x36 uint64\n var x37 uint64\n x37, x36 = bits.Mul64((arg1[5]), x3)\n var x38 uint64\n var x39 uint64\n x39, x38 = bits.Mul64((arg1[7]), x2)\n var x40 uint64\n var x41 uint64\n x41, x40 = bits.Mul64((arg1[6]), x4)\n var x42 uint64\n var x43 uint64\n x43, x42 = bits.Mul64((arg1[6]), x7)\n var x44 uint64\n var x45 uint64\n x45, x44 = bits.Mul64((arg1[5]), x4)\n var x46 uint64\n var x47 uint64\n x47, x46 = bits.Mul64((arg1[5]), x9)\n var x48 uint64\n var x49 uint64\n x49, x48 = bits.Mul64((arg1[5]), x8)\n var x50 uint64\n var x51 uint64\n x51, x50 = 
bits.Mul64((arg1[5]), x12)\n var x52 uint64\n var x53 uint64\n x53, x52 = bits.Mul64((arg1[5]), x11)\n var x54 uint64\n var x55 uint64\n x55, x54 = bits.Mul64((arg1[4]), x4)\n var x56 uint64\n var x57 uint64\n x57, x56 = bits.Mul64((arg1[4]), x3)\n var x58 uint64\n var x59 uint64\n x59, x58 = bits.Mul64((arg1[4]), x9)\n var x60 uint64\n var x61 uint64\n x61, x60 = bits.Mul64((arg1[4]), x8)\n var x62 uint64\n var x63 uint64\n x63, x62 = bits.Mul64((arg1[4]), x14)\n var x64 uint64\n var x65 uint64\n x65, x64 = bits.Mul64((arg1[4]), x13)\n var x66 uint64\n var x67 uint64\n x67, x66 = bits.Mul64((arg1[4]), x17)\n var x68 uint64\n var x69 uint64\n x69, x68 = bits.Mul64((arg1[4]), x16)\n var x70 uint64\n var x71 uint64\n x71, x70 = bits.Mul64((arg1[3]), x4)\n var x72 uint64\n var x73 uint64\n x73, x72 = bits.Mul64((arg1[3]), x3)\n var x74 uint64\n var x75 uint64\n x75, x74 = bits.Mul64((arg1[3]), x9)\n var x76 uint64\n var x77 uint64\n x77, x76 = bits.Mul64((arg1[3]), x8)\n var x78 uint64\n var x79 uint64\n x79, x78 = bits.Mul64((arg1[3]), x14)\n var x80 uint64\n var x81 uint64\n x81, x80 = bits.Mul64((arg1[3]), x13)\n var x82 uint64\n var x83 uint64\n x83, x82 = bits.Mul64((arg1[3]), x18)\n var x84 uint64\n var x85 uint64\n x85, x84 = bits.Mul64((arg1[3]), (arg1[3]))\n var x86 uint64\n var x87 uint64\n x87, x86 = bits.Mul64((arg1[2]), x4)\n var x88 uint64\n var x89 uint64\n x89, x88 = bits.Mul64((arg1[2]), x3)\n var x90 uint64\n var x91 uint64\n x91, x90 = bits.Mul64((arg1[2]), x9)\n var x92 uint64\n var x93 uint64\n x93, x92 = bits.Mul64((arg1[2]), x8)\n var x94 uint64\n var x95 uint64\n x95, x94 = bits.Mul64((arg1[2]), x15)\n var x96 uint64\n var x97 uint64\n x97, x96 = bits.Mul64((arg1[2]), x18)\n var x98 uint64\n var x99 uint64\n x99, x98 = bits.Mul64((arg1[2]), x19)\n var x100 uint64\n var x101 uint64\n x101, x100 = bits.Mul64((arg1[2]), (arg1[2]))\n var x102 uint64\n var x103 uint64\n x103, x102 = bits.Mul64((arg1[1]), x4)\n var x104 uint64\n var x105 uint64\n x105, x104 = bits.Mul64((arg1[1]), x3)\n var x106 uint64\n var x107 uint64\n x107, x106 = bits.Mul64((arg1[1]), x10)\n var x108 uint64\n var x109 uint64\n x109, x108 = bits.Mul64((arg1[1]), x15)\n var x110 uint64\n var x111 uint64\n x111, x110 = bits.Mul64((arg1[1]), x18)\n var x112 uint64\n var x113 uint64\n x113, x112 = bits.Mul64((arg1[1]), x19)\n var x114 uint64\n var x115 uint64\n x115, x114 = bits.Mul64((arg1[1]), x20)\n var x116 uint64\n var x117 uint64\n x117, x116 = bits.Mul64((arg1[1]), (arg1[1]))\n var x118 uint64\n var x119 uint64\n x119, x118 = bits.Mul64((arg1[0]), x5)\n var x120 uint64\n var x121 uint64\n x121, x120 = bits.Mul64((arg1[0]), x10)\n var x122 uint64\n var x123 uint64\n x123, x122 = bits.Mul64((arg1[0]), x15)\n var x124 uint64\n var x125 uint64\n x125, x124 = bits.Mul64((arg1[0]), x18)\n var x126 uint64\n var x127 uint64\n x127, x126 = bits.Mul64((arg1[0]), x19)\n var x128 uint64\n var x129 uint64\n x129, x128 = bits.Mul64((arg1[0]), x20)\n var x130 uint64\n var x131 uint64\n x131, x130 = bits.Mul64((arg1[0]), x21)\n var x132 uint64\n var x133 uint64\n x133, x132 = bits.Mul64((arg1[0]), (arg1[0]))\n var x134 uint64\n var x135 fiat_p448_uint1\n x134, x135 = fiat_p448_addcarryx_u64(x54, x46, 0x0)\n var x136 uint64\n x136, _ = fiat_p448_addcarryx_u64(x55, x47, x135)\n var x138 uint64\n var x139 fiat_p448_uint1\n x138, x139 = fiat_p448_addcarryx_u64(x114, x134, 0x0)\n var x140 uint64\n x140, _ = fiat_p448_addcarryx_u64(x115, x136, x139)\n var x142 uint64\n var x143 fiat_p448_uint1\n x142, x143 = 
fiat_p448_addcarryx_u64(x126, x138, 0x0)\n var x144 uint64\n x144, _ = fiat_p448_addcarryx_u64(x127, x140, x143)\n var x146 uint64 = ((x142 >> 56) | ((x144 << 8) & 0xffffffffffffffff))\n var x147 uint64 = (x142 & 0xffffffffffffff)\n var x148 uint64\n var x149 fiat_p448_uint1\n x148, x149 = fiat_p448_addcarryx_u64(x56, x48, 0x0)\n var x150 uint64\n x150, _ = fiat_p448_addcarryx_u64(x57, x49, x149)\n var x152 uint64\n var x153 fiat_p448_uint1\n x152, x153 = fiat_p448_addcarryx_u64(x82, x148, 0x0)\n var x154 uint64\n x154, _ = fiat_p448_addcarryx_u64(x83, x150, x153)\n var x156 uint64\n var x157 fiat_p448_uint1\n x156, x157 = fiat_p448_addcarryx_u64(x94, x152, 0x0)\n var x158 uint64\n x158, _ = fiat_p448_addcarryx_u64(x95, x154, x157)\n var x160 uint64\n var x161 fiat_p448_uint1\n x160, x161 = fiat_p448_addcarryx_u64(x106, x156, 0x0)\n var x162 uint64\n x162, _ = fiat_p448_addcarryx_u64(x107, x158, x161)\n var x164 uint64\n var x165 fiat_p448_uint1\n x164, x165 = fiat_p448_addcarryx_u64(x118, x160, 0x0)\n var x166 uint64\n x166, _ = fiat_p448_addcarryx_u64(x119, x162, x165)\n var x168 uint64\n var x169 fiat_p448_uint1\n x168, x169 = fiat_p448_addcarryx_u64(x38, x30, 0x0)\n var x170 uint64\n x170, _ = fiat_p448_addcarryx_u64(x39, x31, x169)\n var x172 uint64\n var x173 fiat_p448_uint1\n x172, x173 = fiat_p448_addcarryx_u64(x52, x168, 0x0)\n var x174 uint64\n x174, _ = fiat_p448_addcarryx_u64(x53, x170, x173)\n var x176 uint64\n var x177 fiat_p448_uint1\n x176, x177 = fiat_p448_addcarryx_u64(x60, x172, 0x0)\n var x178 uint64\n x178, _ = fiat_p448_addcarryx_u64(x61, x174, x177)\n var x180 uint64\n var x181 fiat_p448_uint1\n x180, x181 = fiat_p448_addcarryx_u64(x72, x176, 0x0)\n var x182 uint64\n x182, _ = fiat_p448_addcarryx_u64(x73, x178, x181)\n var x184 uint64\n var x185 fiat_p448_uint1\n x184, x185 = fiat_p448_addcarryx_u64(x84, x180, 0x0)\n var x186 uint64\n x186, _ = fiat_p448_addcarryx_u64(x85, x182, x185)\n var x188 uint64\n var x189 fiat_p448_uint1\n x188, x189 = fiat_p448_addcarryx_u64(x96, x184, 0x0)\n var x190 uint64\n x190, _ = fiat_p448_addcarryx_u64(x97, x186, x189)\n var x192 uint64\n var x193 fiat_p448_uint1\n x192, x193 = fiat_p448_addcarryx_u64(x108, x188, 0x0)\n var x194 uint64\n x194, _ = fiat_p448_addcarryx_u64(x109, x190, x193)\n var x196 uint64\n var x197 fiat_p448_uint1\n x196, x197 = fiat_p448_addcarryx_u64(x120, x192, 0x0)\n var x198 uint64\n x198, _ = fiat_p448_addcarryx_u64(x121, x194, x197)\n var x200 uint64\n var x201 fiat_p448_uint1\n x200, x201 = fiat_p448_addcarryx_u64(x40, x32, 0x0)\n var x202 uint64\n x202, _ = fiat_p448_addcarryx_u64(x41, x33, x201)\n var x204 uint64\n var x205 fiat_p448_uint1\n x204, x205 = fiat_p448_addcarryx_u64(x64, x200, 0x0)\n var x206 uint64\n x206, _ = fiat_p448_addcarryx_u64(x65, x202, x205)\n var x208 uint64\n var x209 fiat_p448_uint1\n x208, x209 = fiat_p448_addcarryx_u64(x76, x204, 0x0)\n var x210 uint64\n x210, _ = fiat_p448_addcarryx_u64(x77, x206, x209)\n var x212 uint64\n var x213 fiat_p448_uint1\n x212, x213 = fiat_p448_addcarryx_u64(x88, x208, 0x0)\n var x214 uint64\n x214, _ = fiat_p448_addcarryx_u64(x89, x210, x213)\n var x216 uint64\n var x217 fiat_p448_uint1\n x216, x217 = fiat_p448_addcarryx_u64(x98, x212, 0x0)\n var x218 uint64\n x218, _ = fiat_p448_addcarryx_u64(x99, x214, x217)\n var x220 uint64\n var x221 fiat_p448_uint1\n x220, x221 = fiat_p448_addcarryx_u64(x110, x216, 0x0)\n var x222 uint64\n x222, _ = fiat_p448_addcarryx_u64(x111, x218, x221)\n var x224 uint64\n var x225 fiat_p448_uint1\n x224, x225 = 
fiat_p448_addcarryx_u64(x122, x220, 0x0)\n var x226 uint64\n x226, _ = fiat_p448_addcarryx_u64(x123, x222, x225)\n var x228 uint64\n var x229 fiat_p448_uint1\n x228, x229 = fiat_p448_addcarryx_u64(x36, x34, 0x0)\n var x230 uint64\n x230, _ = fiat_p448_addcarryx_u64(x37, x35, x229)\n var x232 uint64\n var x233 fiat_p448_uint1\n x232, x233 = fiat_p448_addcarryx_u64(x42, x228, 0x0)\n var x234 uint64\n x234, _ = fiat_p448_addcarryx_u64(x43, x230, x233)\n var x236 uint64\n var x237 fiat_p448_uint1\n x236, x237 = fiat_p448_addcarryx_u64(x44, x232, 0x0)\n var x238 uint64\n x238, _ = fiat_p448_addcarryx_u64(x45, x234, x237)\n var x240 uint64\n var x241 fiat_p448_uint1\n x240, x241 = fiat_p448_addcarryx_u64(x68, x236, 0x0)\n var x242 uint64\n x242, _ = fiat_p448_addcarryx_u64(x69, x238, x241)\n var x244 uint64\n var x245 fiat_p448_uint1\n x244, x245 = fiat_p448_addcarryx_u64(x80, x240, 0x0)\n var x246 uint64\n x246, _ = fiat_p448_addcarryx_u64(x81, x242, x245)\n var x248 uint64\n var x249 fiat_p448_uint1\n x248, x249 = fiat_p448_addcarryx_u64(x92, x244, 0x0)\n var x250 uint64\n x250, _ = fiat_p448_addcarryx_u64(x93, x246, x249)\n var x252 uint64\n var x253 fiat_p448_uint1\n x252, x253 = fiat_p448_addcarryx_u64(x100, x248, 0x0)\n var x254 uint64\n x254, _ = fiat_p448_addcarryx_u64(x101, x250, x253)\n var x256 uint64\n var x257 fiat_p448_uint1\n x256, x257 = fiat_p448_addcarryx_u64(x104, x252, 0x0)\n var x258 uint64\n x258, _ = fiat_p448_addcarryx_u64(x105, x254, x257)\n var x260 uint64\n var x261 fiat_p448_uint1\n x260, x261 = fiat_p448_addcarryx_u64(x112, x256, 0x0)\n var x262 uint64\n x262, _ = fiat_p448_addcarryx_u64(x113, x258, x261)\n var x264 uint64\n var x265 fiat_p448_uint1\n x264, x265 = fiat_p448_addcarryx_u64(x124, x260, 0x0)\n var x266 uint64\n x266, _ = fiat_p448_addcarryx_u64(x125, x262, x265)\n var x268 uint64\n var x269 fiat_p448_uint1\n x268, x269 = fiat_p448_addcarryx_u64(x50, x22, 0x0)\n var x270 uint64\n x270, _ = fiat_p448_addcarryx_u64(x51, x23, x269)\n var x272 uint64\n var x273 fiat_p448_uint1\n x272, x273 = fiat_p448_addcarryx_u64(x58, x268, 0x0)\n var x274 uint64\n x274, _ = fiat_p448_addcarryx_u64(x59, x270, x273)\n var x276 uint64\n var x277 fiat_p448_uint1\n x276, x277 = fiat_p448_addcarryx_u64(x70, x272, 0x0)\n var x278 uint64\n x278, _ = fiat_p448_addcarryx_u64(x71, x274, x277)\n var x280 uint64\n var x281 fiat_p448_uint1\n x280, x281 = fiat_p448_addcarryx_u64(x116, x276, 0x0)\n var x282 uint64\n x282, _ = fiat_p448_addcarryx_u64(x117, x278, x281)\n var x284 uint64\n var x285 fiat_p448_uint1\n x284, x285 = fiat_p448_addcarryx_u64(x128, x280, 0x0)\n var x286 uint64\n x286, _ = fiat_p448_addcarryx_u64(x129, x282, x285)\n var x288 uint64\n var x289 fiat_p448_uint1\n x288, x289 = fiat_p448_addcarryx_u64(x62, x24, 0x0)\n var x290 uint64\n x290, _ = fiat_p448_addcarryx_u64(x63, x25, x289)\n var x292 uint64\n var x293 fiat_p448_uint1\n x292, x293 = fiat_p448_addcarryx_u64(x74, x288, 0x0)\n var x294 uint64\n x294, _ = fiat_p448_addcarryx_u64(x75, x290, x293)\n var x296 uint64\n var x297 fiat_p448_uint1\n x296, x297 = fiat_p448_addcarryx_u64(x86, x292, 0x0)\n var x298 uint64\n x298, _ = fiat_p448_addcarryx_u64(x87, x294, x297)\n var x300 uint64\n var x301 fiat_p448_uint1\n x300, x301 = fiat_p448_addcarryx_u64(x130, x296, 0x0)\n var x302 uint64\n x302, _ = fiat_p448_addcarryx_u64(x131, x298, x301)\n var x304 uint64\n var x305 fiat_p448_uint1\n x304, x305 = fiat_p448_addcarryx_u64(x28, x26, 0x0)\n var x306 uint64\n x306, _ = fiat_p448_addcarryx_u64(x29, x27, x305)\n var x308 
uint64\n var x309 fiat_p448_uint1\n x308, x309 = fiat_p448_addcarryx_u64(x66, x304, 0x0)\n var x310 uint64\n x310, _ = fiat_p448_addcarryx_u64(x67, x306, x309)\n var x312 uint64\n var x313 fiat_p448_uint1\n x312, x313 = fiat_p448_addcarryx_u64(x78, x308, 0x0)\n var x314 uint64\n x314, _ = fiat_p448_addcarryx_u64(x79, x310, x313)\n var x316 uint64\n var x317 fiat_p448_uint1\n x316, x317 = fiat_p448_addcarryx_u64(x90, x312, 0x0)\n var x318 uint64\n x318, _ = fiat_p448_addcarryx_u64(x91, x314, x317)\n var x320 uint64\n var x321 fiat_p448_uint1\n x320, x321 = fiat_p448_addcarryx_u64(x102, x316, 0x0)\n var x322 uint64\n x322, _ = fiat_p448_addcarryx_u64(x103, x318, x321)\n var x324 uint64\n var x325 fiat_p448_uint1\n x324, x325 = fiat_p448_addcarryx_u64(x132, x320, 0x0)\n var x326 uint64\n x326, _ = fiat_p448_addcarryx_u64(x133, x322, x325)\n var x328 uint64\n var x329 fiat_p448_uint1\n x328, x329 = fiat_p448_addcarryx_u64(x146, x264, 0x0)\n var x330 uint64 = (uint64(x329) + x266)\n var x331 uint64 = ((x164 >> 56) | ((x166 << 8) & 0xffffffffffffffff))\n var x332 uint64 = (x164 & 0xffffffffffffff)\n var x333 uint64\n var x334 fiat_p448_uint1\n x333, x334 = fiat_p448_addcarryx_u64(x328, x331, 0x0)\n var x335 uint64 = (uint64(x334) + x330)\n var x336 uint64 = ((x333 >> 56) | ((x335 << 8) & 0xffffffffffffffff))\n var x337 uint64 = (x333 & 0xffffffffffffff)\n var x338 uint64\n var x339 fiat_p448_uint1\n x338, x339 = fiat_p448_addcarryx_u64(x324, x331, 0x0)\n var x340 uint64 = (uint64(x339) + x326)\n var x341 uint64\n var x342 fiat_p448_uint1\n x341, x342 = fiat_p448_addcarryx_u64(x336, x224, 0x0)\n var x343 uint64 = (uint64(x342) + x226)\n var x344 uint64 = ((x338 >> 56) | ((x340 << 8) & 0xffffffffffffffff))\n var x345 uint64 = (x338 & 0xffffffffffffff)\n var x346 uint64\n var x347 fiat_p448_uint1\n x346, x347 = fiat_p448_addcarryx_u64(x344, x300, 0x0)\n var x348 uint64 = (uint64(x347) + x302)\n var x349 uint64 = ((x341 >> 56) | ((x343 << 8) & 0xffffffffffffffff))\n var x350 uint64 = (x341 & 0xffffffffffffff)\n var x351 uint64\n var x352 fiat_p448_uint1\n x351, x352 = fiat_p448_addcarryx_u64(x349, x196, 0x0)\n var x353 uint64 = (uint64(x352) + x198)\n var x354 uint64 = ((x346 >> 56) | ((x348 << 8) & 0xffffffffffffffff))\n var x355 uint64 = (x346 & 0xffffffffffffff)\n var x356 uint64\n var x357 fiat_p448_uint1\n x356, x357 = fiat_p448_addcarryx_u64(x354, x284, 0x0)\n var x358 uint64 = (uint64(x357) + x286)\n var x359 uint64 = ((x351 >> 56) | ((x353 << 8) & 0xffffffffffffffff))\n var x360 uint64 = (x351 & 0xffffffffffffff)\n var x361 uint64 = (x359 + x332)\n var x362 uint64 = ((x356 >> 56) | ((x358 << 8) & 0xffffffffffffffff))\n var x363 uint64 = (x356 & 0xffffffffffffff)\n var x364 uint64 = (x362 + x147)\n var x365 uint64 = (x361 >> 56)\n var x366 uint64 = (x361 & 0xffffffffffffff)\n var x367 uint64 = (x364 >> 56)\n var x368 uint64 = (x364 & 0xffffffffffffff)\n var x369 uint64 = (x337 + x365)\n var x370 uint64 = (x345 + x365)\n var x371 uint64 = (x367 + x369)\n var x372 fiat_p448_uint1 = fiat_p448_uint1((x371 >> 56))\n var x373 uint64 = (x371 & 0xffffffffffffff)\n var x374 uint64 = (uint64(x372) + x350)\n var x375 fiat_p448_uint1 = fiat_p448_uint1((x370 >> 56))\n var x376 uint64 = (x370 & 0xffffffffffffff)\n var x377 uint64 = (uint64(x375) + x355)\n out1[0] = x376\n out1[1] = x377\n out1[2] = x363\n out1[3] = x368\n out1[4] = x373\n out1[5] = x374\n out1[6] = x360\n out1[7] = x366\n}", "func lookup8(k []byte, level uint64) uint64 {\n\t// uint8_t *k; /* the key */\n\t// uint64_t length; /* the length 
of the key */\n\t// uint64_t level; /* the previous hash, or an arbitrary value */\n\tvar a, b, c uint64\n\tvar length int\n\n\t/* Set up the internal state */\n\tlength = len(k)\n\ta = level\n\tb = level /* the previous hash value */\n\tc = 0x9e3779b97f4a7c13 /* the golden ratio; an arbitrary value */\n\tp := 0\n\t/*---------------------------------------- handle most of the key */\n\tfor length >= 24 {\n\t\ta += uint64(k[p+0]) + (uint64(k[p+1]) << 8) + (uint64(k[p+2]) << 16) + (uint64(k[p+3]) << 24) + (uint64(k[p+4]) << 32) + (uint64(k[p+5]) << 40) + (uint64(k[p+6]) << 48) + (uint64(k[p+7]) << 56)\n\t\tb += uint64(k[p+8]) + (uint64(k[p+9]) << 8) + (uint64(k[p+10]) << 16) + (uint64(k[p+11]) << 24) + (uint64(k[p+12]) << 32) + (uint64(k[p+13]) << 40) + (uint64(k[p+14]) << 48) + (uint64(k[p+15]) << 56)\n\t\tc += uint64(k[p+16]) + (uint64(k[p+17]) << 8) + (uint64(k[p+18]) << 16) + (uint64(k[p+19]) << 24) + (uint64(k[p+20]) << 32) + (uint64(k[p+21]) << 40) + (uint64(k[p+22]) << 48) + (uint64(k[p+23]) << 56)\n\t\tmix64(&a, &b, &c)\n\t\tp += 24\n\t\tlength -= 24\n\t}\n\n\t/*------------------------------------- handle the last 23 bytes */\n\tc += uint64(len(k))\n\tswitch length { /* all the case statements fall through */\n\tcase 23:\n\t\tc += (uint64(k[p+22]) << 56)\n\t\tfallthrough\n\tcase 22:\n\t\tc += (uint64(k[p+21]) << 48)\n\t\tfallthrough\n\tcase 21:\n\t\tc += (uint64(k[p+20]) << 40)\n\t\tfallthrough\n\tcase 20:\n\t\tc += (uint64(k[p+19]) << 32)\n\t\tfallthrough\n\tcase 19:\n\t\tc += (uint64(k[p+18]) << 24)\n\t\tfallthrough\n\tcase 18:\n\t\tc += (uint64(k[p+17]) << 16)\n\t\tfallthrough\n\tcase 17:\n\t\tc += (uint64(k[p+16]) << 8)\n\t\tfallthrough\n\t/* the first byte of c is reserved for the length */\n\tcase 16:\n\t\tb += (uint64(k[p+15]) << 56)\n\t\tfallthrough\n\tcase 15:\n\t\tb += (uint64(k[p+14]) << 48)\n\t\tfallthrough\n\tcase 14:\n\t\tb += (uint64(k[p+13]) << 40)\n\t\tfallthrough\n\tcase 13:\n\t\tb += (uint64(k[p+12]) << 32)\n\t\tfallthrough\n\tcase 12:\n\t\tb += (uint64(k[p+11]) << 24)\n\t\tfallthrough\n\tcase 11:\n\t\tb += (uint64(k[p+10]) << 16)\n\t\tfallthrough\n\tcase 10:\n\t\tb += (uint64(k[p+9]) << 8)\n\t\tfallthrough\n\tcase 9:\n\t\tb += (uint64(k[p+8]))\n\t\tfallthrough\n\tcase 8:\n\t\ta += (uint64(k[p+7]) << 56)\n\t\tfallthrough\n\tcase 7:\n\t\ta += (uint64(k[p+6]) << 48)\n\t\tfallthrough\n\tcase 6:\n\t\ta += (uint64(k[p+5]) << 40)\n\t\tfallthrough\n\tcase 5:\n\t\ta += (uint64(k[p+4]) << 32)\n\t\tfallthrough\n\tcase 4:\n\t\ta += (uint64(k[p+3]) << 24)\n\t\tfallthrough\n\tcase 3:\n\t\ta += (uint64(k[p+2]) << 16)\n\t\tfallthrough\n\tcase 2:\n\t\ta += (uint64(k[p+1]) << 8)\n\t\tfallthrough\n\tcase 1:\n\t\ta += uint64(k[p+0])\n\t\t/* case 0: nothing left to add */\n\t}\n\tmix64(&a, &b, &c)\n\t/*-------------------------------------------- report the result */\n\treturn c\n}", "func h(a, b []int32) float64 {\n\tx1, y1, x2, y2 := a[0], a[1], b[0], b[1]\n\treturn math.Abs(float64(x1)-float64(x2)) + math.Abs(float64(y1)-float64(y2))\n}", "func compatible(a uint32, b uint32) int8 {\n // first grab only the bits that differ\n var diff uint32 = a ^ b;\n // then check that they all differ in the same way\n return all_on(a, diff);\n}", "func MinMaxInt8(x, min, max int8) int8 { return x }", "func vvFromInt(i int64) (result []*vdl.Value) { //nolint:gocyclo\n\tu, f := uint64(i), float64(i)\n\tswitch {\n\tcase math.MinInt8 <= i && i <= math.MaxInt8:\n\t\tresult = append(result,\n\t\t\tvdl.IntValue(vdl.Int8Type, i),\n\t\t\tvdl.IntValue(vdl.Int8TypeN, i))\n\t\tfallthrough\n\tcase 
math.MinInt16 <= i && i <= math.MaxInt16:\n\t\tresult = append(result,\n\t\t\tvdl.IntValue(vdl.Int16Type, i),\n\t\t\tvdl.IntValue(vdl.Int16TypeN, i))\n\t\tfallthrough\n\tcase -1<<24 <= i && i <= 1<<24:\n\t\tresult = append(result,\n\t\t\tvdl.FloatValue(vdl.Float32Type, f),\n\t\t\tvdl.FloatValue(vdl.Float32TypeN, f))\n\t\tfallthrough\n\tcase math.MinInt32 <= i && i <= math.MaxInt32:\n\t\tresult = append(result,\n\t\t\tvdl.IntValue(vdl.Int32Type, i),\n\t\t\tvdl.IntValue(vdl.Int32TypeN, i))\n\t\tfallthrough\n\tcase -1<<53 <= i && i <= 1<<53:\n\t\tresult = append(result,\n\t\t\tvdl.FloatValue(vdl.Float64Type, f),\n\t\t\tvdl.FloatValue(vdl.Float64TypeN, f))\n\t\tfallthrough\n\tdefault:\n\t\tresult = append(result,\n\t\t\tvdl.IntValue(vdl.Int64Type, i),\n\t\t\tvdl.IntValue(vdl.Int64TypeN, i))\n\t}\n\tif i < 0 {\n\t\treturn\n\t}\n\tswitch {\n\tcase i <= math.MaxUint8:\n\t\tresult = append(result,\n\t\t\tvdl.UintValue(vdl.ByteType, u),\n\t\t\tvdl.UintValue(vdl.ByteTypeN, u))\n\t\tfallthrough\n\tcase i <= math.MaxUint16:\n\t\tresult = append(result,\n\t\t\tvdl.UintValue(vdl.Uint16Type, u),\n\t\t\tvdl.UintValue(vdl.Uint16TypeN, u))\n\t\tfallthrough\n\tcase i <= math.MaxUint32:\n\t\tresult = append(result,\n\t\t\tvdl.UintValue(vdl.Uint32Type, u),\n\t\t\tvdl.UintValue(vdl.Uint32TypeN, u))\n\t\tfallthrough\n\tdefault:\n\t\tresult = append(result,\n\t\t\tvdl.UintValue(vdl.Uint64Type, u),\n\t\t\tvdl.UintValue(vdl.Uint64TypeN, u))\n\t}\n\treturn\n}", "func gbit8(b []byte) (uint8, []byte) {\n\treturn uint8(b[0]), b[1:]\n}", "func gfToBig(a *[4]uint64, p *big.Int) big.Int {\n\tvar t [4]uint64\n\tcopy(t[:], a[:])\n\treturn int256ToBigMod(&t, p)\n}", "func calc(a int) (x, y int) {\n\tx = a*0 + 2\n\ty = a - 2*0\n\treturn\n}", "func IntMulRange(z *big.Int, a, b int64) *big.Int", "func Raw(x *Big) (*uint64, *big.Int) { return &x.compact, &x.unscaled }", "func fiat_25519_from_bytes(out1 *[10]uint32, arg1 *[32]uint8) {\n var x1 uint32 = (uint32((arg1[31])) << 18)\n var x2 uint32 = (uint32((arg1[30])) << 10)\n var x3 uint32 = (uint32((arg1[29])) << 2)\n var x4 uint32 = (uint32((arg1[28])) << 20)\n var x5 uint32 = (uint32((arg1[27])) << 12)\n var x6 uint32 = (uint32((arg1[26])) << 4)\n var x7 uint32 = (uint32((arg1[25])) << 21)\n var x8 uint32 = (uint32((arg1[24])) << 13)\n var x9 uint32 = (uint32((arg1[23])) << 5)\n var x10 uint32 = (uint32((arg1[22])) << 23)\n var x11 uint32 = (uint32((arg1[21])) << 15)\n var x12 uint32 = (uint32((arg1[20])) << 7)\n var x13 uint32 = (uint32((arg1[19])) << 24)\n var x14 uint32 = (uint32((arg1[18])) << 16)\n var x15 uint32 = (uint32((arg1[17])) << 8)\n var x16 uint8 = (arg1[16])\n var x17 uint32 = (uint32((arg1[15])) << 18)\n var x18 uint32 = (uint32((arg1[14])) << 10)\n var x19 uint32 = (uint32((arg1[13])) << 2)\n var x20 uint32 = (uint32((arg1[12])) << 19)\n var x21 uint32 = (uint32((arg1[11])) << 11)\n var x22 uint32 = (uint32((arg1[10])) << 3)\n var x23 uint32 = (uint32((arg1[9])) << 21)\n var x24 uint32 = (uint32((arg1[8])) << 13)\n var x25 uint32 = (uint32((arg1[7])) << 5)\n var x26 uint32 = (uint32((arg1[6])) << 22)\n var x27 uint32 = (uint32((arg1[5])) << 14)\n var x28 uint32 = (uint32((arg1[4])) << 6)\n var x29 uint32 = (uint32((arg1[3])) << 24)\n var x30 uint32 = (uint32((arg1[2])) << 16)\n var x31 uint32 = (uint32((arg1[1])) << 8)\n var x32 uint8 = (arg1[0])\n var x33 uint32 = (x31 + uint32(x32))\n var x34 uint32 = (x30 + x33)\n var x35 uint32 = (x29 + x34)\n var x36 uint32 = (x35 & 0x3ffffff)\n var x37 uint8 = uint8((x35 >> 26))\n var x38 uint32 = (x28 + uint32(x37))\n 
var x39 uint32 = (x27 + x38)\n var x40 uint32 = (x26 + x39)\n var x41 uint32 = (x40 & 0x1ffffff)\n var x42 uint8 = uint8((x40 >> 25))\n var x43 uint32 = (x25 + uint32(x42))\n var x44 uint32 = (x24 + x43)\n var x45 uint32 = (x23 + x44)\n var x46 uint32 = (x45 & 0x3ffffff)\n var x47 uint8 = uint8((x45 >> 26))\n var x48 uint32 = (x22 + uint32(x47))\n var x49 uint32 = (x21 + x48)\n var x50 uint32 = (x20 + x49)\n var x51 uint32 = (x50 & 0x1ffffff)\n var x52 uint8 = uint8((x50 >> 25))\n var x53 uint32 = (x19 + uint32(x52))\n var x54 uint32 = (x18 + x53)\n var x55 uint32 = (x17 + x54)\n var x56 uint32 = (x15 + uint32(x16))\n var x57 uint32 = (x14 + x56)\n var x58 uint32 = (x13 + x57)\n var x59 uint32 = (x58 & 0x1ffffff)\n var x60 uint8 = uint8((x58 >> 25))\n var x61 uint32 = (x12 + uint32(x60))\n var x62 uint32 = (x11 + x61)\n var x63 uint32 = (x10 + x62)\n var x64 uint32 = (x63 & 0x3ffffff)\n var x65 uint8 = uint8((x63 >> 26))\n var x66 uint32 = (x9 + uint32(x65))\n var x67 uint32 = (x8 + x66)\n var x68 uint32 = (x7 + x67)\n var x69 uint32 = (x68 & 0x1ffffff)\n var x70 uint8 = uint8((x68 >> 25))\n var x71 uint32 = (x6 + uint32(x70))\n var x72 uint32 = (x5 + x71)\n var x73 uint32 = (x4 + x72)\n var x74 uint32 = (x73 & 0x3ffffff)\n var x75 uint8 = uint8((x73 >> 26))\n var x76 uint32 = (x3 + uint32(x75))\n var x77 uint32 = (x2 + x76)\n var x78 uint32 = (x1 + x77)\n out1[0] = x36\n out1[1] = x41\n out1[2] = x46\n out1[3] = x51\n out1[4] = x55\n out1[5] = x59\n out1[6] = x64\n out1[7] = x69\n out1[8] = x74\n out1[9] = x78\n}", "func Fixed8FromInt64(val int64) Fixed8 {\n\treturn Fixed8(decimals * val)\n}", "func IntegerToByteCode(i int64) (uint32, uint32) {\n\treturn uint32(i & 0xFFFFFFFF), uint32(i >> 32)\n}", "func QuadAxpy(c, b []float64, s float64, ci, bi int)", "func fnv1(x uint32, list string) uint32 {\n\tfor _, b := range list {\n\t\tx = x*16777619 ^ uint32(b)\n\t}\n\treturn x\n}", "func pack8(src []uint64) uint64 {\n\t_ = src[7] // eliminate multiple bounds checks\n\treturn 8<<60 |\n\t\tsrc[0] |\n\t\tsrc[1]<<7 |\n\t\tsrc[2]<<14 |\n\t\tsrc[3]<<21 |\n\t\tsrc[4]<<28 |\n\t\tsrc[5]<<35 |\n\t\tsrc[6]<<42 |\n\t\tsrc[7]<<49\n}", "func prepareBytes(bytes []byte, b uint) {\n\t// Clear bits in the first byte to make sure the candidate has a size <= bitsize.\n\tbytes[0] &= uint8(int(1<<b) - 1)\n\t// Don't let the value be too small, i.e, set the most significant two bits.\n\t// Setting the top two bits, rather than just the top bit,\n\t// means that when two of these values are multiplied together,\n\t// the result isn't ever one bit short.\n\tif b >= 2 {\n\t\tbytes[0] |= 3 << (b - 2)\n\t} else {\n\t\t// Here b==1, because b cannot be zero.\n\t\tbytes[0] |= 1\n\t\tif len(bytes) > 1 {\n\t\t\tbytes[1] |= 0x80\n\t\t}\n\t}\n\t// Make the value odd since an even number this large certainly isn't prime.\n\tbytes[len(bytes)-1] |= 1\n}", "func (v Posit8x4) Int() []int8 {\n\tout := make([]int8, 4)\n\tfor i, posit := range v.impl {\n\t\tout[i] = posit.Int()\n\t}\n\treturn out\n}", "func fiat_p448_carry_mul(out1 *[8]uint64, arg1 *[8]uint64, arg2 *[8]uint64) {\n var x1 uint64\n var x2 uint64\n x2, x1 = bits.Mul64((arg1[7]), (arg2[7]))\n var x3 uint64\n var x4 uint64\n x4, x3 = bits.Mul64((arg1[7]), (arg2[6]))\n var x5 uint64\n var x6 uint64\n x6, x5 = bits.Mul64((arg1[7]), (arg2[5]))\n var x7 uint64\n var x8 uint64\n x8, x7 = bits.Mul64((arg1[6]), (arg2[7]))\n var x9 uint64\n var x10 uint64\n x10, x9 = bits.Mul64((arg1[6]), (arg2[6]))\n var x11 uint64\n var x12 uint64\n x12, x11 = bits.Mul64((arg1[5]), (arg2[7]))\n 
var x13 uint64\n var x14 uint64\n x14, x13 = bits.Mul64((arg1[7]), (arg2[7]))\n var x15 uint64\n var x16 uint64\n x16, x15 = bits.Mul64((arg1[7]), (arg2[6]))\n var x17 uint64\n var x18 uint64\n x18, x17 = bits.Mul64((arg1[7]), (arg2[5]))\n var x19 uint64\n var x20 uint64\n x20, x19 = bits.Mul64((arg1[6]), (arg2[7]))\n var x21 uint64\n var x22 uint64\n x22, x21 = bits.Mul64((arg1[6]), (arg2[6]))\n var x23 uint64\n var x24 uint64\n x24, x23 = bits.Mul64((arg1[5]), (arg2[7]))\n var x25 uint64\n var x26 uint64\n x26, x25 = bits.Mul64((arg1[7]), (arg2[7]))\n var x27 uint64\n var x28 uint64\n x28, x27 = bits.Mul64((arg1[7]), (arg2[6]))\n var x29 uint64\n var x30 uint64\n x30, x29 = bits.Mul64((arg1[7]), (arg2[5]))\n var x31 uint64\n var x32 uint64\n x32, x31 = bits.Mul64((arg1[7]), (arg2[4]))\n var x33 uint64\n var x34 uint64\n x34, x33 = bits.Mul64((arg1[7]), (arg2[3]))\n var x35 uint64\n var x36 uint64\n x36, x35 = bits.Mul64((arg1[7]), (arg2[2]))\n var x37 uint64\n var x38 uint64\n x38, x37 = bits.Mul64((arg1[7]), (arg2[1]))\n var x39 uint64\n var x40 uint64\n x40, x39 = bits.Mul64((arg1[6]), (arg2[7]))\n var x41 uint64\n var x42 uint64\n x42, x41 = bits.Mul64((arg1[6]), (arg2[6]))\n var x43 uint64\n var x44 uint64\n x44, x43 = bits.Mul64((arg1[6]), (arg2[5]))\n var x45 uint64\n var x46 uint64\n x46, x45 = bits.Mul64((arg1[6]), (arg2[4]))\n var x47 uint64\n var x48 uint64\n x48, x47 = bits.Mul64((arg1[6]), (arg2[3]))\n var x49 uint64\n var x50 uint64\n x50, x49 = bits.Mul64((arg1[6]), (arg2[2]))\n var x51 uint64\n var x52 uint64\n x52, x51 = bits.Mul64((arg1[5]), (arg2[7]))\n var x53 uint64\n var x54 uint64\n x54, x53 = bits.Mul64((arg1[5]), (arg2[6]))\n var x55 uint64\n var x56 uint64\n x56, x55 = bits.Mul64((arg1[5]), (arg2[5]))\n var x57 uint64\n var x58 uint64\n x58, x57 = bits.Mul64((arg1[5]), (arg2[4]))\n var x59 uint64\n var x60 uint64\n x60, x59 = bits.Mul64((arg1[5]), (arg2[3]))\n var x61 uint64\n var x62 uint64\n x62, x61 = bits.Mul64((arg1[4]), (arg2[7]))\n var x63 uint64\n var x64 uint64\n x64, x63 = bits.Mul64((arg1[4]), (arg2[6]))\n var x65 uint64\n var x66 uint64\n x66, x65 = bits.Mul64((arg1[4]), (arg2[5]))\n var x67 uint64\n var x68 uint64\n x68, x67 = bits.Mul64((arg1[4]), (arg2[4]))\n var x69 uint64\n var x70 uint64\n x70, x69 = bits.Mul64((arg1[3]), (arg2[7]))\n var x71 uint64\n var x72 uint64\n x72, x71 = bits.Mul64((arg1[3]), (arg2[6]))\n var x73 uint64\n var x74 uint64\n x74, x73 = bits.Mul64((arg1[3]), (arg2[5]))\n var x75 uint64\n var x76 uint64\n x76, x75 = bits.Mul64((arg1[2]), (arg2[7]))\n var x77 uint64\n var x78 uint64\n x78, x77 = bits.Mul64((arg1[2]), (arg2[6]))\n var x79 uint64\n var x80 uint64\n x80, x79 = bits.Mul64((arg1[1]), (arg2[7]))\n var x81 uint64\n var x82 uint64\n x82, x81 = bits.Mul64((arg1[7]), (arg2[4]))\n var x83 uint64\n var x84 uint64\n x84, x83 = bits.Mul64((arg1[7]), (arg2[3]))\n var x85 uint64\n var x86 uint64\n x86, x85 = bits.Mul64((arg1[7]), (arg2[2]))\n var x87 uint64\n var x88 uint64\n x88, x87 = bits.Mul64((arg1[7]), (arg2[1]))\n var x89 uint64\n var x90 uint64\n x90, x89 = bits.Mul64((arg1[6]), (arg2[5]))\n var x91 uint64\n var x92 uint64\n x92, x91 = bits.Mul64((arg1[6]), (arg2[4]))\n var x93 uint64\n var x94 uint64\n x94, x93 = bits.Mul64((arg1[6]), (arg2[3]))\n var x95 uint64\n var x96 uint64\n x96, x95 = bits.Mul64((arg1[6]), (arg2[2]))\n var x97 uint64\n var x98 uint64\n x98, x97 = bits.Mul64((arg1[5]), (arg2[6]))\n var x99 uint64\n var x100 uint64\n x100, x99 = bits.Mul64((arg1[5]), (arg2[5]))\n var x101 uint64\n var x102 
uint64\n x102, x101 = bits.Mul64((arg1[5]), (arg2[4]))\n var x103 uint64\n var x104 uint64\n x104, x103 = bits.Mul64((arg1[5]), (arg2[3]))\n var x105 uint64\n var x106 uint64\n x106, x105 = bits.Mul64((arg1[4]), (arg2[7]))\n var x107 uint64\n var x108 uint64\n x108, x107 = bits.Mul64((arg1[4]), (arg2[6]))\n var x109 uint64\n var x110 uint64\n x110, x109 = bits.Mul64((arg1[4]), (arg2[5]))\n var x111 uint64\n var x112 uint64\n x112, x111 = bits.Mul64((arg1[4]), (arg2[4]))\n var x113 uint64\n var x114 uint64\n x114, x113 = bits.Mul64((arg1[3]), (arg2[7]))\n var x115 uint64\n var x116 uint64\n x116, x115 = bits.Mul64((arg1[3]), (arg2[6]))\n var x117 uint64\n var x118 uint64\n x118, x117 = bits.Mul64((arg1[3]), (arg2[5]))\n var x119 uint64\n var x120 uint64\n x120, x119 = bits.Mul64((arg1[2]), (arg2[7]))\n var x121 uint64\n var x122 uint64\n x122, x121 = bits.Mul64((arg1[2]), (arg2[6]))\n var x123 uint64\n var x124 uint64\n x124, x123 = bits.Mul64((arg1[1]), (arg2[7]))\n var x125 uint64\n var x126 uint64\n x126, x125 = bits.Mul64((arg1[7]), (arg2[0]))\n var x127 uint64\n var x128 uint64\n x128, x127 = bits.Mul64((arg1[6]), (arg2[1]))\n var x129 uint64\n var x130 uint64\n x130, x129 = bits.Mul64((arg1[6]), (arg2[0]))\n var x131 uint64\n var x132 uint64\n x132, x131 = bits.Mul64((arg1[5]), (arg2[2]))\n var x133 uint64\n var x134 uint64\n x134, x133 = bits.Mul64((arg1[5]), (arg2[1]))\n var x135 uint64\n var x136 uint64\n x136, x135 = bits.Mul64((arg1[5]), (arg2[0]))\n var x137 uint64\n var x138 uint64\n x138, x137 = bits.Mul64((arg1[4]), (arg2[3]))\n var x139 uint64\n var x140 uint64\n x140, x139 = bits.Mul64((arg1[4]), (arg2[2]))\n var x141 uint64\n var x142 uint64\n x142, x141 = bits.Mul64((arg1[4]), (arg2[1]))\n var x143 uint64\n var x144 uint64\n x144, x143 = bits.Mul64((arg1[4]), (arg2[0]))\n var x145 uint64\n var x146 uint64\n x146, x145 = bits.Mul64((arg1[3]), (arg2[4]))\n var x147 uint64\n var x148 uint64\n x148, x147 = bits.Mul64((arg1[3]), (arg2[3]))\n var x149 uint64\n var x150 uint64\n x150, x149 = bits.Mul64((arg1[3]), (arg2[2]))\n var x151 uint64\n var x152 uint64\n x152, x151 = bits.Mul64((arg1[3]), (arg2[1]))\n var x153 uint64\n var x154 uint64\n x154, x153 = bits.Mul64((arg1[3]), (arg2[0]))\n var x155 uint64\n var x156 uint64\n x156, x155 = bits.Mul64((arg1[2]), (arg2[5]))\n var x157 uint64\n var x158 uint64\n x158, x157 = bits.Mul64((arg1[2]), (arg2[4]))\n var x159 uint64\n var x160 uint64\n x160, x159 = bits.Mul64((arg1[2]), (arg2[3]))\n var x161 uint64\n var x162 uint64\n x162, x161 = bits.Mul64((arg1[2]), (arg2[2]))\n var x163 uint64\n var x164 uint64\n x164, x163 = bits.Mul64((arg1[2]), (arg2[1]))\n var x165 uint64\n var x166 uint64\n x166, x165 = bits.Mul64((arg1[2]), (arg2[0]))\n var x167 uint64\n var x168 uint64\n x168, x167 = bits.Mul64((arg1[1]), (arg2[6]))\n var x169 uint64\n var x170 uint64\n x170, x169 = bits.Mul64((arg1[1]), (arg2[5]))\n var x171 uint64\n var x172 uint64\n x172, x171 = bits.Mul64((arg1[1]), (arg2[4]))\n var x173 uint64\n var x174 uint64\n x174, x173 = bits.Mul64((arg1[1]), (arg2[3]))\n var x175 uint64\n var x176 uint64\n x176, x175 = bits.Mul64((arg1[1]), (arg2[2]))\n var x177 uint64\n var x178 uint64\n x178, x177 = bits.Mul64((arg1[1]), (arg2[1]))\n var x179 uint64\n var x180 uint64\n x180, x179 = bits.Mul64((arg1[1]), (arg2[0]))\n var x181 uint64\n var x182 uint64\n x182, x181 = bits.Mul64((arg1[0]), (arg2[7]))\n var x183 uint64\n var x184 uint64\n x184, x183 = bits.Mul64((arg1[0]), (arg2[6]))\n var x185 uint64\n var x186 uint64\n x186, x185 = 
bits.Mul64((arg1[0]), (arg2[5]))\n var x187 uint64\n var x188 uint64\n x188, x187 = bits.Mul64((arg1[0]), (arg2[4]))\n var x189 uint64\n var x190 uint64\n x190, x189 = bits.Mul64((arg1[0]), (arg2[3]))\n var x191 uint64\n var x192 uint64\n x192, x191 = bits.Mul64((arg1[0]), (arg2[2]))\n var x193 uint64\n var x194 uint64\n x194, x193 = bits.Mul64((arg1[0]), (arg2[1]))\n var x195 uint64\n var x196 uint64\n x196, x195 = bits.Mul64((arg1[0]), (arg2[0]))\n var x197 uint64\n var x198 fiat_p448_uint1\n x197, x198 = fiat_p448_addcarryx_u64(x43, x31, 0x0)\n var x199 uint64\n x199, _ = fiat_p448_addcarryx_u64(x44, x32, x198)\n var x201 uint64\n var x202 fiat_p448_uint1\n x201, x202 = fiat_p448_addcarryx_u64(x53, x197, 0x0)\n var x203 uint64\n x203, _ = fiat_p448_addcarryx_u64(x54, x199, x202)\n var x205 uint64\n var x206 fiat_p448_uint1\n x205, x206 = fiat_p448_addcarryx_u64(x61, x201, 0x0)\n var x207 uint64\n x207, _ = fiat_p448_addcarryx_u64(x62, x203, x206)\n var x209 uint64\n var x210 fiat_p448_uint1\n x209, x210 = fiat_p448_addcarryx_u64(x153, x205, 0x0)\n var x211 uint64\n x211, _ = fiat_p448_addcarryx_u64(x154, x207, x210)\n var x213 uint64\n var x214 fiat_p448_uint1\n x213, x214 = fiat_p448_addcarryx_u64(x163, x209, 0x0)\n var x215 uint64\n x215, _ = fiat_p448_addcarryx_u64(x164, x211, x214)\n var x217 uint64\n var x218 fiat_p448_uint1\n x217, x218 = fiat_p448_addcarryx_u64(x175, x213, 0x0)\n var x219 uint64\n x219, _ = fiat_p448_addcarryx_u64(x176, x215, x218)\n var x221 uint64\n var x222 fiat_p448_uint1\n x221, x222 = fiat_p448_addcarryx_u64(x189, x217, 0x0)\n var x223 uint64\n x223, _ = fiat_p448_addcarryx_u64(x190, x219, x222)\n var x225 uint64 = ((x221 >> 56) | ((x223 << 8) & 0xffffffffffffffff))\n var x226 uint64 = (x221 & 0xffffffffffffff)\n var x227 uint64\n var x228 fiat_p448_uint1\n x227, x228 = fiat_p448_addcarryx_u64(x89, x81, 0x0)\n var x229 uint64\n x229, _ = fiat_p448_addcarryx_u64(x90, x82, x228)\n var x231 uint64\n var x232 fiat_p448_uint1\n x231, x232 = fiat_p448_addcarryx_u64(x97, x227, 0x0)\n var x233 uint64\n x233, _ = fiat_p448_addcarryx_u64(x98, x229, x232)\n var x235 uint64\n var x236 fiat_p448_uint1\n x235, x236 = fiat_p448_addcarryx_u64(x105, x231, 0x0)\n var x237 uint64\n x237, _ = fiat_p448_addcarryx_u64(x106, x233, x236)\n var x239 uint64\n var x240 fiat_p448_uint1\n x239, x240 = fiat_p448_addcarryx_u64(x125, x235, 0x0)\n var x241 uint64\n x241, _ = fiat_p448_addcarryx_u64(x126, x237, x240)\n var x243 uint64\n var x244 fiat_p448_uint1\n x243, x244 = fiat_p448_addcarryx_u64(x127, x239, 0x0)\n var x245 uint64\n x245, _ = fiat_p448_addcarryx_u64(x128, x241, x244)\n var x247 uint64\n var x248 fiat_p448_uint1\n x247, x248 = fiat_p448_addcarryx_u64(x131, x243, 0x0)\n var x249 uint64\n x249, _ = fiat_p448_addcarryx_u64(x132, x245, x248)\n var x251 uint64\n var x252 fiat_p448_uint1\n x251, x252 = fiat_p448_addcarryx_u64(x137, x247, 0x0)\n var x253 uint64\n x253, _ = fiat_p448_addcarryx_u64(x138, x249, x252)\n var x255 uint64\n var x256 fiat_p448_uint1\n x255, x256 = fiat_p448_addcarryx_u64(x145, x251, 0x0)\n var x257 uint64\n x257, _ = fiat_p448_addcarryx_u64(x146, x253, x256)\n var x259 uint64\n var x260 fiat_p448_uint1\n x259, x260 = fiat_p448_addcarryx_u64(x155, x255, 0x0)\n var x261 uint64\n x261, _ = fiat_p448_addcarryx_u64(x156, x257, x260)\n var x263 uint64\n var x264 fiat_p448_uint1\n x263, x264 = fiat_p448_addcarryx_u64(x167, x259, 0x0)\n var x265 uint64\n x265, _ = fiat_p448_addcarryx_u64(x168, x261, x264)\n var x267 uint64\n var x268 fiat_p448_uint1\n x267, 
x268 = fiat_p448_addcarryx_u64(x181, x263, 0x0)\n var x269 uint64\n x269, _ = fiat_p448_addcarryx_u64(x182, x265, x268)\n var x271 uint64\n var x272 fiat_p448_uint1\n x271, x272 = fiat_p448_addcarryx_u64(x25, x13, 0x0)\n var x273 uint64\n x273, _ = fiat_p448_addcarryx_u64(x26, x14, x272)\n var x275 uint64\n var x276 fiat_p448_uint1\n x275, x276 = fiat_p448_addcarryx_u64(x83, x271, 0x0)\n var x277 uint64\n x277, _ = fiat_p448_addcarryx_u64(x84, x273, x276)\n var x279 uint64\n var x280 fiat_p448_uint1\n x279, x280 = fiat_p448_addcarryx_u64(x91, x275, 0x0)\n var x281 uint64\n x281, _ = fiat_p448_addcarryx_u64(x92, x277, x280)\n var x283 uint64\n var x284 fiat_p448_uint1\n x283, x284 = fiat_p448_addcarryx_u64(x99, x279, 0x0)\n var x285 uint64\n x285, _ = fiat_p448_addcarryx_u64(x100, x281, x284)\n var x287 uint64\n var x288 fiat_p448_uint1\n x287, x288 = fiat_p448_addcarryx_u64(x107, x283, 0x0)\n var x289 uint64\n x289, _ = fiat_p448_addcarryx_u64(x108, x285, x288)\n var x291 uint64\n var x292 fiat_p448_uint1\n x291, x292 = fiat_p448_addcarryx_u64(x113, x287, 0x0)\n var x293 uint64\n x293, _ = fiat_p448_addcarryx_u64(x114, x289, x292)\n var x295 uint64\n var x296 fiat_p448_uint1\n x295, x296 = fiat_p448_addcarryx_u64(x129, x291, 0x0)\n var x297 uint64\n x297, _ = fiat_p448_addcarryx_u64(x130, x293, x296)\n var x299 uint64\n var x300 fiat_p448_uint1\n x299, x300 = fiat_p448_addcarryx_u64(x133, x295, 0x0)\n var x301 uint64\n x301, _ = fiat_p448_addcarryx_u64(x134, x297, x300)\n var x303 uint64\n var x304 fiat_p448_uint1\n x303, x304 = fiat_p448_addcarryx_u64(x139, x299, 0x0)\n var x305 uint64\n x305, _ = fiat_p448_addcarryx_u64(x140, x301, x304)\n var x307 uint64\n var x308 fiat_p448_uint1\n x307, x308 = fiat_p448_addcarryx_u64(x147, x303, 0x0)\n var x309 uint64\n x309, _ = fiat_p448_addcarryx_u64(x148, x305, x308)\n var x311 uint64\n var x312 fiat_p448_uint1\n x311, x312 = fiat_p448_addcarryx_u64(x157, x307, 0x0)\n var x313 uint64\n x313, _ = fiat_p448_addcarryx_u64(x158, x309, x312)\n var x315 uint64\n var x316 fiat_p448_uint1\n x315, x316 = fiat_p448_addcarryx_u64(x169, x311, 0x0)\n var x317 uint64\n x317, _ = fiat_p448_addcarryx_u64(x170, x313, x316)\n var x319 uint64\n var x320 fiat_p448_uint1\n x319, x320 = fiat_p448_addcarryx_u64(x183, x315, 0x0)\n var x321 uint64\n x321, _ = fiat_p448_addcarryx_u64(x184, x317, x320)\n var x323 uint64\n var x324 fiat_p448_uint1\n x323, x324 = fiat_p448_addcarryx_u64(x19, x15, 0x0)\n var x325 uint64\n x325, _ = fiat_p448_addcarryx_u64(x20, x16, x324)\n var x327 uint64\n var x328 fiat_p448_uint1\n x327, x328 = fiat_p448_addcarryx_u64(x27, x323, 0x0)\n var x329 uint64\n x329, _ = fiat_p448_addcarryx_u64(x28, x325, x328)\n var x331 uint64\n var x332 fiat_p448_uint1\n x331, x332 = fiat_p448_addcarryx_u64(x39, x327, 0x0)\n var x333 uint64\n x333, _ = fiat_p448_addcarryx_u64(x40, x329, x332)\n var x335 uint64\n var x336 fiat_p448_uint1\n x335, x336 = fiat_p448_addcarryx_u64(x85, x331, 0x0)\n var x337 uint64\n x337, _ = fiat_p448_addcarryx_u64(x86, x333, x336)\n var x339 uint64\n var x340 fiat_p448_uint1\n x339, x340 = fiat_p448_addcarryx_u64(x93, x335, 0x0)\n var x341 uint64\n x341, _ = fiat_p448_addcarryx_u64(x94, x337, x340)\n var x343 uint64\n var x344 fiat_p448_uint1\n x343, x344 = fiat_p448_addcarryx_u64(x101, x339, 0x0)\n var x345 uint64\n x345, _ = fiat_p448_addcarryx_u64(x102, x341, x344)\n var x347 uint64\n var x348 fiat_p448_uint1\n x347, x348 = fiat_p448_addcarryx_u64(x109, x343, 0x0)\n var x349 uint64\n x349, _ = fiat_p448_addcarryx_u64(x110, x345, 
x348)\n var x351 uint64\n var x352 fiat_p448_uint1\n x351, x352 = fiat_p448_addcarryx_u64(x115, x347, 0x0)\n var x353 uint64\n x353, _ = fiat_p448_addcarryx_u64(x116, x349, x352)\n var x355 uint64\n var x356 fiat_p448_uint1\n x355, x356 = fiat_p448_addcarryx_u64(x119, x351, 0x0)\n var x357 uint64\n x357, _ = fiat_p448_addcarryx_u64(x120, x353, x356)\n var x359 uint64\n var x360 fiat_p448_uint1\n x359, x360 = fiat_p448_addcarryx_u64(x135, x355, 0x0)\n var x361 uint64\n x361, _ = fiat_p448_addcarryx_u64(x136, x357, x360)\n var x363 uint64\n var x364 fiat_p448_uint1\n x363, x364 = fiat_p448_addcarryx_u64(x141, x359, 0x0)\n var x365 uint64\n x365, _ = fiat_p448_addcarryx_u64(x142, x361, x364)\n var x367 uint64\n var x368 fiat_p448_uint1\n x367, x368 = fiat_p448_addcarryx_u64(x149, x363, 0x0)\n var x369 uint64\n x369, _ = fiat_p448_addcarryx_u64(x150, x365, x368)\n var x371 uint64\n var x372 fiat_p448_uint1\n x371, x372 = fiat_p448_addcarryx_u64(x159, x367, 0x0)\n var x373 uint64\n x373, _ = fiat_p448_addcarryx_u64(x160, x369, x372)\n var x375 uint64\n var x376 fiat_p448_uint1\n x375, x376 = fiat_p448_addcarryx_u64(x171, x371, 0x0)\n var x377 uint64\n x377, _ = fiat_p448_addcarryx_u64(x172, x373, x376)\n var x379 uint64\n var x380 fiat_p448_uint1\n x379, x380 = fiat_p448_addcarryx_u64(x185, x375, 0x0)\n var x381 uint64\n x381, _ = fiat_p448_addcarryx_u64(x186, x377, x380)\n var x383 uint64\n var x384 fiat_p448_uint1\n x383, x384 = fiat_p448_addcarryx_u64(x21, x17, 0x0)\n var x385 uint64\n x385, _ = fiat_p448_addcarryx_u64(x22, x18, x384)\n var x387 uint64\n var x388 fiat_p448_uint1\n x387, x388 = fiat_p448_addcarryx_u64(x23, x383, 0x0)\n var x389 uint64\n x389, _ = fiat_p448_addcarryx_u64(x24, x385, x388)\n var x391 uint64\n var x392 fiat_p448_uint1\n x391, x392 = fiat_p448_addcarryx_u64(x29, x387, 0x0)\n var x393 uint64\n x393, _ = fiat_p448_addcarryx_u64(x30, x389, x392)\n var x395 uint64\n var x396 fiat_p448_uint1\n x395, x396 = fiat_p448_addcarryx_u64(x41, x391, 0x0)\n var x397 uint64\n x397, _ = fiat_p448_addcarryx_u64(x42, x393, x396)\n var x399 uint64\n var x400 fiat_p448_uint1\n x399, x400 = fiat_p448_addcarryx_u64(x51, x395, 0x0)\n var x401 uint64\n x401, _ = fiat_p448_addcarryx_u64(x52, x397, x400)\n var x403 uint64\n var x404 fiat_p448_uint1\n x403, x404 = fiat_p448_addcarryx_u64(x87, x399, 0x0)\n var x405 uint64\n x405, _ = fiat_p448_addcarryx_u64(x88, x401, x404)\n var x407 uint64\n var x408 fiat_p448_uint1\n x407, x408 = fiat_p448_addcarryx_u64(x95, x403, 0x0)\n var x409 uint64\n x409, _ = fiat_p448_addcarryx_u64(x96, x405, x408)\n var x411 uint64\n var x412 fiat_p448_uint1\n x411, x412 = fiat_p448_addcarryx_u64(x103, x407, 0x0)\n var x413 uint64\n x413, _ = fiat_p448_addcarryx_u64(x104, x409, x412)\n var x415 uint64\n var x416 fiat_p448_uint1\n x415, x416 = fiat_p448_addcarryx_u64(x111, x411, 0x0)\n var x417 uint64\n x417, _ = fiat_p448_addcarryx_u64(x112, x413, x416)\n var x419 uint64\n var x420 fiat_p448_uint1\n x419, x420 = fiat_p448_addcarryx_u64(x117, x415, 0x0)\n var x421 uint64\n x421, _ = fiat_p448_addcarryx_u64(x118, x417, x420)\n var x423 uint64\n var x424 fiat_p448_uint1\n x423, x424 = fiat_p448_addcarryx_u64(x121, x419, 0x0)\n var x425 uint64\n x425, _ = fiat_p448_addcarryx_u64(x122, x421, x424)\n var x427 uint64\n var x428 fiat_p448_uint1\n x427, x428 = fiat_p448_addcarryx_u64(x123, x423, 0x0)\n var x429 uint64\n x429, _ = fiat_p448_addcarryx_u64(x124, x425, x428)\n var x431 uint64\n var x432 fiat_p448_uint1\n x431, x432 = fiat_p448_addcarryx_u64(x143, x427, 0x0)\n 
var x433 uint64\n x433, _ = fiat_p448_addcarryx_u64(x144, x429, x432)\n var x435 uint64\n var x436 fiat_p448_uint1\n x435, x436 = fiat_p448_addcarryx_u64(x151, x431, 0x0)\n var x437 uint64\n x437, _ = fiat_p448_addcarryx_u64(x152, x433, x436)\n var x439 uint64\n var x440 fiat_p448_uint1\n x439, x440 = fiat_p448_addcarryx_u64(x161, x435, 0x0)\n var x441 uint64\n x441, _ = fiat_p448_addcarryx_u64(x162, x437, x440)\n var x443 uint64\n var x444 fiat_p448_uint1\n x443, x444 = fiat_p448_addcarryx_u64(x173, x439, 0x0)\n var x445 uint64\n x445, _ = fiat_p448_addcarryx_u64(x174, x441, x444)\n var x447 uint64\n var x448 fiat_p448_uint1\n x447, x448 = fiat_p448_addcarryx_u64(x187, x443, 0x0)\n var x449 uint64\n x449, _ = fiat_p448_addcarryx_u64(x188, x445, x448)\n var x451 uint64\n var x452 fiat_p448_uint1\n x451, x452 = fiat_p448_addcarryx_u64(x33, x1, 0x0)\n var x453 uint64\n x453, _ = fiat_p448_addcarryx_u64(x34, x2, x452)\n var x455 uint64\n var x456 fiat_p448_uint1\n x455, x456 = fiat_p448_addcarryx_u64(x45, x451, 0x0)\n var x457 uint64\n x457, _ = fiat_p448_addcarryx_u64(x46, x453, x456)\n var x459 uint64\n var x460 fiat_p448_uint1\n x459, x460 = fiat_p448_addcarryx_u64(x55, x455, 0x0)\n var x461 uint64\n x461, _ = fiat_p448_addcarryx_u64(x56, x457, x460)\n var x463 uint64\n var x464 fiat_p448_uint1\n x463, x464 = fiat_p448_addcarryx_u64(x63, x459, 0x0)\n var x465 uint64\n x465, _ = fiat_p448_addcarryx_u64(x64, x461, x464)\n var x467 uint64\n var x468 fiat_p448_uint1\n x467, x468 = fiat_p448_addcarryx_u64(x69, x463, 0x0)\n var x469 uint64\n x469, _ = fiat_p448_addcarryx_u64(x70, x465, x468)\n var x471 uint64\n var x472 fiat_p448_uint1\n x471, x472 = fiat_p448_addcarryx_u64(x165, x467, 0x0)\n var x473 uint64\n x473, _ = fiat_p448_addcarryx_u64(x166, x469, x472)\n var x475 uint64\n var x476 fiat_p448_uint1\n x475, x476 = fiat_p448_addcarryx_u64(x177, x471, 0x0)\n var x477 uint64\n x477, _ = fiat_p448_addcarryx_u64(x178, x473, x476)\n var x479 uint64\n var x480 fiat_p448_uint1\n x479, x480 = fiat_p448_addcarryx_u64(x191, x475, 0x0)\n var x481 uint64\n x481, _ = fiat_p448_addcarryx_u64(x192, x477, x480)\n var x483 uint64\n var x484 fiat_p448_uint1\n x483, x484 = fiat_p448_addcarryx_u64(x7, x3, 0x0)\n var x485 uint64\n x485, _ = fiat_p448_addcarryx_u64(x8, x4, x484)\n var x487 uint64\n var x488 fiat_p448_uint1\n x487, x488 = fiat_p448_addcarryx_u64(x35, x483, 0x0)\n var x489 uint64\n x489, _ = fiat_p448_addcarryx_u64(x36, x485, x488)\n var x491 uint64\n var x492 fiat_p448_uint1\n x491, x492 = fiat_p448_addcarryx_u64(x47, x487, 0x0)\n var x493 uint64\n x493, _ = fiat_p448_addcarryx_u64(x48, x489, x492)\n var x495 uint64\n var x496 fiat_p448_uint1\n x495, x496 = fiat_p448_addcarryx_u64(x57, x491, 0x0)\n var x497 uint64\n x497, _ = fiat_p448_addcarryx_u64(x58, x493, x496)\n var x499 uint64\n var x500 fiat_p448_uint1\n x499, x500 = fiat_p448_addcarryx_u64(x65, x495, 0x0)\n var x501 uint64\n x501, _ = fiat_p448_addcarryx_u64(x66, x497, x500)\n var x503 uint64\n var x504 fiat_p448_uint1\n x503, x504 = fiat_p448_addcarryx_u64(x71, x499, 0x0)\n var x505 uint64\n x505, _ = fiat_p448_addcarryx_u64(x72, x501, x504)\n var x507 uint64\n var x508 fiat_p448_uint1\n x507, x508 = fiat_p448_addcarryx_u64(x75, x503, 0x0)\n var x509 uint64\n x509, _ = fiat_p448_addcarryx_u64(x76, x505, x508)\n var x511 uint64\n var x512 fiat_p448_uint1\n x511, x512 = fiat_p448_addcarryx_u64(x179, x507, 0x0)\n var x513 uint64\n x513, _ = fiat_p448_addcarryx_u64(x180, x509, x512)\n var x515 uint64\n var x516 fiat_p448_uint1\n x515, x516 
= fiat_p448_addcarryx_u64(x193, x511, 0x0)\n var x517 uint64\n x517, _ = fiat_p448_addcarryx_u64(x194, x513, x516)\n var x519 uint64\n var x520 fiat_p448_uint1\n x519, x520 = fiat_p448_addcarryx_u64(x9, x5, 0x0)\n var x521 uint64\n x521, _ = fiat_p448_addcarryx_u64(x10, x6, x520)\n var x523 uint64\n var x524 fiat_p448_uint1\n x523, x524 = fiat_p448_addcarryx_u64(x11, x519, 0x0)\n var x525 uint64\n x525, _ = fiat_p448_addcarryx_u64(x12, x521, x524)\n var x527 uint64\n var x528 fiat_p448_uint1\n x527, x528 = fiat_p448_addcarryx_u64(x37, x523, 0x0)\n var x529 uint64\n x529, _ = fiat_p448_addcarryx_u64(x38, x525, x528)\n var x531 uint64\n var x532 fiat_p448_uint1\n x531, x532 = fiat_p448_addcarryx_u64(x49, x527, 0x0)\n var x533 uint64\n x533, _ = fiat_p448_addcarryx_u64(x50, x529, x532)\n var x535 uint64\n var x536 fiat_p448_uint1\n x535, x536 = fiat_p448_addcarryx_u64(x59, x531, 0x0)\n var x537 uint64\n x537, _ = fiat_p448_addcarryx_u64(x60, x533, x536)\n var x539 uint64\n var x540 fiat_p448_uint1\n x539, x540 = fiat_p448_addcarryx_u64(x67, x535, 0x0)\n var x541 uint64\n x541, _ = fiat_p448_addcarryx_u64(x68, x537, x540)\n var x543 uint64\n var x544 fiat_p448_uint1\n x543, x544 = fiat_p448_addcarryx_u64(x73, x539, 0x0)\n var x545 uint64\n x545, _ = fiat_p448_addcarryx_u64(x74, x541, x544)\n var x547 uint64\n var x548 fiat_p448_uint1\n x547, x548 = fiat_p448_addcarryx_u64(x77, x543, 0x0)\n var x549 uint64\n x549, _ = fiat_p448_addcarryx_u64(x78, x545, x548)\n var x551 uint64\n var x552 fiat_p448_uint1\n x551, x552 = fiat_p448_addcarryx_u64(x79, x547, 0x0)\n var x553 uint64\n x553, _ = fiat_p448_addcarryx_u64(x80, x549, x552)\n var x555 uint64\n var x556 fiat_p448_uint1\n x555, x556 = fiat_p448_addcarryx_u64(x195, x551, 0x0)\n var x557 uint64\n x557, _ = fiat_p448_addcarryx_u64(x196, x553, x556)\n var x559 uint64\n var x560 fiat_p448_uint1\n x559, x560 = fiat_p448_addcarryx_u64(x225, x447, 0x0)\n var x561 uint64 = (uint64(x560) + x449)\n var x562 uint64 = ((x267 >> 56) | ((x269 << 8) & 0xffffffffffffffff))\n var x563 uint64 = (x267 & 0xffffffffffffff)\n var x564 uint64\n var x565 fiat_p448_uint1\n x564, x565 = fiat_p448_addcarryx_u64(x559, x562, 0x0)\n var x566 uint64 = (uint64(x565) + x561)\n var x567 uint64 = ((x564 >> 56) | ((x566 << 8) & 0xffffffffffffffff))\n var x568 uint64 = (x564 & 0xffffffffffffff)\n var x569 uint64\n var x570 fiat_p448_uint1\n x569, x570 = fiat_p448_addcarryx_u64(x555, x562, 0x0)\n var x571 uint64 = (uint64(x570) + x557)\n var x572 uint64\n var x573 fiat_p448_uint1\n x572, x573 = fiat_p448_addcarryx_u64(x567, x379, 0x0)\n var x574 uint64 = (uint64(x573) + x381)\n var x575 uint64 = ((x569 >> 56) | ((x571 << 8) & 0xffffffffffffffff))\n var x576 uint64 = (x569 & 0xffffffffffffff)\n var x577 uint64\n var x578 fiat_p448_uint1\n x577, x578 = fiat_p448_addcarryx_u64(x575, x515, 0x0)\n var x579 uint64 = (uint64(x578) + x517)\n var x580 uint64 = ((x572 >> 56) | ((x574 << 8) & 0xffffffffffffffff))\n var x581 uint64 = (x572 & 0xffffffffffffff)\n var x582 uint64\n var x583 fiat_p448_uint1\n x582, x583 = fiat_p448_addcarryx_u64(x580, x319, 0x0)\n var x584 uint64 = (uint64(x583) + x321)\n var x585 uint64 = ((x577 >> 56) | ((x579 << 8) & 0xffffffffffffffff))\n var x586 uint64 = (x577 & 0xffffffffffffff)\n var x587 uint64\n var x588 fiat_p448_uint1\n x587, x588 = fiat_p448_addcarryx_u64(x585, x479, 0x0)\n var x589 uint64 = (uint64(x588) + x481)\n var x590 uint64 = ((x582 >> 56) | ((x584 << 8) & 0xffffffffffffffff))\n var x591 uint64 = (x582 & 0xffffffffffffff)\n var x592 uint64 = 
(x590 + x563)\n var x593 uint64 = ((x587 >> 56) | ((x589 << 8) & 0xffffffffffffffff))\n var x594 uint64 = (x587 & 0xffffffffffffff)\n var x595 uint64 = (x593 + x226)\n var x596 uint64 = (x592 >> 56)\n var x597 uint64 = (x592 & 0xffffffffffffff)\n var x598 uint64 = (x595 >> 56)\n var x599 uint64 = (x595 & 0xffffffffffffff)\n var x600 uint64 = (x568 + x596)\n var x601 uint64 = (x576 + x596)\n var x602 uint64 = (x598 + x600)\n var x603 fiat_p448_uint1 = fiat_p448_uint1((x602 >> 56))\n var x604 uint64 = (x602 & 0xffffffffffffff)\n var x605 uint64 = (uint64(x603) + x581)\n var x606 fiat_p448_uint1 = fiat_p448_uint1((x601 >> 56))\n var x607 uint64 = (x601 & 0xffffffffffffff)\n var x608 uint64 = (uint64(x606) + x586)\n out1[0] = x607\n out1[1] = x608\n out1[2] = x594\n out1[3] = x599\n out1[4] = x604\n out1[5] = x605\n out1[6] = x591\n out1[7] = x597\n}", "func IntAnd(z *big.Int, x, y *big.Int,) *big.Int", "func fiat_p448_msat(out1 *[8]uint64) {\n out1[0] = 0xffffffffffffffff\n out1[1] = 0xffffffffffffffff\n out1[2] = 0xffffffffffffffff\n out1[3] = 0xfffffffeffffffff\n out1[4] = 0xffffffffffffffff\n out1[5] = 0xffffffffffffffff\n out1[6] = 0xffffffffffffffff\n out1[7] = uint64(0x0)\n}", "func TestBtI64(t *testing.T) {\n\t// if b is nil, it occurs an error\n\t// when execution because a type of b is not []byte\n\n\t// test for a normal situation\n\t// input: 8 bytes array, []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}\n\t// output: zero(int64)\n\t//\n\t// no problem if input equals to output\n\tvar zeroI64 int64 = 0\n\tzeroB64 := []byte{0x00, 0x00, 0x00, 0x00,\n\t\t0x00, 0x00, 0x00, 0x00}\n\tif BtI64(zeroB64) != zeroI64 {\n\t\tt.Fatalf(\"it needs that input equals to output\")\n\t}\n\n\t// test for a normal situation\n\t// input: 8 bytes array, []byte{0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}\n\t// output: max(int64)\n\t//\n\t// no problem if input equals to output\n\tvar plusMaxI64 int64 = 9223372036854775807\n\tplusMaxB64 := []byte{0x7f, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff}\n\tif BtI64(plusMaxB64) != plusMaxI64 {\n\t\tt.Fatalf(\"it needs that input equals to output\")\n\t}\n\n\t// test for a normal situation\n\t// input: 8 bytes array, []byte{0x80, 0x00, 0x00, 0x00}\n\t// output: min(int64)\n\t//\n\t// no problem if input equals to output\n\tvar minusMaxI64 int64 = -9223372036854775808\n\tminusMaxB64 := []byte{0x80, 0x00, 0x00, 0x00,\n\t\t0x00, 0x00, 0x00, 0x00}\n\tif BtI64(minusMaxB64) != minusMaxI64 {\n\t\tt.Fatalf(\"it needs that input equals to output\")\n\t}\n}", "func putint(b []byte, i uint64) (size int) {\n\tswitch {\n\tcase i < (1 << 8):\n\t\tb[0] = byte(i)\n\t\treturn 1\n\tcase i < (1 << 16):\n\t\tb[0] = byte(i >> 8)\n\t\tb[1] = byte(i)\n\t\treturn 2\n\tcase i < (1 << 24):\n\t\tb[0] = byte(i >> 16)\n\t\tb[1] = byte(i >> 8)\n\t\tb[2] = byte(i)\n\t\treturn 3\n\tcase i < (1 << 32):\n\t\tb[0] = byte(i >> 24)\n\t\tb[1] = byte(i >> 16)\n\t\tb[2] = byte(i >> 8)\n\t\tb[3] = byte(i)\n\t\treturn 4\n\tcase i < (1 << 40):\n\t\tb[0] = byte(i >> 32)\n\t\tb[1] = byte(i >> 24)\n\t\tb[2] = byte(i >> 16)\n\t\tb[3] = byte(i >> 8)\n\t\tb[4] = byte(i)\n\t\treturn 5\n\tcase i < (1 << 48):\n\t\tb[0] = byte(i >> 40)\n\t\tb[1] = byte(i >> 32)\n\t\tb[2] = byte(i >> 24)\n\t\tb[3] = byte(i >> 16)\n\t\tb[4] = byte(i >> 8)\n\t\tb[5] = byte(i)\n\t\treturn 6\n\tcase i < (1 << 56):\n\t\tb[0] = byte(i >> 48)\n\t\tb[1] = byte(i >> 40)\n\t\tb[2] = byte(i >> 32)\n\t\tb[3] = byte(i >> 24)\n\t\tb[4] = byte(i >> 16)\n\t\tb[5] = byte(i >> 8)\n\t\tb[6] = byte(i)\n\t\treturn 7\n\tdefault:\n\t\tb[0] = 
byte(i >> 56)\n\t\tb[1] = byte(i >> 48)\n\t\tb[2] = byte(i >> 40)\n\t\tb[3] = byte(i >> 32)\n\t\tb[4] = byte(i >> 24)\n\t\tb[5] = byte(i >> 16)\n\t\tb[6] = byte(i >> 8)\n\t\tb[7] = byte(i)\n\t\treturn 8\n\t}\n}", "func NewRat(a, b int64) *big.Rat", "func Fixed8FromFloat(val float64) Fixed8 {\n\treturn Fixed8(int64(decimals * val))\n}", "func TestLenBInBtI64(t *testing.T) {\n\t// if b is nil, it occurs an error\n\t// when execution because a type of b is not []byte\n\n\t// test for a normal situation\n\t// input: 8 bytes array, []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}\n\t// output: 0x7fffffffffffffff\n\t//\n\t// no problem if input unequals to output\n\tzeroB64 := []byte{0x00, 0x00, 0x00, 0x00,\n\t\t0x00, 0x00, 0x00, 0x00}\n\tif BtI64(zeroB64) == 0x7fffffffffffffff {\n\t\tt.Fatalf(\"it needs that input unequals to output\")\n\t}\n\n\t// test for an anomaly situation\n\t// input: 9 bytes array, []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}\n\t// output: 0x7fffffffffffffff\n\t//\n\t// no problem if input unequals to output\n\tzeroB72 := []byte{0x00, 0x00, 0x00, 0x00,\n\t\t0x00, 0x00, 0x00, 0x00,\n\t\t0x00}\n\tif BtI64(zeroB72) != 0x7fffffffffffffff {\n\t\tt.Fatalf(\"it needs that input equals to output\")\n\t}\n\n\t// test for an anomaly situation\n\t// input: 7 bytes array, []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}\n\t// output: 0x7fffffffffffffff\n\t//\n\t// no problem if input unequals to output\n\tzeroB56 := []byte{0x00, 0x00, 0x00, 0x00,\n\t\t0x00, 0x00, 0x00}\n\tif BtI64(zeroB56) != 0x7fffffffffffffff {\n\t\tt.Fatalf(\"it needs that input equals to output\")\n\t}\n\n\t// TODO: test a large size bytes array\n\t//zeroB18446744073709551615 := []byte{0x00 * 18446744073709551615}\n\t//if BtI64(zeroB18446744073709551615) != 0x7fffffffffffffff {\n\t//\tt.Fatalf(\"it needs that input equals to output\")\n\t//}\n\n\t// test for an anomaly situation\n\t// input: empty bytes array\n\t// output: 0x7fffffffffffffff\n\t//\n\t// no problem if input equals to output\n\temptyB0 := []byte{}\n\tif BtI64(emptyB0) != 0x7fffffffffffffff {\n\t\tt.Fatalf(\"it needs that input equals to output\")\n\t}\n}", "func sm2P256FromBig(X *sm2P256FieldElement, a *big.Int) {\n\tx := new(big.Int).Lsh(a, 257)\n\tx.Mod(x, sm2P256.P)\n\tfor i := 0; i < 9; i++ {\n\t\tif bits := x.Bits(); len(bits) > 0 {\n\t\t\tX[i] = uint32(bits[0]) & bottom29Bits\n\t\t} else {\n\t\t\tX[i] = 0\n\t\t}\n\t\tx.Rsh(x, 29)\n\t\ti++\n\t\tif i == 9 {\n\t\t\tbreak\n\t\t}\n\t\tif bits := x.Bits(); len(bits) > 0 {\n\t\t\tX[i] = uint32(bits[0]) & bottom28Bits\n\t\t} else {\n\t\t\tX[i] = 0\n\t\t}\n\t\tx.Rsh(x, 28)\n\t}\n}", "func PMAXSDm128int32(X1 []int32, X2 []int32)", "func FloatPrec(x *big.Float,) uint", "func (h *HSBA) AsInts() (int, int, int, int) {\n\treturn h.H, int(h.S * 255), int(h.B * 255), int(h.A * 255)\n}", "func btoi(b []byte) int {\n\tv := int(binary.BigEndian.Uint64(b))\n return v\n}", "func encodeValueAndMessage(alpha, v *big.Int, message [16]byte) {\n\t// The value v is at most 64 bits so if we want to encode a message we have\n\t// a bunch more space we can use. 
Encode the message into v then subtract\n\t// that from alpha.\n\tvBytes := GetB32(v)\n\tfor i := 0; i < 16; i++ {\n\t\tvBytes[i+8] = message[i]\n\t}\n\n\t// alpha - v\n\talpha.Add(alpha, new(big.Int).Sub(curve.N, new(big.Int).SetBytes(vBytes[:])))\n\talpha.Mod(alpha, curve.N)\n}", "func (f Fixed8) Value() int64 {\n\treturn int64(f) / int64(decimals)\n}", "func getB(i int) uint8 {\n\treturn uint8(i & 0x0000FF)\n}", "func vvFromUint(u uint64) (result []*vdl.Value) {\n\ti, f := int64(u), float64(u)\n\tswitch {\n\tcase u <= math.MaxInt8:\n\t\tresult = append(result,\n\t\t\tvdl.IntValue(vdl.Int8Type, i),\n\t\t\tvdl.IntValue(vdl.Int8TypeN, i))\n\t\tfallthrough\n\tcase u <= math.MaxUint8:\n\t\tresult = append(result,\n\t\t\tvdl.UintValue(vdl.ByteType, u),\n\t\t\tvdl.UintValue(vdl.ByteTypeN, u))\n\t\tfallthrough\n\tcase u <= math.MaxInt16:\n\t\tresult = append(result,\n\t\t\tvdl.IntValue(vdl.Int16Type, i),\n\t\t\tvdl.IntValue(vdl.Int16TypeN, i))\n\t\tfallthrough\n\tcase u <= math.MaxUint16:\n\t\tresult = append(result,\n\t\t\tvdl.UintValue(vdl.Uint16Type, u),\n\t\t\tvdl.UintValue(vdl.Uint16TypeN, u))\n\t\tfallthrough\n\tcase u <= 1<<24:\n\t\tresult = append(result,\n\t\t\tvdl.FloatValue(vdl.Float32Type, f),\n\t\t\tvdl.FloatValue(vdl.Float32TypeN, f))\n\t\tfallthrough\n\tcase u <= math.MaxInt32:\n\t\tresult = append(result,\n\t\t\tvdl.IntValue(vdl.Int32Type, i),\n\t\t\tvdl.IntValue(vdl.Int32TypeN, i))\n\t\tfallthrough\n\tcase u <= math.MaxUint32:\n\t\tresult = append(result,\n\t\t\tvdl.UintValue(vdl.Uint32Type, u),\n\t\t\tvdl.UintValue(vdl.Uint32TypeN, u))\n\t\tfallthrough\n\tcase u <= 1<<53:\n\t\tresult = append(result,\n\t\t\tvdl.FloatValue(vdl.Float64Type, f),\n\t\t\tvdl.FloatValue(vdl.Float64TypeN, f))\n\t\tfallthrough\n\tcase u <= math.MaxInt64:\n\t\tresult = append(result,\n\t\t\tvdl.IntValue(vdl.Int64Type, i),\n\t\t\tvdl.IntValue(vdl.Int64TypeN, i))\n\t\tfallthrough\n\tdefault:\n\t\tresult = append(result,\n\t\t\tvdl.UintValue(vdl.Uint64Type, u),\n\t\t\tvdl.UintValue(vdl.Uint64TypeN, u))\n\t}\n\treturn result\n}", "func oruint8s(a, b uint8) uint8", "func ipF4(pv, vl, vh float32) (a, b, c float32, r int) {\n\tif pv > vh {\n\t\tvh, pv = pv, vh\n\t\tr = 1\n\t} else if pv < vl {\n\t\tvl, pv = pv, vl\n\t\tr = -1\n\t}\n\treturn vl, pv, vh, r\n}", "func anyToInt8(i interface{}, def ...int8) int8 {\n\tvar defV int8 = 0\n\tif len(def) > 0 {\n\t\tdefV = def[0]\n\t}\n\tif i == nil {\n\t\treturn defV\n\t}\n\tif v, ok := i.(int8); ok {\n\t\treturn v\n\t}\n\treturn int8(anyToInt64(i, anyToInt64(defV)))\n}", "func RatSetInt(z *big.Rat, x *big.Int,) *big.Rat", "func inner(r0, r1, r2, r3, r4 int) (int, int, int, int, int) {\n\tfor {\n\t\tr1 = r2 & 255\n\t\tr3 += r1\n\t\tr3 &= 16777215\n\t\tr3 *= 65899\n\t\tr3 &= 16777215\n\t\tif 256 > r2 {\n\t\t\treturn r0, r1, r2, r3, r4\n\t\t}\n\t\tr2 = r2 / 256\n\t}\n}", "func IntQuo(z *big.Int, x, y *big.Int,) *big.Int", "func FtoB(f int) uint64 {\n\tif f < x86.REG_X0 || f > x86.REG_X15 {\n\t\treturn 0\n\t}\n\treturn 1 << uint(f-x86.REG_X0+16)\n}", "func MaxUint8x16(a, b Uint8x16) Uint8x16", "func toAtoms(v float64) uint64 {\n\treturn uint64(math.Round(v * 1e8))\n}", "func toAtoms(v float64) uint64 {\n\treturn uint64(math.Round(v * 1e8))\n}", "func Ones(_, _ int, _ float64) float64 { return 1 }", "func floatBits(f float64) uint64 {\n\t// Take f parameter and determine bit pattern.\n\t// Translate bit pattern into a value of type uint64\n\ti := *(*uint64)(unsafe.Pointer(&f))\n\t//fmt.Printf(\"strconv.FormatUint: %v\\n\", strconv.FormatUint(i, 2))\n\t// Return new 
value\n\treturn i\n}", "func IntDiv(z *big.Int, x, y *big.Int,) *big.Int", "func powerSeries(a int) (int, int) {\n\treturn a * a, a * a * a\n\n}", "func zeroextendAndMask8to64(a int8, b int16) (x, y uint64) {\n\t// ppc64x: -\"MOVB\\t\", -\"ANDCC\", \"MOVBZ\"\n\tx = uint64(a) & 0xFF\n\t// ppc64x: -\"MOVH\\t\", -\"ANDCC\", \"MOVHZ\"\n\ty = uint64(b) & 0xFFFF\n\treturn\n\n}", "func Fixed(a []byte, b []byte) []byte {\n\tresult := make([]byte, len(a))\n\n\tfor index, _ := range a {\n\t\tresult[index] = a[index] ^ b[index]\n\t}\n\n\treturn result\n}", "func Jacobi(x, y *big.Int,) int", "func (sc Scalar) Refresh_bint() {\n\tcopy_data := make([]byte,len(sc.Val))\n\tif len(sc.Val)!= 32 {\n\t// print error\n\t}\n\tcopy(copy_data,sc.Val)\n\tfor i, j := 0, len(copy_data)-1; i < j; i, j = i+1, j-1 { // reversal of bytes\n\t\tcopy_data[i], copy_data[j] = copy_data[j], copy_data[i]\n\t}\n\tsc.bint = *new(big.Int).SetBytes(copy_data)\n}", "func IntExp(z *big.Int, x, y, m *big.Int,) *big.Int", "func toBInteger(n int) string {\n\n\treturn fmt.Sprintf(\"i%de\", n)\n}", "func mergeBytes(left8 byte, right8 byte) (v int) {\n\t// awesome conversion to signed int\n\tv = int((uint16(left8) << 8) | uint16(right8))\n\tif v >= 32768 {\n\t\tv = -65536 + v\n\t}\n\treturn\n}", "func PMULLDm128int32(X1 []int32, X2 []int32)", "func btoi(b []byte) int {\n\treturn int(binary.BigEndian.Uint64(b))\n}", "func btoi(b []byte) int {\n\treturn int(binary.BigEndian.Uint64(b))\n}", "func btoi(b []byte) int {\n\treturn int(binary.BigEndian.Uint64(b))\n}", "func (this *VlqBase128Be) Value() (v int, err error) {\n\tif (this._f_value) {\n\t\treturn this.value, nil\n\t}\n\ttmp3, err := this.Last()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\ttmp4, err := this.Groups[tmp3].Value()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tvar tmp5 int;\n\ttmp6, err := this.Last()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif (tmp6 >= 1) {\n\t\ttmp7, err := this.Last()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\ttmp8, err := this.Groups[(tmp7 - 1)].Value()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\ttmp5 = (tmp8 << 7)\n\t} else {\n\t\ttmp5 = 0\n\t}\n\tvar tmp9 int;\n\ttmp10, err := this.Last()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif (tmp10 >= 2) {\n\t\ttmp11, err := this.Last()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\ttmp12, err := this.Groups[(tmp11 - 2)].Value()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\ttmp9 = (tmp12 << 14)\n\t} else {\n\t\ttmp9 = 0\n\t}\n\tvar tmp13 int;\n\ttmp14, err := this.Last()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif (tmp14 >= 3) {\n\t\ttmp15, err := this.Last()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\ttmp16, err := this.Groups[(tmp15 - 3)].Value()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\ttmp13 = (tmp16 << 21)\n\t} else {\n\t\ttmp13 = 0\n\t}\n\tvar tmp17 int;\n\ttmp18, err := this.Last()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif (tmp18 >= 4) {\n\t\ttmp19, err := this.Last()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\ttmp20, err := this.Groups[(tmp19 - 4)].Value()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\ttmp17 = (tmp20 << 28)\n\t} else {\n\t\ttmp17 = 0\n\t}\n\tvar tmp21 int;\n\ttmp22, err := this.Last()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif (tmp22 >= 5) {\n\t\ttmp23, err := this.Last()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\ttmp24, err := this.Groups[(tmp23 - 5)].Value()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\ttmp21 = (tmp24 << 35)\n\t} else {\n\t\ttmp21 = 0\n\t}\n\tvar 
tmp25 int;\n\ttmp26, err := this.Last()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif (tmp26 >= 6) {\n\t\ttmp27, err := this.Last()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\ttmp28, err := this.Groups[(tmp27 - 6)].Value()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\ttmp25 = (tmp28 << 42)\n\t} else {\n\t\ttmp25 = 0\n\t}\n\tvar tmp29 int;\n\ttmp30, err := this.Last()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif (tmp30 >= 7) {\n\t\ttmp31, err := this.Last()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\ttmp32, err := this.Groups[(tmp31 - 7)].Value()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\ttmp29 = (tmp32 << 49)\n\t} else {\n\t\ttmp29 = 0\n\t}\n\tthis.value = int((((((((tmp4 + tmp5) + tmp9) + tmp13) + tmp17) + tmp21) + tmp25) + tmp29))\n\tthis._f_value = true\n\treturn this.value, nil\n}", "func BigByZip(target *big.Int) uint32 {\n\tif target.Sign() == 0 {\n\t\treturn 0\n\t}\n\tc := uint(3)\n\te := uint(len(target.Bytes()))\n\tvar mantissa uint\n\tif e <= c {\n\t\tmantissa = uint(target.Bits()[0])\n\t\tshift := 8 * (c - e)\n\t\tmantissa <<= shift\n\t} else {\n\t\tshift := 8 * (e - c)\n\t\tmantissaNum := target.Rsh(target, shift)\n\t\tmantissa = uint(mantissaNum.Bits()[0])\n\t}\n\tmantissa <<= 8\n\tmantissa = mantissa & 0xffffffff\n\treturn uint32(mantissa | e)\n}", "func split(sum int) (x,y int) {\n\tx = sum * 4/9\n\ty = sum - x\n\treturn\n}", "func NewFixed8(val int) Fixed8 {\n\treturn Fixed8(decimals * val)\n}", "func add(x int, y int) (p,q int) {\n a := x + y\n b := 34 * 34\n return a,b\n}", "func ts64(x []u8, u u64) {\n\tfor i := 7; i >= 0; i-- {\n\t\tx[i] = u8(u)\n\t\tu >>= 8\n\t}\n}", "func (sb *Builder) formatBits(u uint64, base int, neg bool) {\n\tif base < 2 || base > len(digits) {\n\t\tpanic(\"strconv: illegal AppendInt/FormatInt base\")\n\t}\n\t// 2 <= base && base <= len(digits)\n\n\tvar a [64 + 1]byte // +1 for sign of 64bit value in base 2\n\ti := len(a)\n\n\tif neg {\n\t\tu = -u\n\t}\n\n\t// convert bits\n\t// We use uint values where we can because those will\n\t// fit into a single register even on a 32bit machine.\n\tif base == 10 {\n\t\t// common case: use constants for / because\n\t\t// the compiler can optimize it into a multiply+shift\n\n\t\tif host32bit {\n\t\t\t// convert the lower digits using 32bit operations\n\t\t\tfor u >= 1e9 {\n\t\t\t\t// Avoid using r = a%b in addition to q = a/b\n\t\t\t\t// since 64bit division and modulo operations\n\t\t\t\t// are calculated by runtime functions on 32bit machines.\n\t\t\t\tq := u / 1e9\n\t\t\t\tus := uint(u - q*1e9) // u % 1e9 fits into a uint\n\t\t\t\tfor j := 4; j > 0; j-- {\n\t\t\t\t\tis := us % 100 * 2\n\t\t\t\t\tus /= 100\n\t\t\t\t\ti -= 2\n\t\t\t\t\ta[i+1] = smallsString[is+1]\n\t\t\t\t\ta[i+0] = smallsString[is+0]\n\t\t\t\t}\n\n\t\t\t\t// us < 10, since it contains the last digit\n\t\t\t\t// from the initial 9-digit us.\n\t\t\t\ti--\n\t\t\t\ta[i] = smallsString[us*2+1]\n\n\t\t\t\tu = q\n\t\t\t}\n\t\t\t// u < 1e9\n\t\t}\n\n\t\t// u guaranteed to fit into a uint\n\t\tus := uint(u)\n\t\tfor us >= 100 {\n\t\t\tis := us % 100 * 2\n\t\t\tus /= 100\n\t\t\ti -= 2\n\t\t\ta[i+1] = smallsString[is+1]\n\t\t\ta[i+0] = smallsString[is+0]\n\t\t}\n\n\t\t// us < 100\n\t\tis := us * 2\n\t\ti--\n\t\ta[i] = smallsString[is+1]\n\t\tif us >= 10 {\n\t\t\ti--\n\t\t\ta[i] = smallsString[is]\n\t\t}\n\n\t} else if isPowerOfTwo(base) {\n\t\t// Use shifts and masks instead of / and %.\n\t\t// Base is a power of 2 and 2 <= base <= len(digits) where len(digits) is 36.\n\t\t// The largest power of 2 below 
or equal to 36 is 32, which is 1 << 5;\n\t\t// i.e., the largest possible shift count is 5. By &-ind that value with\n\t\t// the constant 7 we tell the compiler that the shift count is always\n\t\t// less than 8 which is smaller than any register width. This allows\n\t\t// the compiler to generate better code for the shift operation.\n\t\tshift := uint(bits.TrailingZeros(uint(base))) & 7\n\t\tb := uint64(base)\n\t\tm := uint(base) - 1 // == 1<<shift - 1\n\t\tfor u >= b {\n\t\t\ti--\n\t\t\ta[i] = digits[uint(u)&m]\n\t\t\tu >>= shift\n\t\t}\n\t\t// u < base\n\t\ti--\n\t\ta[i] = digits[uint(u)]\n\t} else {\n\t\t// general case\n\t\tb := uint64(base)\n\t\tfor u >= b {\n\t\t\ti--\n\t\t\t// Avoid using r = a%b in addition to q = a/b\n\t\t\t// since 64bit division and modulo operations\n\t\t\t// are calculated by runtime functions on 32bit machines.\n\t\t\tq := u / b\n\t\t\ta[i] = digits[uint(u-q*b)]\n\t\t\tu = q\n\t\t}\n\t\t// u < base\n\t\ti--\n\t\ta[i] = digits[uint(u)]\n\t}\n\n\t// add sign, if any\n\tif neg {\n\t\ti--\n\t\ta[i] = '-'\n\t}\n\n\tif sb.Cap() < sb.Len()+len(a)-i {\n\t\tsb.Grow(sb.Len() + 128)\n\t}\n\n\tcopy(sb.data[sb.length:], a[i:])\n\tsb.length += len(a) - i\n}", "func mul(a float64, b int16) int16 {\n\tc := a * float64(b)\n\td := math.Round(float64(c))\n\treturn int16(d)\n}", "func IntToIeeeFloat(i int) [10]byte {\n\tb := [10]byte{}\n\tnum := float64(i)\n\n\tvar sign int\n\tvar expon int\n\tvar fMant, fsMant float64\n\tvar hiMant, loMant uint\n\n\tif num < 0 {\n\t\tsign = 0x8000\n\t} else {\n\t\tsign = 0\n\t}\n\n\tif num == 0 {\n\t\texpon = 0\n\t\thiMant = 0\n\t\tloMant = 0\n\t} else {\n\t\tfMant, expon = math.Frexp(num)\n\t\tif (expon > 16384) || !(fMant < 1) { /* Infinity or NaN */\n\t\t\texpon = sign | 0x7FFF\n\t\t\thiMant = 0\n\t\t\tloMant = 0 /* infinity */\n\t\t} else { /* Finite */\n\t\t\texpon += 16382\n\t\t\tif expon < 0 { /* denormalized */\n\t\t\t\tfMant = math.Ldexp(fMant, expon)\n\t\t\t\texpon = 0\n\t\t\t}\n\t\t\texpon |= sign\n\t\t\tfMant = math.Ldexp(fMant, 32)\n\t\t\tfsMant = math.Floor(fMant)\n\t\t\thiMant = uint(fsMant)\n\t\t\tfMant = math.Ldexp(fMant-fsMant, 32)\n\t\t\tfsMant = math.Floor(fMant)\n\t\t\tloMant = uint(fsMant)\n\t\t}\n\t}\n\n\tb[0] = byte(expon >> 8)\n\tb[1] = byte(expon)\n\tb[2] = byte(hiMant >> 24)\n\tb[3] = byte(hiMant >> 16)\n\tb[4] = byte(hiMant >> 8)\n\tb[5] = byte(hiMant)\n\tb[6] = byte(loMant >> 24)\n\tb[7] = byte(loMant >> 16)\n\tb[8] = byte(loMant >> 8)\n\tb[9] = byte(loMant)\n\n\treturn b\n}", "func pack12(src []uint64) uint64 {\n\t_ = src[11] // eliminate multiple bounds checks\n\treturn 6<<60 |\n\t\tsrc[0] |\n\t\tsrc[1]<<5 |\n\t\tsrc[2]<<10 |\n\t\tsrc[3]<<15 |\n\t\tsrc[4]<<20 |\n\t\tsrc[5]<<25 |\n\t\tsrc[6]<<30 |\n\t\tsrc[7]<<35 |\n\t\tsrc[8]<<40 |\n\t\tsrc[9]<<45 |\n\t\tsrc[10]<<50 |\n\t\tsrc[11]<<55\n}", "func FromBytes(out1 *TightFieldElement, arg1 *[17]uint8) {\n\tx1 := (uint64(arg1[16]) << 41)\n\tx2 := (uint64(arg1[15]) << 33)\n\tx3 := (uint64(arg1[14]) << 25)\n\tx4 := (uint64(arg1[13]) << 17)\n\tx5 := (uint64(arg1[12]) << 9)\n\tx6 := (uint64(arg1[11]) * uint64(0x2))\n\tx7 := (uint64(arg1[10]) << 36)\n\tx8 := (uint64(arg1[9]) << 28)\n\tx9 := (uint64(arg1[8]) << 20)\n\tx10 := (uint64(arg1[7]) << 12)\n\tx11 := (uint64(arg1[6]) << 4)\n\tx12 := (uint64(arg1[5]) << 40)\n\tx13 := (uint64(arg1[4]) << 32)\n\tx14 := (uint64(arg1[3]) << 24)\n\tx15 := (uint64(arg1[2]) << 16)\n\tx16 := (uint64(arg1[1]) << 8)\n\tx17 := arg1[0]\n\tx18 := (x16 + uint64(x17))\n\tx19 := (x15 + x18)\n\tx20 := (x14 + x19)\n\tx21 := (x13 + x20)\n\tx22 := (x12 + 
x21)\n\tx23 := (x22 & 0xfffffffffff)\n\tx24 := uint8((x22 >> 44))\n\tx25 := (x11 + uint64(x24))\n\tx26 := (x10 + x25)\n\tx27 := (x9 + x26)\n\tx28 := (x8 + x27)\n\tx29 := (x7 + x28)\n\tx30 := (x29 & 0x7ffffffffff)\n\tx31 := uint1((x29 >> 43))\n\tx32 := (x6 + uint64(x31))\n\tx33 := (x5 + x32)\n\tx34 := (x4 + x33)\n\tx35 := (x3 + x34)\n\tx36 := (x2 + x35)\n\tx37 := (x1 + x36)\n\tout1[0] = x23\n\tout1[1] = x30\n\tout1[2] = x37\n}", "func MaxFloat32x4(a, b Float32x4) Float32x4" ]
[ "0.5875011", "0.5786388", "0.56857926", "0.5649239", "0.55676305", "0.55642706", "0.5491502", "0.54723597", "0.5457835", "0.5405847", "0.53998804", "0.5364934", "0.53503346", "0.5335121", "0.53200454", "0.52959776", "0.5291742", "0.52640444", "0.5239919", "0.5216811", "0.52064645", "0.5200701", "0.5186088", "0.5183516", "0.5152605", "0.51053125", "0.5103395", "0.50956255", "0.50951993", "0.5088584", "0.5011057", "0.5010464", "0.49823892", "0.4980958", "0.4979834", "0.49762857", "0.49715504", "0.4968153", "0.49594888", "0.49591956", "0.49587238", "0.49459267", "0.49397847", "0.4925761", "0.4925734", "0.49254394", "0.49211884", "0.49102703", "0.49069163", "0.49009287", "0.48879576", "0.48827162", "0.4882118", "0.4878828", "0.48746818", "0.48711917", "0.48611942", "0.48605207", "0.4857454", "0.4852607", "0.48464993", "0.48424676", "0.483166", "0.48260605", "0.48249876", "0.48228553", "0.48180038", "0.48172635", "0.4810625", "0.48101324", "0.48025227", "0.4801577", "0.4801577", "0.47976357", "0.4797475", "0.47971857", "0.479472", "0.4791472", "0.47815788", "0.47798502", "0.47782493", "0.47767803", "0.47698998", "0.47653988", "0.47642207", "0.4762667", "0.4762667", "0.4762667", "0.47563702", "0.47502077", "0.47480494", "0.4746476", "0.4745487", "0.47428268", "0.4738704", "0.47379592", "0.47366905", "0.47303367", "0.47294506", "0.47276384" ]
0.55694884
4
Calculate returns the standard deviation of a base indicator
func (sdi StandardDeviationIndicator) Calculate(index int) big.Decimal { return VarianceIndicator{ Indicator: sdi.Indicator, }.Calculate(index).Sqrt() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (h *pingHistory) stdDev() float64 {\n\treturn math.Sqrt(h.variance())\n}", "func (e *exemplarSampler) standardDeviation() float64 {\n\tif e.count < 2 {\n\t\treturn 0\n\t}\n\treturn math.Sqrt(e.m2 / float64(e.count-1))\n}", "func TestGet_StandardDeviation(t *testing.T) {\n\tdata := []float64{1345, 1301, 1368, 1322, 1310, 1370, 1318, 1350, 1303, 1299}\n\tmean, stdev_s := normality.Get_AverageAndStandardDeviation(&data)\n\t// Expected values (Correct answer)\n\t// mean = 1328.6\n\t// stdev_s = 27.46391571984349\n\tfmt.Println(\" Mean : \", mean)\n\tfmt.Println(\"Standard Deviation : \", stdev_s)\n}", "func meanSd(data []float64) (mean, sd float64) {\n\tn := 0.0\n\tmean = 0.0\n\tm2 := 0.0\n\tfor _, x := range data {\n\t\tn++\n\t\tdelta := x - mean\n\t\tmean += delta / n\n\t\tif n > 1 {\n\t\t\tm2 += delta * (x - mean)\n\t\t}\n\t}\n\tsd = sqrt(m2 / (n - 1))\n\treturn\n}", "func (s Series) StdDev() (float64, error) {\n\tvals, err := s.Float(false)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tstdDev := stat.StdDev(vals, nil)\n\treturn stdDev, nil\n}", "func stDev(a []float64) (s float64) {\n\tm := mean(a)\n\tfor _, v := range a {\n\t\ts += (v - m) * (v - m)\n\t}\n\treturn math.Sqrt(s / float64(len(a)))\n}", "func StandardDeviation(values []float64) float64 {\n\tarithmeticMean := ArithmeticMean(values)\n\tvar squaredDifferences []float64\n\tfor _, v := range values {\n\t\tdifference := v - arithmeticMean\n\t\tsquaredDifferences = append(squaredDifferences, difference*difference)\n\t}\n\treturn math.Sqrt((1 / float64(len(values)-1)) * Sum(squaredDifferences))\n}", "func (s *NumSeries) StdDev() float64 {\n\treturn math.Sqrt(s.Variance())\n}", "func CalculateStdev(data []int) float64 {\n\tmean := CalculateMean(data)\n\tvar squareSum float64\n\tfor _, value := range data {\n\t\tsquareSum += math.Pow((float64(value) - mean), 2)\n\t}\n\treturn math.Sqrt(squareSum / (float64(len(data) - 1)))\n}", "func (ds *Dataset) StandardDeviation() float64 {\n\treturn math.Sqrt(ds.Variance())\n}", "func (r *RunningStats) Stddev() float64 {\n\treturn math.Sqrt(r.Var())\n}", "func (d DelphiEstimate) GetStandardDeviation() float64 {\n\treturn (d.WorstCase - d.BestCase) / 6\n}", "func sd_a(list []int) [2]float64 {\r\n\tvar average float64 = 0.0\r\n\tvar sd float64 = 0.0\r\n\tvar result [2]float64\r\n\r\n\tfor _, item := range list {\r\n\t\taverage += float64(item)\r\n\t}\r\n\r\n\taverage = average / float64(len(list))\r\n\r\n\tfor _, item := range list {\r\n\t\tsd += math.Pow((float64(item) - average), 2.0)\r\n\t}\r\n\tsd = math.Sqrt(sd / float64(len(list)))\r\n\r\n\tresult[0] = average\r\n\tresult[1] = sd\r\n\r\n\treturn result\r\n}", "func (s Set) GetStd() float64 {\r\n\treturn math.Sqrt(s.GetVariance())\r\n}", "func StdDev(by []string, input []*oproto.ValueStream) []*oproto.ValueStream {\n\toutput := []*oproto.ValueStream{{Variable: input[0].Variable}}\n\tiPos := make([]int, len(input))\n\tfor {\n\t\tvalues := []float64{}\n\t\ttimestamps := []uint64{}\n\t\tfor i := 0; i < len(input); i++ {\n\t\t\tif iPos[i] >= len(input[i].Value) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif input[i] != nil {\n\t\t\t\tvalues = append(values, input[i].Value[iPos[i]].GetDouble())\n\t\t\t\ttimestamps = append(timestamps, input[i].Value[iPos[i]].Timestamp)\n\t\t\t}\n\t\t\tiPos[i]++\n\t\t}\n\t\tif len(values) == 0 {\n\t\t\tbreak\n\t\t}\n\t\tvar total float64\n\t\tfor _, i := range values {\n\t\t\ttotal += i\n\t\t}\n\t\tvar variances float64\n\t\tfor _, i := range values {\n\t\t\tvariances += math.Pow(((total / float64(len(values))) - i), 
2)\n\t\t}\n\n\t\tvar tsTotal uint64\n\t\tfor _, i := range timestamps {\n\t\t\ttsTotal += i\n\t\t}\n\t\toutput[0].Value = append(output[0].Value, value.NewDouble(tsTotal/uint64(len(timestamps)),\n\t\t\tmath.Sqrt(variances/float64(len(values)))))\n\t}\n\treturn output\n}", "func dev(dps Series, args ...float64) (d float64) {\n\tif len(dps) == 1 {\n\t\treturn 0\n\t}\n\ta := avg(dps)\n\tfor _, v := range dps {\n\t\td += math.Pow(float64(v)-a, 2)\n\t}\n\td /= float64(len(dps) - 1)\n\treturn math.Sqrt(d)\n}", "func (s *UniformSample) StdDev() float64 {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\treturn gometrics.SampleStdDev(s.values)\n}", "func StandardDeviation(nums []int) (dev float64) {\n\tif len(nums) == 0 {\n\t\treturn 0.0\n\t}\n\n\tm := Mean(nums)\n\tfor _, n := range nums {\n\t\tdev += (float64(n) - m) * (float64(n) - m)\n\t}\n\tdev = math.Pow(dev/float64(len(nums)), 0.5)\n\treturn dev\n}", "func CalculateSD(appNuminEacZone map[string]int, appNum int, zoneNum int, mean float64) float64 {\n\tvar sum, x float64\n\tfor _, v := range appNuminEacZone {\n\t\tx = float64(v) - mean\n\t\tsum = sum + math.Pow(x, 2)\n\t}\n\tfmt.Println(\"sum=\", sum)\n\tsd := float64(math.Sqrt(sum / float64(zoneNum)))\n\treturn sd\n}", "func StdDevS(input Float64Data) (sdev float64, err error) {\n\treturn StandardDeviationSample(input)\n}", "func (b Binomial) StdDev() float64 {\n\treturn math.Sqrt(b.Variance())\n}", "func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() }", "func SDInt(slice []int) float64 {\n\tif len(slice) == 1 {\n\t\treturn 0\n\t}\n\n\tmean := MeanInt(slice)\n\tsum := float64(0)\n\tfor _, value := range slice {\n\t\tsum += math.Pow(float64(value)-mean, float64(2))\n\t}\n\n\treturn math.Sqrt(sum / float64(len(slice)-1))\n}", "func Stdev(input []float64) float64 {\n\tvariance := Variance(input)\n\treturn math.Pow(variance, 0.5)\n}", "func (c *Counter) StdDev() float64 {\n\tfC := float64(c.Count)\n\tsigma := (c.sumOfSquares - c.Sum*c.Sum/fC) / fC\n\t// should never happen but it does\n\tif sigma < 0 {\n\t\tlog.Warnf(\"Unexpected negative sigma for %+v: %g\", c, sigma)\n\t\treturn 0\n\t}\n\treturn math.Sqrt(sigma)\n}", "func StandardDeviation(values []float64) float64 {\n\tvar sum, sumSq, length float64 = 0.0, 0.0, 0.0\n\tfor _, d := range values {\n\t\tif !math.IsNaN(d) {\n\t\t\tsum += d\n\t\t\tsumSq += d * d\n\t\t\tlength++\n\t\t}\n\t}\n\treturn math.Sqrt((math.Abs(sumSq-sum*sum) /\n\t\t((length) * (length - 1))))\n}", "func (indis Individuals) FitStd() float64 {\n\treturn math.Sqrt(varianceFloat64s(indis.getFitnesses()))\n}", "func stdev(img image.Image) float64 {\n\tw, h := dim(img)\n\n\tn := float64((w * h) - 1)\n\tsum := 0.0\n\tavg := mean(img)\n\n\tfor x := 0; x < w; x++ {\n\t\tfor y := 0; y < h; y++ {\n\t\t\tpix := getPixVal(img.At(x, y))\n\t\t\tsum += math.Pow((pix - avg), 2.0)\n\t\t}\n\t}\n\treturn math.Sqrt(sum / n)\n}", "func (a AlphaStable) StdDev() float64 {\n\treturn math.Sqrt(a.Variance())\n}", "func (NilTimer) StdDev() float64 { return 0.0 }", "func StdDev(col Columnar) ColumnElem {\n\treturn Function(STDDEV, col)\n}", "func (f F) StdDev() float64 {\n\tif f.D2 <= 4 {\n\t\treturn math.NaN()\n\t}\n\treturn math.Sqrt(f.Variance())\n}", "func measureStdDev(endpoints []string, hashesPerEndpoint int) float64 {\n\tch := newConsistentHashInternal(endpoints, hashesPerEndpoint).(*consistentHash)\n\tringOwnership := map[int]uint64{}\n\tprevPartitionEndHash := uint64(0)\n\tfor i := 0; i < len(ch.hashRing); i++ {\n\t\tendpointIndex := 
ch.hashRing[i].index\n\t\tpartitionEndHash := ch.hashRing[i].hash\n\t\tringOwnership[endpointIndex] += partitionEndHash - prevPartitionEndHash\n\t\tprevPartitionEndHash = partitionEndHash\n\t}\n\tringOwnership[ch.hashRing[0].index] += math.MaxUint64 - prevPartitionEndHash\n\treturn stdDeviation(ringOwnership)\n}", "func (n *Normal) StdDev() float64 {\n\treturn n.stddev\n}", "func StDev(xs ...float64) float64 {\n\treturn gomath.Sqrt(Var(xs...))\n}", "func (g GumbelRight) StdDev() float64 {\n\treturn (math.Pi / math.Sqrt(6)) * g.Beta\n}", "func (t *StandardTimer) StdDev() float64 {\n\treturn t.histogram.StdDev()\n}", "func (e Exponential) StdDev() float64 {\n\treturn 1 / e.Rate\n}", "func (c *Counter) Stdev() float64 {\n\treturn stdev(c.count, float64(c.sumSq), float64(c.sum))\n}", "func funcHistogramStdDev(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {\n\tinVec := vals[0].(Vector)\n\n\tfor _, sample := range inVec {\n\t\t// Skip non-histogram samples.\n\t\tif sample.H == nil {\n\t\t\tcontinue\n\t\t}\n\t\tmean := sample.H.Sum / sample.H.Count\n\t\tvar variance, cVariance float64\n\t\tit := sample.H.AllBucketIterator()\n\t\tfor it.Next() {\n\t\t\tbucket := it.At()\n\t\t\tvar val float64\n\t\t\tif bucket.Lower <= 0 && 0 <= bucket.Upper {\n\t\t\t\tval = 0\n\t\t\t} else {\n\t\t\t\tval = math.Sqrt(bucket.Upper * bucket.Lower)\n\t\t\t}\n\t\t\tdelta := val - mean\n\t\t\tvariance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance)\n\t\t}\n\t\tvariance += cVariance\n\t\tvariance /= sample.H.Count\n\t\tenh.Out = append(enh.Out, Sample{\n\t\t\tMetric: enh.DropMetricName(sample.Metric),\n\t\t\tF: math.Sqrt(variance),\n\t\t})\n\t}\n\treturn enh.Out\n}", "func (s FloatList) StandardDeviation() float64 {\n\treturn math.Sqrt(s.Variance())\n}", "func (ds *Dataset) SampleStandardDeviation() float64 {\n\treturn math.Sqrt(ds.SampleVariance())\n}", "func (s *NumSeries) SampleStdDev() float64 {\n\treturn math.Sqrt(s.SampleVariance())\n}", "func (h *PCPHistogram) StandardDeviation() float64 {\n\th.mutex.RLock()\n\tdefer h.mutex.RUnlock()\n\treturn h.vals[\"standard_deviation\"].val.(float64)\n}", "func (norm Normal) StdDev() float64 {\n\treturn norm.Sigma\n}", "func (s *Stat) GetStdDevS() float64 {\n\treturn math.Sqrt(s.GetVarS())\n}", "func SD(list []interface{}) (float64, error) {\n\ta, err := ds.NewArrayFrom(list)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn a.SD(), nil\n}", "func (p Pareto) StdDev() float64 {\n\treturn math.Sqrt(p.Variance())\n}", "func (self *Sax) normalize(measureList []float64) []float64 {\n var err error\n if len(measureList) < 1 {\n return measureList\n }\n\n s2 := float64(1)\n m2 := float64(0)\n m1 := mean(measureList)\n if err != nil {\n panic(err)\n }\n s1 := stdDev(measureList, m1)\n if err != nil {\n panic(err)\n }\n\n stdMultiplier := (s2 / s1)\n\n for i, m := range measureList {\n measureList[i] = float64(m2 + (m - m1)) * stdMultiplier\n }\n return measureList\n}", "func ReduceStddev(values []interface{}) interface{} {\n\tvar data []float64\n\t// Collect all the data points\n\tfor _, value := range values {\n\t\tif value == nil {\n\t\t\tcontinue\n\t\t}\n\t\tdata = append(data, value.([]float64)...)\n\t}\n\n\t// If no data or we only have one point, it's nil or undefined\n\tif len(data) < 2 {\n\t\treturn nil\n\t}\n\n\t// Get the mean\n\tvar mean float64\n\tvar count int\n\tfor _, v := range data {\n\t\tcount++\n\t\tmean += (v - mean) / float64(count)\n\t}\n\t// Get the variance\n\tvar variance float64\n\tfor _, v := range data 
{\n\t\tdif := v - mean\n\t\tsq := math.Pow(dif, 2)\n\t\tvariance += sq\n\t}\n\tvariance = variance / float64(count-1)\n\tstddev := math.Sqrt(variance)\n\n\treturn stddev\n}", "func SDFloat(slice []float64) float64 {\n\tif len(slice) == 1 {\n\t\treturn 0\n\t}\n\n\tmean := MeanFloat(slice)\n\tsum := float64(0)\n\tfor _, value := range slice {\n\t\tsum += math.Pow(value-mean, float64(2))\n\t}\n\n\treturn math.Sqrt(sum / float64(len(slice)-1))\n}", "func (c ChiSquared) StdDev() float64 {\n\treturn math.Sqrt(c.Variance())\n}", "func (pdl PriceDataList) sma(n int, offset int) float64 {\n\tstart := len(pdl) - n - offset\n\tend := len(pdl) - offset\n\n\tslice := pdl[start:end]\n\tsum := float64(0)\n\n\tfor _, pd := range slice {\n\t\tsum += pd.Close\n\t}\n\n\treturn sum / float64(n)\n}", "func Deviation(generator *rand.Rand, factor float64) Transformation {\n\trandom := fallbackNewRandom(generator)\n\n\treturn func(duration time.Duration) time.Duration {\n\t\tmin := int64(math.Floor(float64(duration) * (1 - factor)))\n\t\tmax := int64(math.Ceil(float64(duration) * (1 + factor)))\n\n\t\treturn time.Duration(random.Int63n(max-min) + min)\n\t}\n}", "func (h *pingHistory) variance() float64 {\n\tvar sqDevSum float64\n\n\tmean := h.mean()\n\n\tfor _, t := range *h {\n\t\tsqDevSum = sqDevSum + math.Pow((float64(t)-mean), 2)\n\t}\n\treturn sqDevSum / float64(len(*h))\n}", "func Deviation(data []float64, holeIndices []int, dim int, triangles []int) float64 {\n\thasHoles := holeIndices != nil && len(holeIndices) > 0\n\tvar outerLen int\n\tif hasHoles {\n\t\touterLen = holeIndices[0] * dim\n\t} else {\n\t\touterLen = len(data)\n\t}\n\n\tpolygonArea := math.Abs(signedArea(data, 0, outerLen, dim))\n\tvar start, end int\n\tif hasHoles {\n\t\tfor i, l := 0, len(holeIndices); i < l; i++ {\n\t\t\tstart = holeIndices[i] * dim\n\t\t\tif i < l-1 {\n\t\t\t\tend = holeIndices[i+1] * dim\n\t\t\t} else {\n\t\t\t\tend = len(data)\n\t\t\t}\n\t\t\tpolygonArea -= math.Abs(signedArea(data, start, end, dim))\n\t\t}\n\t}\n\n\tvar trianglesArea float64\n\tfor i := 0; i < len(triangles); i += 3 {\n\t\ta := triangles[i] * dim\n\t\tb := triangles[i+1] * dim\n\t\tc := triangles[i+2] * dim\n\t\ttrianglesArea += math.Abs(\n\t\t\t(data[a]-data[c])*(data[b+1]-data[a+1]) -\n\t\t\t\t(data[a]-data[b])*(data[c+1]-data[a+1]))\n\t}\n\n\tif polygonArea == 0.0 && trianglesArea == 0.0 {\n\t\treturn 0.0\n\t}\n\tif polygonArea == 0.0 {\n\t\treturn math.Inf(1)\n\t}\n\treturn math.Abs((trianglesArea - polygonArea) / polygonArea)\n}", "func (b *Box) MeanStd() {\n\tb.MeanL, b.StdL = stat.MeanStdDev(b.values, nil)\n}", "func StandardDeviationPopulation(data []f64, mu f64) f64 {\n\treturn math.Sqrt(VariancePopulation(data, mu))\n}", "func (fn *formulaFuncs) stdev(stdeva bool, argsList *list.List) formulaArg {\n\tcount, result := -1.0, -1.0\n\tvar mean formulaArg\n\tif stdeva {\n\t\tmean = fn.AVERAGEA(argsList)\n\t} else {\n\t\tmean = fn.AVERAGE(argsList)\n\t}\n\tfor arg := argsList.Front(); arg != nil; arg = arg.Next() {\n\t\ttoken := arg.Value.(formulaArg)\n\t\tswitch token.Type {\n\t\tcase ArgString, ArgNumber:\n\t\t\tif !stdeva && (token.Value() == \"TRUE\" || token.Value() == \"FALSE\") {\n\t\t\t\tcontinue\n\t\t\t} else if stdeva && (token.Value() == \"TRUE\" || token.Value() == \"FALSE\") {\n\t\t\t\tnum := token.ToBool()\n\t\t\t\tif num.Type == ArgNumber {\n\t\t\t\t\tresult, count = calcStdevPow(result, count, num, mean)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tnum := token.ToNumber()\n\t\t\t\tif num.Type == ArgNumber 
{\n\t\t\t\t\tresult, count = calcStdevPow(result, count, num, mean)\n\t\t\t\t}\n\t\t\t}\n\t\tcase ArgList, ArgMatrix:\n\t\t\tresult, count = calcStdev(stdeva, result, count, mean, token)\n\t\t}\n\t}\n\tif count > 0 && result >= 0 {\n\t\treturn newNumberFormulaArg(math.Sqrt(result / count))\n\t}\n\treturn newErrorFormulaArg(formulaErrorDIV, formulaErrorDIV)\n}", "func SQRTSD(mx, x operand.Op) { ctx.SQRTSD(mx, x) }", "func (s Series) Mean() (float64, error) {\n\tvals, err := s.Float(false)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tstdDev := stat.Mean(vals, nil)\n\treturn stdDev, nil\n}", "func (e *edm) stat(tau2 int) float64 {\n\ta, b, c := e.ta.Median(), e.tb.Median(), e.tab.Median()\n\ta, b, c = a*a, b*b, c*c\n\tstat := 2*c - a - b\n\tstat *= float64(e.tau*(tau2-e.tau)) / float64(tau2)\n\tif stat > e.bestStat {\n\t\te.bestStat = stat\n\t\te.bestIdx = e.tau\n\t}\n\treturn stat\n}", "func (b *BinP1D) XStdDev() float64 {\n\treturn b.dist.xStdDev()\n}", "func Variance(data []float64, mean float64) float64 {\r\n\treturn Reduce(data, mean, 2) / (float64(len(data)) - 1)\r\n}", "func StdDevP(input Float64Data) (sdev float64, err error) {\n\treturn StandardDeviationPopulation(input)\n}", "func StandardNormalCDF(x float64) (r float64) {\n\tsum := x\n\tvalue := x\n\tfor i := 1; i <= 1000; i++ {\n\t\tvalue = (value * x * x / (2*float64(i) + 1))\n\t\tsum += value\n\t}\n\tr = 0.5 + (sum/math.Sqrt(2*math.Pi))*math.Exp(-(x*x)/2)\n\treturn\n}", "func DIVSD(mx, x operand.Op) { ctx.DIVSD(mx, x) }", "func calcStdev(stdeva bool, result, count float64, mean, token formulaArg) (float64, float64) {\n\tfor _, row := range token.ToList() {\n\t\tif row.Type == ArgNumber || row.Type == ArgString {\n\t\t\tif !stdeva && (row.Value() == \"TRUE\" || row.Value() == \"FALSE\") {\n\t\t\t\tcontinue\n\t\t\t} else if stdeva && (row.Value() == \"TRUE\" || row.Value() == \"FALSE\") {\n\t\t\t\tnum := row.ToBool()\n\t\t\t\tif num.Type == ArgNumber {\n\t\t\t\t\tresult, count = calcStdevPow(result, count, num, mean)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tnum := row.ToNumber()\n\t\t\t\tif num.Type == ArgNumber {\n\t\t\t\t\tresult, count = calcStdevPow(result, count, num, mean)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn result, count\n}", "func CDF(x, avg, stddev float64) float64 {\n\treturn (1 + math.Erf(((x-avg)/stddev)/math.Sqrt2)) / 2\n}", "func (s *Stat) GetStdDevP() float64 {\n\treturn math.Sqrt(s.GetVarP())\n}", "func DMI(ohlcv OHLCVSeries, len, smoo int) (adx, plus, minus ValueSeries) {\n\tadxkey := fmt.Sprintf(\"adx:%s:%d:%d\", ohlcv.ID(), len, smoo)\n\tadx = getCache(adxkey)\n\tif adx == nil {\n\t\tadx = NewValueSeries()\n\t}\n\n\tpluskey := fmt.Sprintf(\"plus:%s:%d:%d\", ohlcv.ID(), len, smoo)\n\tplus = getCache(pluskey)\n\tif plus == nil {\n\t\tplus = NewValueSeries()\n\t}\n\n\tminuskey := fmt.Sprintf(\"minus:%s:%d:%d\", ohlcv.ID(), len, smoo)\n\tminus = getCache(minuskey)\n\tif minus == nil {\n\t\tminus = NewValueSeries()\n\t}\n\n\th := OHLCVAttr(ohlcv, OHLCPropHigh)\n\tstop := h.GetCurrent()\n\tif stop == nil {\n\t\treturn\n\t}\n\n\tl := OHLCVAttr(ohlcv, OHLCPropLow)\n\ttr := OHLCVAttr(ohlcv, OHLCPropTRHL)\n\n\tup := Change(h, 1)\n\tdown := Change(l, 1)\n\tplusdm := Operate(up, down, \"dmi:uv\", func(uv, dv float64) float64 {\n\t\tdv = dv * -1\n\t\tif uv > dv && uv > 0 {\n\t\t\treturn uv\n\t\t}\n\t\treturn 0\n\t})\n\tminusdm := Operate(down, up, \"dmi:uv\", func(dv, uv float64) float64 {\n\t\tdv = dv * -1\n\t\tif dv > uv && dv > 0 {\n\t\t\treturn dv\n\t\t}\n\t\treturn 0\n\t})\n\ttrurange := RMA(tr, 
int64(len))\n\tplusdmrma := RMA(plusdm, int64(len))\n\tminusdmrma := RMA(minusdm, int64(len))\n\tplus = MulConst(Div(plusdmrma, trurange), 100)\n\tminus = MulConst(Div(minusdmrma, trurange), 100)\n\n\tsum := Add(plus, minus)\n\tdenom := Operate(sum, sum, \"dmi:denom\", func(a, b float64) float64 {\n\t\tif a == 0 {\n\t\t\treturn 1\n\t\t}\n\t\treturn a\n\t})\n\n\tadxrma := RMA(Div(DiffAbs(plus, minus), denom), 3)\n\tadx = MulConst(adxrma, 100)\n\n\tsetCache(adxkey, adx)\n\tsetCache(pluskey, plus)\n\tsetCache(minuskey, minus)\n\n\treturn adx, plus, minus\n}", "func MapStddev(itr Iterator) interface{} {\n\tvar values []float64\n\n\tfor k, v := itr.Next(); k != -1; k, v = itr.Next() {\n\t\tswitch n := v.(type) {\n\t\tcase float64:\n\t\t\tvalues = append(values, n)\n\t\tcase int64:\n\t\t\tvalues = append(values, float64(n))\n\t\t}\n\t}\n\n\treturn values\n}", "func (b *BinP1D) XStdErr() float64 {\n\treturn b.dist.xStdErr()\n}", "func RSI(inReal []float64, inTimePeriod int) []float64 {\n\toutReal := make([]float64, len(inReal))\n\n\tif inTimePeriod < 2 || len(inReal) < inTimePeriod {\n\t\treturn outReal\n\t}\n\n\t// variable declarations\n\ttempValue1 := 0.0\n\ttempValue2 := 0.0\n\toutIdx := inTimePeriod\n\ttoday := 0\n\tprevValue := inReal[today]\n\tprevGain := 0.0\n\tprevLoss := 0.0\n\ttoday++\n\n\tfor i := inTimePeriod; i > 0; i-- {\n\t\ttempValue1 = inReal[today]\n\t\ttoday++\n\t\ttempValue2 = tempValue1 - prevValue\n\t\tprevValue = tempValue1\n\t\tif tempValue2 < 0 {\n\t\t\tprevLoss -= tempValue2\n\t\t} else {\n\t\t\tprevGain += tempValue2\n\t\t}\n\t}\n\n\tprevLoss /= float64(inTimePeriod)\n\tprevGain /= float64(inTimePeriod)\n\n\tif today > 0 {\n\t\ttempValue1 = prevGain + prevLoss\n\t\tif !((-0.00000000000001 < tempValue1) && (tempValue1 < 0.00000000000001)) {\n\t\t\toutReal[outIdx] = 100.0 * (prevGain / tempValue1)\n\t\t} else {\n\t\t\toutReal[outIdx] = 0.0\n\t\t}\n\t\toutIdx++\n\t} else {\n\t\tfor today < 0 {\n\t\t\ttempValue1 = inReal[today]\n\t\t\ttempValue2 = tempValue1 - prevValue\n\t\t\tprevValue = tempValue1\n\t\t\tprevLoss *= float64(inTimePeriod - 1)\n\t\t\tprevGain *= float64(inTimePeriod - 1)\n\t\t\tif tempValue2 < 0 {\n\t\t\t\tprevLoss -= tempValue2\n\t\t\t} else {\n\t\t\t\tprevGain += tempValue2\n\t\t\t}\n\t\t\tprevLoss /= float64(inTimePeriod)\n\t\t\tprevGain /= float64(inTimePeriod)\n\t\t\ttoday++\n\t\t}\n\t}\n\n\tfor today < len(inReal) {\n\t\ttempValue1 = inReal[today]\n\t\ttoday++\n\t\ttempValue2 = tempValue1 - prevValue\n\t\tprevValue = tempValue1\n\t\tprevLoss *= float64(inTimePeriod - 1)\n\t\tprevGain *= float64(inTimePeriod - 1)\n\t\tif tempValue2 < 0 {\n\t\t\tprevLoss -= tempValue2\n\t\t} else {\n\t\t\tprevGain += tempValue2\n\t\t}\n\t\tprevLoss /= float64(inTimePeriod)\n\t\tprevGain /= float64(inTimePeriod)\n\t\ttempValue1 = prevGain + prevLoss\n\t\tif !((-0.00000000000001 < tempValue1) && (tempValue1 < 0.00000000000001)) {\n\t\t\toutReal[outIdx] = 100.0 * (prevGain / tempValue1)\n\t\t} else {\n\t\t\toutReal[outIdx] = 0.0\n\t\t}\n\t\toutIdx++\n\t}\n\n\treturn outReal\n}", "func (p *P1D) XStdDev() float64 {\n\treturn p.bng.dist.xStdDev()\n}", "func (a *Ant) denominator(edges []*g.Edge) float64 {\n\tdenominator := 0.0\n\tfor i := 0; i < len(edges); i++ {\n\t\tdenominator += a.desirability(edges[i])\n\t}\n\n\treturn denominator\n}", "func (r *rsi) Calc(cc []exchange.Candle) (decimal.Decimal, error) {\n\tstart := r.CandlesCount()\n\tend := r.offset\n\n\tif cc == nil || len(cc) < start {\n\t\treturn decimal.Zero, errors.New(\"RSI candles list is too 
small\")\n\t}\n\n\tcandles := cc[len(cc)-start : len(cc)-end]\n\n\tprevGain := decimal.Zero\n\tprevLoss := decimal.Zero\n\n\t// First N candles' averages cannot be calculated (too few candles before them).\n\t// We start from r.period and not r.period-1 because gain/loss is calculated by\n\t// using previous candle value and current candle value.\n\tfor i := r.period; i < len(candles); i++ {\n\t\tcurrentGain := decimal.Zero\n\t\tcurrentLoss := decimal.Zero\n\n\t\tcalc := func(val1, val2 decimal.Decimal) {\n\t\t\tchange := val1.Sub(val2)\n\t\t\tif change.Sign() > 0 {\n\t\t\t\tcurrentGain = currentGain.Add(change)\n\t\t\t} else if change.Sign() < 0 {\n\t\t\t\tcurrentLoss = currentLoss.Add(change.Abs())\n\t\t\t}\n\t\t}\n\n\t\t// first candle of RSI period (since it's the first period entry\n\t\t// it won't have gain/loss, but it will have price which will be\n\t\t// used in the second entry to calc gain/loss).\n\t\tif i == r.period {\n\t\t\t// use first N candles to calc first RSI candle gain/loss\n\t\t\tfor j := i - r.period + 1; j <= i; j++ {\n\t\t\t\tcalc(candles[j].Price(r.price), candles[j-1].Price(r.price))\n\t\t\t}\n\t\t\tprevGain = currentGain.Div(decimal.New(int64(r.period), 0))\n\t\t\tprevLoss = currentLoss.Div(decimal.New(int64(r.period), 0))\n\t\t\tcontinue\n\t\t}\n\t\tcalc(candles[i].Price(r.price), candles[i-1].Price(r.price))\n\n\t\tprevGain = prevGain.Mul(decimal.New(int64(r.period-1), 0)).Add(currentGain).Div(decimal.New(int64(r.period), 0))\n\t\tprevLoss = prevLoss.Mul(decimal.New(int64(r.period-1), 0)).Add(currentLoss).Div(decimal.New(int64(r.period), 0))\n\t}\n\n\trs := prevGain.Div(utils.PreventZero(prevLoss))\n\treturn decimal.New(100, 0).Sub(decimal.New(100, 0).Div(utils.PreventZero(decimal.New(1, 0).Add(rs)))), nil\n}", "func VSQRTSD(ops ...operand.Op) { ctx.VSQRTSD(ops...) }", "func (t *StandardTimer) Variance() float64 {\n\treturn t.histogram.Variance()\n}", "func Normal(mean, sd float64) float64 {\n\treturn rand.NormFloat64()*sd + mean\n}", "func (ds *Dataset) MeanDeviation() float64 {\n\tmean := ds.ArithmeticMean()\n\tvar diffs float64\n\tfor _, value := range ds.values {\n\t\tdiffs += math.Abs(mean - value)\n\t}\n\treturn diffs / float64(len(ds.values))\n}", "func Variance(input []float64) (variance float64) {\n\tavg := Average(input)\n\tfor _, value := range input {\n\t\tvariance += (value - avg) * (value - avg)\n\t}\n\treturn variance / float64(len(input))\n}", "func MakeMeanStdAV(d []float64, m int) []float64 {\n\tav := make([]float64, len(d)-m+1)\n\t_, std, _ := movmeanstd(d, m)\n\tmu := stat.Mean(std, nil)\n\tfor i := 0; i < len(d)-m+1; i++ {\n\t\tif std[i] < mu {\n\t\t\tav[i] = 1\n\t\t}\n\t}\n\treturn av\n}", "func VSQRTSD_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VSQRTSD_RD_SAE_Z(x, x1, k, x2) }", "func (s Set) GetVariance() float64 {\r\n\tvar squareSum float64\r\n\tmean := s.GetMean()\r\n\tfor _, item := range s {\r\n\t\tsquareSum += math.Pow(mean-item, 2)\r\n\t}\r\n\treturn squareSum / float64(len(s))\r\n}", "func VSQRTSD_RD_SAE(ops ...operand.Op) { ctx.VSQRTSD_RD_SAE(ops...) 
}", "func (set *Set) SDiff(set2 *Set) {\n\n}", "func (n *NormalModel) Calc(values ModelDataSource, dataCount int) {\n\tif len(values) == 0 || dataCount <= 0 {\n\t\tn.Mean = 0.0\n\t\tn.StandardDeviation = 0.0\n\t\treturn\n\t}\n\tif len(values) < dataCount {\n\t\tdataCount = len(values)\n\t}\n\ttotal := 0.0\n\tfor i := 0; i < dataCount; i++ {\n\t\ttotal += values[i]\n\t}\n\tlength := float64(dataCount)\n\tn.Mean = total / length\n\tdev := 0.0\n\tfor i := 0; i < dataCount; i++ {\n\t\tdev += (values[i] - n.Mean) * (values[i] - n.Mean)\n\t}\n\tvariance := dev / length\n\tn.StandardDeviation = math.Sqrt(variance)\n}", "func (NilTimer) Variance() float64 { return 0.0 }", "func SUBSD(mx, x operand.Op) { ctx.SUBSD(mx, x) }", "func AccumDev(s []float64) (res []float64) {\n\tres = make([]float64, len(s))\n\n\tfor i, v := range s {\n\t\tif i == 0 {\n\t\t\tres[i] = 0.0\n\t\t} else {\n\t\t\tres[i] = (1+res[i-1])*(1+v) - 1\n\t\t}\n\t}\n\treturn\n}", "func CalcVariance(values []int) (float64, error) {\n\tif len(values) <= 0 {\n\t\treturn 0.0, errors.New(\"invalid length\")\n\t}\n\n\t// calculate average\n\tavg := 0.0\n\tfor _, v := range values {\n\t\tavg += float64(v)\n\t}\n\tavg /= float64(len(values))\n\n\tvariance := 0.0\n\tfor _, v := range values {\n\t\tvariance += math.Pow(float64(v)-avg, 2)\n\t}\n\tvariance /= float64(len(values))\n\n\treturn variance, nil\n}", "func (p *P1D) XStdErr() float64 {\n\treturn p.bng.dist.xStdErr()\n}", "func funcHistogramStdVar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {\n\tinVec := vals[0].(Vector)\n\n\tfor _, sample := range inVec {\n\t\t// Skip non-histogram samples.\n\t\tif sample.H == nil {\n\t\t\tcontinue\n\t\t}\n\t\tmean := sample.H.Sum / sample.H.Count\n\t\tvar variance, cVariance float64\n\t\tit := sample.H.AllBucketIterator()\n\t\tfor it.Next() {\n\t\t\tbucket := it.At()\n\t\t\tvar val float64\n\t\t\tif bucket.Lower <= 0 && 0 <= bucket.Upper {\n\t\t\t\tval = 0\n\t\t\t} else {\n\t\t\t\tval = math.Sqrt(bucket.Upper * bucket.Lower)\n\t\t\t}\n\t\t\tdelta := val - mean\n\t\t\tvariance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance)\n\t\t}\n\t\tvariance += cVariance\n\t\tvariance /= sample.H.Count\n\t\tenh.Out = append(enh.Out, Sample{\n\t\t\tMetric: enh.DropMetricName(sample.Metric),\n\t\t\tF: variance,\n\t\t})\n\t}\n\treturn enh.Out\n}", "func (b BrokerMap) StorageStdDev() float64 {\n\tvar m float64\n\tvar t float64\n\tvar s float64\n\tvar l float64\n\n\tfor id := range b {\n\t\tif id == StubBrokerID {\n\t\t\tcontinue\n\t\t}\n\t\tl++\n\t\tt += b[id].StorageFree\n\t}\n\n\tm = t / l\n\n\tfor id := range b {\n\t\tif id == StubBrokerID {\n\t\t\tcontinue\n\t\t}\n\t\ts += math.Pow(m-b[id].StorageFree, 2)\n\t}\n\n\tmsq := s / l\n\n\treturn math.Sqrt(msq)\n}", "func (m *Measurement) StdDevLatency() float64 {\n\tavg := m.AvgLatency()\n\tdev := 0.\n\tfor _, d := range m.data {\n\t\tdiff := d.Latency - avg\n\t\tdev += diff * diff\n\t}\n\tdev /= float64(m.successful)\n\tdev = math.Sqrt(dev)\n\treturn dev\n}", "func (n *Normal) Median() float64 {\n\treturn n.mean\n}", "func PDF(x, avg, stddev float64) float64 {\n\treturn math.Exp(-math.Pow(((x-avg)/stddev), 2)/2) / (stddev * math.Sqrt(2*math.Pi))\n}", "func calculateSumOfSquaredDeviation(slice []float64) (sum float64, sumOfSquaredDeviation float64) {\n\tsum = 0\n\tfor _, e := range slice {\n\t\tsum += e\n\t}\n\tave := sum / float64(len(slice))\n\tsumOfSquaredDeviation = 0\n\tfor _, e := range slice {\n\t\tsumOfSquaredDeviation += math.Pow((e - ave), 
2)\n\t}\n\treturn\n}", "func SubNormalise(attr *StatusAttribute, v int64) float64 {\n\treturn statusNormalise(attr, v)\n}" ]
[ "0.63204044", "0.62387717", "0.62177527", "0.6179172", "0.60376096", "0.60309976", "0.59888536", "0.5988705", "0.5976886", "0.5951141", "0.59371185", "0.5927489", "0.59189886", "0.5914602", "0.5874174", "0.58649164", "0.5854873", "0.58130044", "0.57826555", "0.5778925", "0.57583505", "0.57572865", "0.57234764", "0.56814766", "0.5635633", "0.5635365", "0.5563163", "0.55511504", "0.5540329", "0.5525771", "0.5524284", "0.549762", "0.54969245", "0.5492626", "0.5471684", "0.5447685", "0.54287857", "0.54089606", "0.5399846", "0.5392308", "0.53893936", "0.5389193", "0.53777426", "0.5372834", "0.53213304", "0.53027457", "0.5301307", "0.52940154", "0.5279393", "0.5272345", "0.5268651", "0.521163", "0.5090729", "0.5048144", "0.50322723", "0.50268316", "0.50185585", "0.50038654", "0.49911624", "0.49780512", "0.49738997", "0.49689564", "0.4957181", "0.49148956", "0.49088332", "0.48912957", "0.48818785", "0.48763332", "0.48646942", "0.48564819", "0.48557708", "0.48328114", "0.48150423", "0.47915056", "0.4771071", "0.47433177", "0.47391665", "0.47321635", "0.4713614", "0.4711509", "0.47085056", "0.46954325", "0.4689294", "0.4680937", "0.4675534", "0.46748322", "0.4666629", "0.4664771", "0.46376404", "0.4637266", "0.4625303", "0.46117303", "0.4609647", "0.4606463", "0.46022388", "0.46015656", "0.4597155", "0.4594549", "0.45710847", "0.45702964" ]
0.6419046
0
NewClient Creates a new client to communicate with the Sentry API.
func NewClient(api string, baseURI string, org string) *Client { if !strings.HasSuffix(baseURI, "/") { baseURI += "/" } return &Client{ client: &http.Client{}, sentryAPIKey: api, sentryURI: baseURI, sentryOrg: org, } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewClient(clientID string) *Client {\n\treturn &Client{\n\t\tApi: &soundcloud.Api{\n\t\t\tClientId: clientID,\n\t\t},\n\t\tclientID: clientID,\n\t\thc: &http.Client{\n\t\t\tTimeout: 5 * time.Second,\n\t\t},\n\t}\n}", "func New(client *sajari.Client) *Client {\n\treturn &Client{\n\t\tc: client,\n\t}\n}", "func NewSentryClient(options sentry.ClientOptions) (*sentry.Client, error) {\n\tclient, err := sentry.NewClient(options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// init sentry hub\n\tsentry.Init(options)\n\treturn client, nil\n}", "func NewClient(pkey, skey string) (*Client, error) {\n\tswitch {\n\tcase pkey == \"\" && skey == \"\":\n\t\treturn nil, ErrInvalidKey\n\tcase pkey != \"\" && !strings.HasPrefix(pkey, \"pkey_\"):\n\t\treturn nil, ErrInvalidKey\n\tcase skey != \"\" && !strings.HasPrefix(skey, \"skey_\"):\n\t\treturn nil, ErrInvalidKey\n\t}\n\n\tclient := &Client{\n\t\tClient: &http.Client{Transport: transport},\n\t\tdebug: false,\n\t\tpkey: pkey,\n\t\tskey: skey,\n\n\t\tEndpoints: map[internal.Endpoint]string{},\n\t}\n\n\tif len(build.Default.ReleaseTags) > 0 {\n\t\tclient.GoVersion = build.Default.ReleaseTags[len(build.Default.ReleaseTags)-1]\n\t}\n\n\treturn client, nil\n}", "func newClient() *sts.STS {\n\tsess := session.Must(session.NewSessionWithOptions(session.Options{\n\t\tSharedConfigState: session.SharedConfigEnable,\n\t}))\n\tconfig := aws.NewConfig()\n\tif debug {\n\t\tconfig.WithLogLevel(aws.LogDebugWithHTTPBody)\n\t}\n\treturn sts.New(sess, config)\n}", "func NewClient(consoleClient *console.Client, config *Config) (*Client, error) {\n\treturn newClient(consoleClient, config)\n}", "func NewClient(ctx context.Context, credentials *Credentials) (*Client, error) {\n\tctx, cancel := context.WithTimeout(ctx, 1*time.Minute)\n\tdefer cancel()\n\n\tssn, err := GetSession(credentials)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to get session\")\n\t}\n\n\tclient := &Client{\n\t\tssn: ssn,\n\t}\n\treturn client, nil\n}", "func NewClient(config *sdk.Config, credential *auth.Credential) *Client {\n\tvar handler sdk.RequestHandler = func(c *sdk.Client, req request.Common) (request.Common, error) {\n\t\terr := req.SetProjectId(PickResourceID(req.GetProjectId()))\n\t\treturn req, err\n\t}\n\tvar (\n\t\tuaccountClient = *uaccount.NewClient(config, credential)\n\t\tuhostClient = *uhost.NewClient(config, credential)\n\t\tunetClient = *unet.NewClient(config, credential)\n\t\tvpcClient = *vpc.NewClient(config, credential)\n\t\tudpnClient = *udpn.NewClient(config, credential)\n\t\tpathxClient = *pathx.NewClient(config, credential)\n\t\tudiskClient = *udisk.NewClient(config, credential)\n\t\tulbClient = *ulb.NewClient(config, credential)\n\t\tudbClient = *udb.NewClient(config, credential)\n\t\tumemClient = *umem.NewClient(config, credential)\n\t\tuphostClient = *uphost.NewClient(config, credential)\n\t\tpuhostClient = *puhost.NewClient(config, credential)\n\t\tpudbClient = *pudb.NewClient(config, credential)\n\t\tpumemClient = *pumem.NewClient(config, credential)\n\t\tppathxClient = *ppathx.NewClient(config, 
credential)\n\t)\n\n\tuaccountClient.Client.AddRequestHandler(handler)\n\tuhostClient.Client.AddRequestHandler(handler)\n\tunetClient.Client.AddRequestHandler(handler)\n\tvpcClient.Client.AddRequestHandler(handler)\n\tudpnClient.Client.AddRequestHandler(handler)\n\tpathxClient.Client.AddRequestHandler(handler)\n\tudiskClient.Client.AddRequestHandler(handler)\n\tulbClient.Client.AddRequestHandler(handler)\n\tudbClient.Client.AddRequestHandler(handler)\n\tumemClient.Client.AddRequestHandler(handler)\n\tuphostClient.Client.AddRequestHandler(handler)\n\tpuhostClient.Client.AddRequestHandler(handler)\n\tpudbClient.Client.AddRequestHandler(handler)\n\tpumemClient.Client.AddRequestHandler(handler)\n\tppathxClient.Client.AddRequestHandler(handler)\n\n\treturn &Client{\n\t\tuaccountClient,\n\t\tuhostClient,\n\t\tunetClient,\n\t\tvpcClient,\n\t\tudpnClient,\n\t\tpathxClient,\n\t\tudiskClient,\n\t\tulbClient,\n\t\tudbClient,\n\t\tumemClient,\n\t\tuphostClient,\n\t\tpuhostClient,\n\t\tpudbClient,\n\t\tpumemClient,\n\t\tppathxClient,\n\t}\n}", "func ExampleNewClient() {\n\t// initialize registrar\n\treg, err := dosaRenamed.NewRegistrar(\"test\", \"myteam.myservice\", cte1)\n\tif err != nil {\n\t\t// registration will fail if the object is tagged incorrectly\n\t\tfmt.Printf(\"NewRegistrar error: %s\", err)\n\t\treturn\n\t}\n\n\t// use a devnull connector for example purposes\n\tconn := devnull.NewConnector()\n\n\t// create the client using the registrar and connector\n\tclient := dosaRenamed.NewClient(reg, conn)\n\n\terr = client.Initialize(context.Background())\n\tif err != nil {\n\t\tfmt.Printf(\"Initialize error: %s\", err)\n\t\treturn\n\t}\n}", "func NewClient() (c *Client) {\n\tvar (\n\t\tcookie *cookiejar.Jar\n\t)\n\n\tcookie, _ = cookiejar.New(nil)\n\n\tc = &Client{\n\t\tClient: &http.Client{\n\t\t\tJar: cookie,\n\t\t},\n\t\tUserAgent: \"Sbss-Client\",\n\t}\n\n\treturn\n}", "func NewClient(clientID, email, password string, ttl int) *Client {\n\treturn &Client{\n\t\tClientID: clientID,\n\t\tEmail: email,\n\t\tPassword: password,\n\t\tTTL: ttl,\n\t\tHTTPClient: &http.Client{},\n\t\tapiBaseURL: apiBaseURL,\n\t\tloginURL: loginURL,\n\t}\n}", "func newClient() (client *Client) {\n\n\tclient = new(Client)\n\n\tid := <-uuidBuilder\n\tclient.Id = id.String()\n\tclient.subscriptions = make(map[string]bool)\n\n\tclients[client.Id] = client\n\n\tlog.WithField(\"clientID\", id.String()).Info(\"Created new Client\")\n\treturn\n}", "func NewClient(config *Configuration) (*Client, error) {\n\t// Check that authorization values are defined at all\n\tif config.AuthorizationHeaderToken == \"\" {\n\t\treturn nil, fmt.Errorf(\"No authorization is defined. 
You need AuthorizationHeaderToken\")\n\t}\n\n\tif config.ApplicationID == \"\" {\n\t\treturn nil, fmt.Errorf(\"ApplicationID is required - this is the only way to identify your requests in highwinds logs\")\n\t}\n\n\t// Configure the client from final configuration\n\tc := &Client{\n\t\tc: http.DefaultClient,\n\t\tDebug: config.Debug,\n\t\tApplicationID: config.ApplicationID,\n\t\tIdentity: &identity.Identification{\n\t\t\tAuthorizationHeaderToken: config.AuthorizationHeaderToken,\n\t\t},\n\t}\n\n\t// TODO eventually instantiate a custom client but not ready for that yet\n\n\t// Configure timeout on default client\n\tif config.Timeout == 0 {\n\t\tc.c.Timeout = time.Second * 10\n\t} else {\n\t\tc.c.Timeout = time.Second * time.Duration(config.Timeout)\n\t}\n\n\t// Set default headers\n\tc.Headers = c.GetHeaders()\n\treturn c, nil\n}", "func New(client *client.Client, properties ClientProperties) *Client {\n\treturn &Client{\n\t\tclient: client,\n\n\t\taccountSid: properties.AccountSid,\n\t\tdomainSid: properties.DomainSid,\n\t\tsid: properties.Sid,\n\t}\n}", "func New(client *client.Client, properties ClientProperties) *Client {\n\treturn &Client{\n\t\tclient: client,\n\n\t\taccountSid: properties.AccountSid,\n\t\tsid: properties.Sid,\n\t}\n}", "func newClient(project string) (*client, error) {\n\tctx := context.Background()\n\tcl, err := pubsub.NewClient(ctx, project)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &client{\n\t\tclient: cl,\n\t}, nil\n}", "func NewClient() *Client {\n\treturn &Client{\n\t\tprotoClient: storerClient,\n\t}\n}", "func NewClient(ctx context.Context, projectID string, sensor instana.TracerLogger, opts ...option.ClientOption) (*Client, error) {\n\tc, err := pubsub.NewClient(ctx, projectID, opts...)\n\treturn &Client{c, projectID, sensor}, err\n}", "func NewClient() (*Client, error) {\n\tssn, err := GetSession()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclient := &Client{\n\t\tAPIKey: ssn.APIKey,\n\t}\n\n\tif err := client.loadSDKServices(); err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to load IBM SDK services\")\n\t}\n\n\treturn client, nil\n}", "func NewClient(srv *Server) *Client {\n\tn := atomic.AddInt64(&clientCount, 1)\n\n\tclt := &Client{\n\t\tdone: make(chan bool),\n\t\tfinish: make(chan bool),\n\t\tsrv: srv,\n\t\tID: int(n),\n\t\ttimer: time.NewTimer(recordsTimeout),\n\t}\n\n\tgo clt.listen()\n\n\tlib.Debugf(\"SQS client %d ready\", clt.ID)\n\n\treturn clt\n}", "func New(sid string, token string) *Client {\n\treturn &Client{\n\t\taccountSid: sid,\n\t\tauthToken: token,\n\t\tmessageFrom: \"Genesis\",\n\t}\n}", "func NewClient(clientID string, redirectURI string, providerURI string, log *zap.SugaredLogger) (*Client, error) {\n\thttpClient := &http.Client{}\n\thttpClient.Timeout = 5 * time.Second\n\n\treturn &Client{\n\t\tclientID: clientID,\n\t\tredirectURI: redirectURI,\n\t\tproviderURI: providerURI,\n\t\tclient: httpClient,\n\t\tlog: log.With(\"client-id\", clientID, \"provider-uri\", providerURI),\n\t}, nil\n}", "func NewClient() Client {\n\treturn Client{}\n}", "func NewClient() Client {\n\treturn Client{}\n}", "func NewClient() Client {\n\treturn Client{}\n}", "func NewClient(cfg *Config) (*Client, error) {\r\n\tBaseURL := new(url.URL)\r\n\tvar err error\r\n\r\n\tviper.SetEnvPrefix(\"TS\")\r\n\tviper.BindEnv(\"LOG\")\r\n\r\n\tswitch l := viper.Get(\"LOG\"); l {\r\n\tcase \"trace\":\r\n\t\tlog.SetLevel(log.TraceLevel)\r\n\tcase \"debug\":\r\n\t\tlog.SetLevel(log.DebugLevel)\r\n\tcase 
\"info\":\r\n\t\tlog.SetLevel(log.InfoLevel)\r\n\tcase \"warn\":\r\n\t\tlog.SetLevel(log.WarnLevel)\r\n\tcase \"fatal\":\r\n\t\tlog.SetLevel(log.FatalLevel)\r\n\tcase \"panic\":\r\n\t\tlog.SetLevel(log.PanicLevel)\r\n\t}\r\n\r\n\tif cfg.BaseURL != \"\" {\r\n\t\tBaseURL, err = url.Parse(cfg.BaseURL)\r\n\t\tif err != nil {\r\n\t\t\treturn nil, err\r\n\t\t}\r\n\t} else {\r\n\t\tBaseURL, err = url.Parse(defaultBaseURL)\r\n\t\tif err != nil {\r\n\t\t\treturn nil, err\r\n\t\t}\r\n\t}\r\n\r\n\tnewClient := &Client{\r\n\t\tBaseURL: BaseURL,\r\n\t\tclient: http.DefaultClient,\r\n\t\tcreds: &Credentials{\r\n\t\t\tAPIKey: cfg.APIKey,\r\n\t\t\tOrganizationID: cfg.OrganizationID,\r\n\t\t\tUserID: cfg.UserID,\r\n\t\t},\r\n\t}\r\n\r\n\tnewClient.Rulesets = &RulesetService{newClient}\r\n\tnewClient.Rules = &RuleService{newClient}\r\n\r\n\treturn newClient, nil\r\n}", "func NewClient(c Configuration) (Client, error) {\n\tcli := Client{\n\t\tName: \"splunk-http-collector-client\",\n\t}\n\tif err := cli.Configure(c.Collector.Proto, c.Collector.Host, c.Collector.Port); err != nil {\n\t\treturn cli, err\n\t}\n\tlog.Debugf(\"%s: proto=%s\", cli.Name, c.Collector.Proto)\n\tlog.Debugf(\"%s: host=%s\", cli.Name, c.Collector.Host)\n\tlog.Debugf(\"%s: port=%d\", cli.Name, c.Collector.Port)\n\tlog.Debugf(\"%s: token=%s\", cli.Name, c.Collector.Token)\n\tlog.Debugf(\"%s: timeout=%d\", cli.Name, c.Collector.Timeout)\n\tlog.Debugf(\"%s: endpoint.health=%s\", cli.Name, cli.Endpoints.Health)\n\tlog.Debugf(\"%s: endpoint.event=%s\", cli.Name, cli.Endpoints.Event)\n\tlog.Debugf(\"%s: endpoint.raw=%s\", cli.Name, cli.Endpoints.Raw)\n\tt := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{\n\t\t\tInsecureSkipVerify: true,\n\t\t},\n\t}\n\tcli.client = &http.Client{\n\t\tTimeout: time.Duration(c.Collector.Timeout) * time.Second,\n\t\tTransport: t,\n\t}\n\tcli.Token = c.Collector.Token\n\tif err := cli.HealthCheck(); err != nil {\n\t\treturn cli, err\n\t}\n\treturn cli, nil\n}", "func NewClient(c Config) (Client, error) {\n\tif len(c.Endpoint) == 0 {\n\t\tc.Endpoint = EndpointProduction\n\t}\n\n\treturn &client{\n\t\tapikey: c.APIKey,\n\t\tendpoint: c.Endpoint,\n\t\torganizationid: c.OrganizationID,\n\t\thttpClient: http.DefaultClient,\n\t}, nil\n}", "func NewClient(clientID, email, password string, ttl int) *Client {\n\tbaseURL, _ := url.Parse(apiBaseURL)\n\tloginBaseURL, _ := url.Parse(loginURL)\n\n\treturn &Client{\n\t\tclientID: clientID,\n\t\temail: email,\n\t\tpassword: password,\n\t\tttl: ttl,\n\t\tapiBaseURL: baseURL,\n\t\tloginURL: loginBaseURL,\n\t\tHTTPClient: &http.Client{Timeout: 5 * time.Second},\n\t}\n}", "func (c *client) newClient() *gitea.Client {\n\treturn c.newClientToken(\"\")\n}", "func NewClient(list, create, show, update, delete_ goa.Endpoint) *Client {\n\treturn &Client{\n\t\tListEndpoint: list,\n\t\tCreateEndpoint: create,\n\t\tShowEndpoint: show,\n\t\tUpdateEndpoint: update,\n\t\tDeleteEndpoint: delete_,\n\t}\n}", "func New(client netatmo.AuthenticatedClient) *Client {\n\treturn &Client{\n\t\tclient: client,\n\t}\n}", "func NewClient(with ...ClientOption) *Client {\n\ttimeout := DefaultTimeout\n\n\tclient := &Client{\n\t\tclient: &http.Client{\n\t\t\tTimeout: timeout,\n\t\t},\n\t\tbase: getBaseURL(url.URL{\n\t\t\tScheme: \"https\",\n\t\t\tHost: \"api.secrethub.io\",\n\t\t}),\n\t\tuserAgent: DefaultUserAgent,\n\t}\n\tclient.Options(with...)\n\treturn client\n}", "func New(client *client.Client) *Client {\n\treturn &Client{\n\t\tclient: client,\n\t}\n}", "func New(client *client.Client) *Client 
{\n\treturn &Client{\n\t\tclient: client,\n\t}\n}", "func New(client *client.Client) *Client {\n\treturn &Client{\n\t\tclient: client,\n\t}\n}", "func NewClient(config *Config) *Client {\n\treturn &Client{\n\t\tconfig: config,\n\t}\n}", "func New(client *http.Client, accountSid, authToken string) *Client {\n\treturn &Client{httpClient: client, accountSid: accountSid, authToken: authToken}\n}", "func NewClient(cfg *Config) Client {\n\tins := client{\n\t\tcfg: cfg,\n\t\theartbeatChan: make(chan *models.HeartBeat),\n\t\tcloseFlag: make(chan struct{}, 0),\n\n\t\tSubscribedTopics: make(map[string]*models.SubscribeResponse),\n\t\trspCache: make(map[string]utils.Cache),\n\t}\n\n\treturn &ins\n}", "func NewClient(key string) *Client {\n\treturn &Client{key, &http.Client{}}\n}", "func NewClient(key, secret string) (cl *Client, err error) {\n\tcl = &Client{\n\t\tconn: &http.Client{},\n\t\tenv: newEnvironment(),\n\t}\n\n\tif key != \"\" {\n\t\tcl.env.apiKey = key\n\t\tcl.env.apiSecret = secret\n\t}\n\treturn cl, nil\n}", "func newClient(cs ConnectionSettings) *client {\n\treturn &client{\n\t\tConnectionSettings: cs,\n\t}\n}", "func NewClient(ctx context.Context) (*sm.Client, error) {\n\tclient, err := sm.NewClient(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn client, nil\n}", "func NewClient(baseARN string, regional bool) *Client {\n\treturn &Client{\n\t\tBaseARN: baseARN,\n\t\tEndpoint: \"sts.amazonaws.com\",\n\t\tUseRegionalEndpoint: regional,\n\t}\n}", "func NewClient(registryURL string) *Client {\n\treturn &Client{\n\t\turl: registryURL + \"/sgulreg/services\",\n\t\thttpClient: http.DefaultClient,\n\t\treqMux: &sync.RWMutex{},\n\t\tregistered: false,\n\t}\n}", "func NewClient(endPoint, id, key string) (*Client, error) {\n\treturn &Client{endPoint, id, key, \"\", false}, nil\n}", "func NewClient(cfg *api.Config, address, name string, port int) (Client, error) {\n\n\tc, err := api.NewClient(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfmt.Println(\">>> Started a new Consul client.\")\n\treturn &client{\n\t\tclient: c,\n\t\tname: name,\n\t\taddress: address,\n\t\tport: port,\n\t}, nil\n}", "func NewClient(service, apiID, apiKey string, client *http.Client) *Client {\n\tc := &Client{\n\t\tservice: service,\n\t\tapiID: apiID,\n\t\tapiKey: apiKey,\n\t\tclient: client,\n\t}\n\tif c.client == nil {\n\t\tc.client = http.DefaultClient\n\t}\n\treturn c\n}", "func New(consumerID string, cli *http.Client) *Client {\n\treturn &Client{\n\t\tconsumerID: consumerID,\n\t\tclient: cli,\n\t}\n}", "func NewClient() *Client{\n\treturn &Client{}\n}", "func NewClient(iamClient *iam.Client, config *Config) (*Client, error) {\n\treturn newClient(iamClient, config)\n}", "func NewClient(getSimpleCardList, getCardInfo, postCardInfo, putCardInfo, deleteCardInfo goa.Endpoint) *Client {\n\treturn &Client{\n\t\tGetSimpleCardListEndpoint: getSimpleCardList,\n\t\tGetCardInfoEndpoint: getCardInfo,\n\t\tPostCardInfoEndpoint: postCardInfo,\n\t\tPutCardInfoEndpoint: putCardInfo,\n\t\tDeleteCardInfoEndpoint: deleteCardInfo,\n\t}\n}", "func NewClient(login, echoer, listener, summary, subscribe, history goa.Endpoint) *Client {\n\treturn &Client{\n\t\tLoginEndpoint: login,\n\t\tEchoerEndpoint: echoer,\n\t\tListenerEndpoint: listener,\n\t\tSummaryEndpoint: summary,\n\t\tSubscribeEndpoint: subscribe,\n\t\tHistoryEndpoint: history,\n\t}\n}", "func NewClient(enabled bool, address, token, source, sourceType, index string) Client {\n\tif enabled {\n\t\turl := address + \"/services/collector/raw\"\n\t\tsplunkClient := 
splunk.NewClient(nil, url, token, source, sourceType, index)\n\t\treturn Client{ClientImpl: splunkClient}\n\t}\n\treturn Client{ClientImpl: nil}\n}", "func New(client *client.Client, properties ClientProperties) *Client {\n\treturn &Client{\n\t\tclient: client,\n\n\t\taccountSid: properties.AccountSid,\n\t\tcallSid: properties.CallSid,\n\t}\n}", "func NewClient(key string) *Client {\n\treturn &Client{key}\n}", "func NewClient(endpoint, ak, as, ck string, debug bool) *Client {\n\treturn &Client{\n\t\tAppKey: ak,\n\t\tAppSecret: as,\n\t\tConsumerKey: ck,\n\t\tEndpoint: endpoint,\n\t\tTimeShift: 0,\n\t\tDebug: debug,\n\t}\n}", "func NewClient(base string) *Client {\n\treturn &Client{\n\t\tLogger: slf4go.Get(\"ethclient\"),\n\t\tclient: sling.New().Base(base),\n\t}\n}", "func NewClient(url, apiKey string) *Client {\n\treturn &Client{\n\t\turl: url,\n\t\tapiKey: apiKey,\n\t}\n}", "func NewClient(c *Config) *Client {\n\treturn &Client{\n\t\tBaseURL: BaseURLV1,\n\t\tUname: c.Username,\n\t\tPword: c.Password,\n\t\tHTTPClient: &http.Client{\n\t\t\tTimeout: time.Minute,\n\t\t},\n\t}\n}", "func New() *Client {\n\treturn &Client{}\n}", "func New() *Client {\n\treturn &Client{}\n}", "func NewClient(config Config) *Client {\n\treturn &Client{Config: config}\n}", "func New(client *client.Client, properties ClientProperties) *Client {\n\treturn &Client{\n\t\tclient: client,\n\n\t\tserviceSid: properties.ServiceSid,\n\t}\n}", "func NewClient(tr Transport) *Client {\n\treturn &Client{transport: tr}\n}", "func NewClient(id shared.ClientID) Client {\n\treturn &BaseClient{id: id}\n}", "func New(client *client.Client, properties ClientProperties) *Client {\n\treturn &Client{\n\t\tclient: client,\n\n\t\tassetSid: properties.AssetSid,\n\t\tserviceSid: properties.ServiceSid,\n\t}\n}", "func NewClient(config ClientConfig) (*Client, error) {\n\tvar baseURLToUse *url.URL\n\tvar err error\n\tif config.BaseURL == \"\" {\n\t\tbaseURLToUse, err = url.Parse(defaultBaseURL)\n\t} else {\n\t\tbaseURLToUse, err = url.Parse(config.BaseURL)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc := &Client{\n\t\temail: config.Username,\n\t\tpassword: config.Password,\n\t\tbaseURL: baseURLToUse.String(),\n\t}\n\tc.client = http.DefaultClient\n\tc.InvitationService = &InvitationService{client: c}\n\tc.ActiveUserService = &ActiveUserService{client: c}\n\tc.UserService = &UserService{\n\t\tActiveUserService: c.ActiveUserService,\n\t\tInvitationService: c.InvitationService,\n\t}\n\treturn c, nil\n}", "func NewClient(clientID, secret, APIBase string) *Client {\n\treturn &Client{\n\t\t&http.Client{},\n\t\tclientID,\n\t\tsecret,\n\t\tAPIBase,\n\t\tnil,\n\t}\n}", "func NewClient(config *Config) (*Client, error) {\n\tclient := new(Client)\n\terr := client.Init(config)\n\treturn client, err\n}", "func NewClient(config *Config) (*Client, error) {\n\tclient := new(Client)\n\terr := client.Init(config)\n\treturn client, err\n}", "func NewClient(config *Config) (*Client, error) {\n\tclient := new(Client)\n\terr := client.Init(config)\n\treturn client, err\n}", "func NewClient(config *Config) (*Client, error) {\n\tclient := new(Client)\n\terr := client.Init(config)\n\treturn client, err\n}", "func NewClient(config *Config) (*Client, error) {\n\tclient := new(Client)\n\terr := client.Init(config)\n\treturn client, err\n}", "func NewClient(config *Config) (*Client, error) {\n\tclient := new(Client)\n\terr := client.Init(config)\n\treturn client, err\n}", "func NewClient(endpoint string, headers map[string]string) *Client {\n\treturn 
&Client{\n\t\tEndpoint: endpoint,\n\t\tHeaders: headers,\n\t\tclient: &http.Client{},\n\t}\n}", "func NewClient() *Client {\n\treturn &Client{\n\t\tClient: github.NewClient(nil),\n\t}\n}", "func New(client *client.Client, properties ClientProperties) *Client {\n\treturn &Client{\n\t\tclient: client,\n\n\t\taccountSid: properties.AccountSid,\n\t\tcountryCode: properties.CountryCode,\n\t}\n}", "func newClient(uri string, hc *http.Client, opts jsonclient.Options, log *entitylist.LogInfo) (*LogClient, error) {\n\tlogClient, err := jsonclient.New(uri, hc, opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &LogClient{*logClient, *log}, err\n}", "func NewClient(cid string, csec string, redurl string) *Client {\n\tconfig := &oauth.Config{\n\t\tClientId: cid,\n\t\tClientSecret: csec,\n\t\tScope: \"non-expiring\",\n\t\tAuthURL: AuthUrl,\n\t\tTokenURL: TokenUrl,\n\t\tRedirectURL: redurl,\n\t\tTokenCache: oauth.CacheFile(CacheFile),\n\t}\n\t//fmt.Printf(\"%+v\\n\", config)\n\n\ttransport := &oauth.Transport{Config: config}\n\ttoken, err := config.TokenCache.Token()\n\tif err != nil {\n\t\tif code == \"\" {\n\t\t\turl := config.AuthCodeURL(\"\")\n\t\t\tfmt.Println(url)\n\t\t\treturn nil\n\t\t}\n\t\ttoken, err = transport.Exchange(code)\n\t}\n\ttransport.Token = token\n\n\treturn &Client{\n\t\tHttpClient: transport.Client(),\n\t\tToken: token,\n\t}\n}", "func New() Client {\n\treturn &client{}\n}", "func New() (*Client, error) {\n\tcs, err := newClients()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Client{\n\t\tcs: cs,\n\t}, nil\n}", "func NewClient(cfg clients.Config, hc *http.Client) (Client, error) {\n\treturn clients.NewClient(cfg, hc)\n}", "func NewClient(cfg clients.Config, hc *http.Client) (Client, error) {\n\treturn clients.NewClient(cfg, hc)\n}", "func NewClient(accessKeyId, accessKeySecret string) *Client {\n\tendpoint := os.Getenv(\"SLB_ENDPOINT\")\n\tif endpoint == \"\" {\n\t\tendpoint = SLBDefaultEndpoint\n\t}\n\treturn NewClientWithEndpoint(endpoint, accessKeyId, accessKeySecret)\n}", "func NewClient(find, list, submit goa.Endpoint) *Client {\n\treturn &Client{\n\t\tFindEndpoint: find,\n\t\tListEndpoint: list,\n\t\tSubmitEndpoint: submit,\n\t}\n}", "func NewClient(config *Config) *Client {\n\tc := &Client{config: defaultConfig.Merge(config)}\n\n\treturn c\n}", "func NewClient(entries []*disc.Entry, log *logging.Logger) disc.APIClient {\n\tentriesMap := make(map[cipher.PubKey]*disc.Entry)\n\tfor _, entry := range entries {\n\t\tentriesMap[entry.Static] = entry\n\t}\n\tlog.WithField(\"func\", \"direct.NewClient\").\n\t\tDebug(\"Created Direct client.\")\n\treturn &directClient{\n\t\tentries: entriesMap,\n\t}\n}", "func NewClient(ctx *pulumi.Context,\n\tname string, args *ClientArgs, opts ...pulumi.ResourceOption) (*Client, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.Brand == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Brand'\")\n\t}\n\tif args.DisplayName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'DisplayName'\")\n\t}\n\tsecrets := pulumi.AdditionalSecretOutputs([]string{\n\t\t\"secret\",\n\t})\n\topts = append(opts, secrets)\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Client\n\terr := ctx.RegisterResource(\"gcp:iap/client:Client\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewClient(baseURL, apiUsername, apiPassword, senderID string) *Client 
{\n\treturn &Client{\n\t\tclient: http.DefaultClient,\n\t\tbaseURL: baseURL,\n\t\tapiUsername: apiUsername,\n\t\tapiPassword: apiPassword,\n\t\tsenderID: senderID,\n\t}\n}", "func NewClient(cfg Config) *Server {\r\n\tctx := context.Background()\r\n\tts := oauth2.StaticTokenSource(\r\n\t\t&oauth2.Token{AccessToken: cfg.OAuth2Token},\r\n\t)\r\n\ttc := oauth2.NewClient(ctx, ts)\r\n\tclient := github.NewClient(tc)\r\n\treturn &Server{\r\n\t\tRepoService: client.Repositories,\r\n\t\tLogger: zap.NewExample().Sugar(),\r\n\t\tConfig: cfg,\r\n\t}\r\n}", "func NewClient() *Client {\n\treturn &Client{\n\t\tsubs: make(map[int]*Subscription),\n\t}\n}", "func NewClient(cfg *Config) *Client {\n\treturn &Client{\n\t\tcfg: cfg,\n\t}\n}", "func NewClient(endpoints ...[]string) *Client {\n\tclient := &Client{\n\t\tevents: make(chan *models.Event),\n\t\tcancel: make(chan struct{}),\n\t}\n\tfor _, v := range endpoints {\n\t\tclient.endpoints = v\n\t}\n\tclient.address = client.GetServiceIP()\n\treturn client\n}", "func New(client *client.Client, properties ClientProperties) *Client {\n\treturn &Client{\n\t\tclient: client,\n\n\t\taccountSid: properties.AccountSid,\n\t\tmessageSid: properties.MessageSid,\n\t}\n}", "func NewClient(userList, getUser, createUser, updateUser, deleteUser goa.Endpoint) *Client {\n\treturn &Client{\n\t\tUserListEndpoint: userList,\n\t\tGetUserEndpoint: getUser,\n\t\tCreateUserEndpoint: createUser,\n\t\tUpdateUserEndpoint: updateUser,\n\t\tDeleteUserEndpoint: deleteUser,\n\t}\n}", "func NewClient(ctx context.Context, log *logging.Client, serverInfo *common.ServerInfo) (*Client, error) {\n\tif log == nil {\n\t\treturn nil, errInternal.Annotate(\"failed to get logging client\")\n\t}\n\tif serverInfo == nil {\n\t\treturn nil, errInternal.Annotate(\"failed to get server info\")\n\t}\n\tslackClient := slack.New(gigabotToken)\n\treturn &Client{\n\t\tctx: ctx,\n\t\tlog: log,\n\t\tserverInfo: serverInfo,\n\t\tslackClient: slackClient,\n\t}, nil\n}", "func New(cfg client.Config) (client.Client, error) {\n\treturn client.New(cfg)\n}", "func NewClient(cl *kgo.Client) *Client {\n\treturn &Client{cl}\n}", "func NewClient(apiKey, appKey string) *Client {\n\treturn &Client{\n\t\tapiKey: apiKey,\n\t\tappKey: appKey,\n\t\tHttpClient: http.DefaultClient,\n\t}\n}" ]
[ "0.69753504", "0.6909176", "0.68756545", "0.6828231", "0.6781878", "0.6778484", "0.67573667", "0.6750796", "0.6738676", "0.67295736", "0.67211974", "0.6719252", "0.67097473", "0.67009145", "0.66968787", "0.6682295", "0.66727865", "0.6664944", "0.66583467", "0.6650193", "0.6645113", "0.66375405", "0.66365594", "0.66365594", "0.66365594", "0.6634283", "0.662416", "0.6614668", "0.6605737", "0.6602303", "0.65953094", "0.6585764", "0.65749526", "0.6573541", "0.6573541", "0.6573541", "0.6570195", "0.6569919", "0.6567235", "0.6564477", "0.6559738", "0.6559363", "0.6547017", "0.65461016", "0.6543743", "0.6541008", "0.6535043", "0.652375", "0.65224284", "0.6520048", "0.6513604", "0.6510279", "0.6509524", "0.65085214", "0.6503904", "0.6495682", "0.6494859", "0.6483957", "0.64783007", "0.6458428", "0.6458169", "0.6458169", "0.64573175", "0.6450578", "0.64500576", "0.64422375", "0.6441305", "0.64413", "0.6439122", "0.6439087", "0.6439087", "0.6439087", "0.6439087", "0.6439087", "0.6439087", "0.6437295", "0.64369714", "0.643678", "0.643517", "0.64343697", "0.6432708", "0.6429275", "0.6419602", "0.6419602", "0.64176", "0.641726", "0.6416232", "0.64161843", "0.6414953", "0.6413475", "0.6409984", "0.64064234", "0.639722", "0.6393527", "0.6392991", "0.63884574", "0.6387424", "0.6385376", "0.6381118", "0.6379412" ]
0.72267675
0
Check that the constructor works.
func TestNewTransport(t *testing.T) { _, err := NewTransport(testURL, ips, nil, nil, nil) if err != nil { t.Fatal(err) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestConstructor(t *testing.T) {\n\tobject := Constructor()\n\tobjectType := reflect.TypeOf(object)\n\tminStackType := reflect.TypeOf(MinStack{})\n\tif objectType != minStackType {\n\t\tt.Errorf(\"Constructor Error | Expected %v, got %v\", minStackType, objectType)\n\t}\n}", "func TestConstructor(t *testing.T) {\n\tobject := Constructor(2)\n\n\tobjectType := reflect.TypeOf(object)\n\tLRUCacheType := reflect.TypeOf(LRUCache{})\n\tif objectType != LRUCacheType {\n\t\tt.Errorf(\"Constructor Error | Expected %v, got %v\", LRUCacheType, objectType)\n\t}\n}", "func TestNew(t *testing.T){\n\tclient := New(\"localhost\", 5555)\n\t\n\tif client.Server != \"localhost\" {\n\t\tt.Error(\"The server hasn't being initialized correctly\")\n\t}\n\t\n\tif client.Port != 5555 {\n\t\tt.Error(\"The port wasn't initialized propertly\")\n\t}\n\t\n\tif client.Opened {\n\t\tt.Error(\"The opened indicator wasn't initialized propertly\")\n\t}\n\t\n\tif client.conn != nil {\n\t\tt.Error(\"The TCP connection wasn't initialized propertly\")\n\t}\n}", "func MustNewConstructor(x interface{}) *Constructor {\n\tc, err := NewConstructor(x)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn c\n}", "func TestNew(t *testing.T) {\n\ttestutil.SkipIfNotRoot(t)\n\tif _, err := New(); err != nil {\n\t\tt.Errorf(`New() = %q, not nil`, err)\n\t}\n}", "func checkInit(t *testing.T, stub *shim.MockStub, args [][]byte) {\n\tres := stub.MockInit(\"1\", args)\n\tif res.Status != shim.OK {\n\t\tfmt.Println(\"Init failed\", string(res.Message))\n\t\tt.FailNow()\n\t}\n}", "func TestNewEmpty(t *testing.T) {\n\tos.Remove(\"/tmp/commits.log\")\n\n\tc, err := New(\"/tmp/commits.log\")\n\tcheck(err)\n\n\tassert.Equal(t, 0, c.Length())\n\tassert.Equal(t, false, c.Has(\"foo\"))\n}", "func checkInit(t *testing.T, stub *shim.MockStub, args [][]byte) {\n\t\n\tres := stub.MockInit(\"1\", args)\n\tif res.Status != shim.OK {\n\t\tfmt.Println(\"Init failed\", string(res.Message))\n\t\tt.FailNow()\n\t}\n}", "func (cer *CER) sanityCheck() error {\n\tif len(cer.OriginHost) == 0 {\n\t\treturn ErrMissingOriginHost\n\t}\n\tif len(cer.OriginRealm) == 0 {\n\t\treturn ErrMissingOriginRealm\n\t}\n\treturn nil\n}", "func TestNew(t *testing.T) {\n\tserver, err := New(\"tcp\", \"localhost\", \"50000\", 1)\n\tif err != nil || server == nil {\n\t\tt.Errorf(\"return non initialize server or erorr: %v\", err)\n\t}\n}", "func TestSet_New(t *testing.T) {\n\tt.Run(\"Set is empty on construction\", func(t *testing.T) {\n\t\ts := New()\n\t\tassert.True(t, s.isEmpty())\n\t})\n\tt.Run(\"Set has 0 'size'' on construction\", func(t *testing.T) {\n\t\ts := New()\n\t\tassert.Equal(t, 0, s.Size())\n\t})\n}", "func ConstructorFrom(ctx context.Context) (_ Constructor, ok bool) {\n\tv := ctx.Value(constructorKey)\n\tif v == nil {\n\t\treturn nil, false\n\t}\n\n\tctor, ok := v.(Constructor)\n\treturn ctor, ok\n}", "func (i InputCheckPasswordEmpty) construct() InputCheckPasswordSRPClass { return &i }", "func (i InputCheckPasswordSRP) construct() InputCheckPasswordSRPClass { return &i }", "func (s *BasePlSqlParserListener) EnterConstructor_spec(ctx *Constructor_specContext) {}", "func Test_New(t *testing.T) {\n\to := observable.New()\n\n\tassert.NotNil(t, o)\n}", "func MustNewAbc(alphabet string, seed uint64) Abc {\n\tres, err := NewAbc(alphabet, seed)\n\tif err == nil {\n\t\treturn res\n\t}\n\tpanic(err)\n}", "func (s ServerDHParamsFail) construct() ServerDHParamsClass { return &s }", "func (o *GetConstructorOK) IsSuccess() bool {\n\treturn true\n}", "func checkInitBook() error 
{\n\tif b == nil {\n\t\treturn fmt.Errorf(\"book initialize failed\")\n\t}\n\treturn nil\n}", "func validateKubeSelfSubjectRulesReviewV1Beta1_IsConstructParameters(x interface{}) error {\n\treturn nil\n}", "func Test_StatefulScheduler_Initialize(t *testing.T) {\n\n\ts := makeDefaultStatefulScheduler()\n\n\tif len(s.inProgressJobs) != 0 {\n\t\tt.Errorf(\"Expected Scheduler to startup with no jobs in progress\")\n\t}\n\n\tif len(s.clusterState.nodes) != 5 {\n\t\tt.Errorf(\"Expected Scheduler to have a cluster with 5 nodes\")\n\t}\n}", "func New() error {\n\treturn &errorMessage{\"Find the bug\"}\n}", "func Constructor() Trie {\n\treturn Trie{\n\t\twd: false,\n\t}\n\n}", "func initilize() bool {\n\t// Create self entry\n\tLocalIP = getLocalIP().String()\n\tLogger = NewSsmsLogger(LocalIP)\n\ttimestamp := time.Now().UnixNano()\n\tstate := StateAlive\n\tCurrentMember = &Member{uint64(timestamp), ip2int(getLocalIP()), uint8(state)}\n\n\t// Create member list\n\tCurrentList = NewMemberList(20)\n\n\t// Make necessary tables\n\tPingAckTimeout = make(map[uint16]*time.Timer)\n\tFailureTimeout = make(map[[2]uint64]*time.Timer)\n\tDuplicateUpdateCaches = make(map[uint64]uint8)\n\tTTLCaches = NewTtlCache()\n\n\treturn true\n}", "func TestNewProgram(t *testing.T) {\n\tp, err := NewProgram()\n\tif err != nil {\n\t\tt.Errorf(\"New program failed : %s\", fmt.Sprint(err))\n\t}\n\n\tdefer p.Close()\n\n\tif p.IsRunning == true {\n\t\tt.Error(\"New program failed : IsRunning is true\")\n\t}\n\n\tif p.windows == nil {\n\t\tt.Error(\"New program failed : windows's map not init\")\n\t}\n\n\tif p.showed == nil {\n\t\tt.Error(\"New program failed : showed windows map not init\")\n\t}\n}", "func MustNewControlPlane() *ControlPlane {\n\tcp := &ControlPlane{}\n\tcp.prepareEcho()\n\tcp.Encryptor = crypto.MustNewEncryptor()\n\tcp.Hasher = crypto.MustNewHasher()\n\tcp.Validator = validator.New()\n\n\trepo, enforcer, err := repo.NewRepoEnforcer(cp.Logger)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tcp.Repo = repo\n\tcp.Enforcer = enforcer\n\n\treturn cp\n}", "func (_pc *PCCreate) check() error {\n\treturn nil\n}", "func Constructor() Trie {\r\n\treturn Trie{}\r\n}", "func TestAssetSysCC_Init(t *testing.T) {\n\tascc := new(AssetSysCC)\n\tstub := shim.NewMockStub(\"ascc\", ascc)\n\tcheckInit(t, stub, [][]byte{[]byte(\"\")})\n}", "func init() {}", "func init() {}", "func init() {}", "func init() {}", "func TestNewBatchConstructor(t *testing.T) {\n\tbatch := NewBatch()\n\tif batch.Writer != os.Stdout {\n\t\tt.Fatal(\"Batch is not using stdout\")\n\t}\n}", "func TestNewInstance(t *testing.T) {\n\tif _, err := NewInstance(nil); err == nil {\n\t\tt.Error(\"NewInstance: expected error with nil database handle\")\n\t}\n\n\tRunWithDB(func(db *sql.DB) {\n\t\tif _, err := NewInstance(db); err != nil {\n\t\t\tt.Fatal(\"NewInstance: got error:\\n\", err)\n\t\t}\n\t})\n}", "func CfnInstance_IsConstruct(x interface{}) *bool {\n\t_init_.Initialize()\n\n\tvar returns *bool\n\n\t_jsii_.StaticInvoke(\n\t\t\"monocdk.aws_opsworks.CfnInstance\",\n\t\t\"isConstruct\",\n\t\t[]interface{}{x},\n\t\t&returns,\n\t)\n\n\treturn returns\n}", "func TestInitializeUtil(t *testing.T) {\n\tinstance := util.InitializeUtil()\n\tif instance == nil {\n\t\tt.Error(\"Can not create util instance\")\n\t}\n}", "func TestNew(t *testing.T) {\n\tt.Run(\"one required int option\", func(t *testing.T) {\n\t\t_, err := New(Option{\n\t\t\tKey: \"int\",\n\t\t\tValueType: \"int\",\n\t\t\tDescription: \"some int value\",\n\t\t\tRequired: true,\n\t\t})\n\t\ttestUtil.NoError(t, 
err)\n\t})\n\n\tt.Run(\"one int option\", func(t *testing.T) {\n\t\t_, err := New(Option{\n\t\t\tKey: \"int\",\n\t\t\tValueType: \"int\",\n\t\t\tDescription: \"some int value\",\n\t\t\tDefaultValue: 10,\n\t\t})\n\t\ttestUtil.NoError(t, err)\n\t})\n\n\tt.Run(\"one required string option\", func(t *testing.T) {\n\t\t_, err := New(Option{\n\t\t\tKey: \"string\",\n\t\t\tValueType: \"string\",\n\t\t\tDescription: \"some string value\",\n\t\t\tRequired: true,\n\t\t})\n\t\ttestUtil.NoError(t, err)\n\t})\n\n\tt.Run(\"one string option\", func(t *testing.T) {\n\t\t_, err := New(Option{\n\t\t\tKey: \"string\",\n\t\t\tValueType: \"string\",\n\t\t\tDescription: \"some string value\",\n\t\t\tDefaultValue: \"default value\",\n\t\t})\n\t\ttestUtil.NoError(t, err)\n\t})\n\n\tt.Run(\"one option with validator\", func(t *testing.T) {\n\t\t_, err := New(Option{\n\t\t\tKey: \"int\",\n\t\t\tValueType: \"int\",\n\t\t\tDescription: \"some int value\",\n\t\t\tDefaultValue: 10,\n\t\t\tValidator: validator.IntWithin,\n\t\t\tValidatorParam: []int{9, 11},\n\t\t})\n\t\ttestUtil.NoError(t, err)\n\t})\n\n\tt.Run(\"invalid option (no key)\", func(t *testing.T) {\n\t\t_, err := New(Option{\n\t\t\tKey: \"\",\n\t\t\tValueType: \"string\",\n\t\t\tDescription: \"some string value\",\n\t\t\tDefaultValue: \"default value\",\n\t\t})\n\t\ttestUtil.WithError(t, err)\n\t})\n\n\tt.Run(\"invalid option (required and default value are specified)\", func(t *testing.T) {\n\t\t_, err := New(Option{\n\t\t\tKey: \"\",\n\t\t\tValueType: \"string\",\n\t\t\tDescription: \"some string value\",\n\t\t\tRequired: true,\n\t\t\tDefaultValue: \"default value\",\n\t\t})\n\t\ttestUtil.WithError(t, err)\n\t})\n\n\tt.Run(\"invalid option with validator (validation failed)\", func(t *testing.T) {\n\t\t_, err := New(Option{\n\t\t\tKey: \"int\",\n\t\t\tValueType: \"int\",\n\t\t\tDescription: \"some int value\",\n\t\t\tDefaultValue: 10,\n\t\t\tValidator: validator.IntSmallerThan,\n\t\t\tValidatorParam: 10,\n\t\t})\n\t\ttestUtil.NoError(t, err)\n\t})\n}", "func (o *GetConstructorNotFound) IsSuccess() bool {\n\treturn false\n}", "func checkCalledFromInit() {\n\tfor skip := 3; ; skip++ {\n\t\t_, funcName, ok := callerName(skip)\n\t\tif !ok {\n\t\t\tpanic(\"not called from an init func\")\n\t\t}\n\n\t\tif funcName == \"init\" || strings.HasPrefix(funcName, \"init·\") ||\n\t\t\tstrings.HasPrefix(funcName, \"init.\") {\n\t\t\treturn\n\t\t}\n\t}\n}", "func (s *Prototype) Constructor() Value { return s.constructor }", "func (s ServerDHParamsOk) construct() ServerDHParamsClass { return &s }", "func Problem383() {\n\n\tfmt.Println(canConstruct1(\"a\", \"b\"))\n\tfmt.Println(canConstruct1(\"aa\", \"ab\"))\n\tfmt.Println(canConstruct1(\"aa\", \"aab\"))\n\n}", "func TestInitNoc(t *testing.T) {\n var c Noc\n\n c.InitNoc(\"localhost\", \"8888\", false)\n if strings.Contains(c.HostAddr, \"http\") != true {\n t.Errorf(\"Expected HTTP in HostAddr\")\n }\n\n c.InitNoc(\"localhost\", \"8888\", true)\n if strings.Contains(c.HostAddr, \"https\") != true {\n t.Errorf(\"Expected HTTPS in HostAddr\")\n }\n\n if c.InitNoc(\"\", \"8888\", false) == nil {\n t.Errorf(\"Expected an error when passing a hostname of length zero\")\n }\n\n if c.InitNoc(\"localhost\", \"\", false) == nil {\n t.Errorf(\"Expected an error when passing a port of length zero\")\n }\n\n c.InitNoc(\"localhost\", \"8888\", false)\n if len(c.HostAddr) != 21 {\n x := strconv.Itoa(len(c.HostAddr))\n t.Errorf(\"Expected noc.HostAddr to be length 21 when using localhost, 8888, false. 
Got: \" + x)\n }\n}", "func (s *deployerSuite) TestNew(c *gc.C) {\n\tdeployer := deployer.NewState(s.stateAPI)\n\tc.Assert(deployer, gc.NotNil)\n}", "func Constructor() Trie {\n\treturn Trie{}\n}", "func Constructor() Trie {\n\treturn Trie{}\n}", "func Constructor() Trie {\n\treturn Trie{}\n}", "func Constructor() Trie {\n\treturn Trie{}\n}", "func Constructor() Trie {\n\treturn Trie{}\n}", "func Constructor() Trie {\n\treturn Trie{}\n}", "func MustInit(cmp *mcmp.Component) {\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\n\tdebugLog(cmp, \"initializing\")\n\tif err := mrun.Init(ctx, cmp); err != nil {\n\t\tmlog.From(cmp).Fatal(\"initialization failed\", merr.Context(err))\n\t}\n\tdebugLog(cmp, \"initialization completed successfully\")\n}", "func init() {\n\t//todo...\n}", "func CfnDetector_IsConstruct(x interface{}) *bool {\n\t_init_.Initialize()\n\n\tvar returns *bool\n\n\t_jsii_.StaticInvoke(\n\t\t\"monocdk.aws_guardduty.CfnDetector\",\n\t\t\"isConstruct\",\n\t\t[]interface{}{x},\n\t\t&returns,\n\t)\n\n\treturn returns\n}", "func Constructor() Trie {\n return Trie{}\n}", "func Constructor() Trie {\n return Trie{}\n}", "func TestNewClient(t *testing.T) {\n\tt.Parallel()\n\tc, err := NewClient(&ClientOpts{\n\t\tVersion: \"2.1\",\n\t\tValidate: true,\n\t})\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif reflect.TypeOf(c).String() != \"*scaniigo.Client\" {\n\t\tt.Error(ErrInvalidDataType)\n\t}\n}", "func TestBucketNew(t *testing.T) {\n\tt.Skip()\n\n\tbkt := bucket.New()\n\tif &bkt == nil {\n\t\tt.Errorf(\"bkt should not be nil\")\n\t}\n\n\tnodes := bkt.Nodes\n\n\tif &bkt.Nodes == nil {\n\t\tt.Errorf(\"bkt.Nodes should not be nil\")\n\t}\n\n\tlog.Debugf(\"bucket_test.go: TestBucketNew(): nodes = %+v\", nodes)\n\n\tif &bkt.Routes == nil {\n\t\tt.Errorf(\"bkt.Routes should not be nil\")\n\t}\n}", "func NewConstructor(x interface{}) (*Constructor, error) {\n\tif x == nil {\n\t\treturn nil, kerror.New(kerror.EViolation, \"function expected, nil given\")\n\t}\n\tft := reflect.TypeOf(x)\n\tfv := reflect.ValueOf(x)\n\tif ft.Kind() != reflect.Func {\n\t\treturn nil, kerror.Newf(kerror.EViolation, \"function expected, %s given\", ft)\n\t}\n\tif fv.IsNil() {\n\t\treturn nil, kerror.New(kerror.EViolation, \"function expected, nil given\")\n\t}\n\tc := &Constructor{\n\t\tfunction: fv,\n\t}\n\tnumIn := ft.NumIn()\n\tif ft.IsVariadic() {\n\t\tnumIn--\n\t}\n\tc.inTypes = make([]reflect.Type, numIn)\n\tfor i := 0; i < numIn; i++ {\n\t\tc.inTypes[i] = ft.In(i)\n\t}\n\tswitch ft.NumOut() {\n\tdefault:\n\t\treturn nil, kerror.Newf(kerror.EViolation, \"function %s is not a constructor\", ft)\n\tcase 1:\n\t\tc.t = ft.Out(0)\n\t\tc.objectOutIndex = 0\n\t\tc.destructorOutIndex = -1\n\t\tc.errorOutIndex = -1\n\tcase 2:\n\t\tif ft.Out(1) != errorType {\n\t\t\treturn nil, kerror.Newf(kerror.EViolation, \"function %s is not a constructor\", ft)\n\t\t}\n\t\tc.t = ft.Out(0)\n\t\tc.objectOutIndex = 0\n\t\tc.destructorOutIndex = -1\n\t\tc.errorOutIndex = 1\n\tcase 3:\n\t\tif ft.Out(1) != destructorType || ft.Out(2) != errorType {\n\t\t\treturn nil, kerror.Newf(kerror.EViolation, \"function %s is not a constructor\", ft)\n\t\t}\n\t\tc.t = ft.Out(0)\n\t\tc.objectOutIndex = 0\n\t\tc.destructorOutIndex = 1\n\t\tc.errorOutIndex = 2\n\t}\n\treturn c, nil\n}", "func (a StoriesAllStoriesNotModified) construct() StoriesAllStoriesClass { return &a }", "func TestCreateCard(t *testing.T) {\n\tc := Card{Nine, Diamond}\n\tif c.Rank != Nine {\n\t\tt.Errorf(\"Card rank is incorrect, got: 
%d, want: %d.\", c.Rank, Nine)\n\t}\n\tif c.Suit != Diamond {\n\t\tt.Errorf(\"Card suit is incorrect, got: %d, want: %d.\", c.Suit, Diamond)\n\t}\n}", "func NewChecker() *Checker { return &Checker{faulter: &DebugFaulter{}} }", "func (cea *CEA) sanityCheck() error {\n\tif cea.ResultCode == 0 {\n\t\treturn ErrMissingResultCode\n\t}\n\tif len(cea.OriginHost) == 0 {\n\t\treturn ErrMissingOriginHost\n\t}\n\tif len(cea.OriginRealm) == 0 {\n\t\treturn ErrMissingOriginRealm\n\t}\n\treturn nil\n}", "func TestNewSet(t *testing.T) {\n\tt.Run(\"A stack is empty on construction\", func(t *testing.T) {\n\t\ts := stack.New()\n\t\t//s := stack.New()\n\t\tassert.True(t, s.IsEmpty())\n\t})\n}", "func TestBuilder0(t *testing.T) {\n\tbuilder := NewBuilder(\"Ctx_\", nil)\n\n\tif nil == builder {\n\t\tt.Error(\"NewBuilder failed\")\n\t}\n\t// Check prefix is to Upper !\n\tif \"CTX_\" != builder.Prefix() {\n\t\tt.Error(\"Bad prefix '\", builder.Prefix(), \"' CTX_ expected\")\n\t}\n\n\tif false != builder.IgnoreMissingFiles() {\n\t\tt.Error(\"Wrong IgnoreMissingFile Value'\", builder.IgnoreMissingFiles(), \"' false expected\")\n\t}\n\n\t// Check prefix is to Upper !\n\tif 5 != builder.MaxRecursion() {\n\t\tt.Error(\"Bad Max Recursion '\", builder.MaxRecursion(), \"' 5 expected\")\n\t}\n\n\tif nil == builder.Config() {\n\t\tt.Error(\"Could not get Config\")\n\t}\n\n}", "func Constructor() Trie {\n\treturn Trie{100, false, [26]*Trie{}}\n}", "func MustNew(area image.Rectangle) *braille.Canvas {\n\tcvs, err := braille.New(area)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"braille.New => unexpected error: %v\", err))\n\t}\n\treturn cvs\n}", "func (i *InvariantsChecker) assertInitWasCalled() bool {\n\tif i.initStatus != colexecop.OperatorInitialized {\n\t\tif c, ok := i.Input.(*Columnarizer); ok {\n\t\t\tif c.removedFromFlow {\n\t\t\t\t// This is a special case in which we allow for the operator to\n\t\t\t\t// not be initialized. 
Next and DrainMeta calls are noops in\n\t\t\t\t// this case, so the caller should short-circuit.\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\tcolexecerror.InternalError(errors.AssertionFailedf(\"Init hasn't been called, input is %T\", i.Input))\n\t}\n\treturn false\n}", "func newInstance0(frame *rtda.Frame) {\n\tvars := frame.LocalVars()\n\tconstructorObj := vars.GetRef(0)\n\targArrObj := vars.GetRef(1)\n\n\tgoConstructor := getExtra(constructorObj)\n\tgoClass := goConstructor.Class()\n\tobj := goClass.NewObj()\n\tstack := frame.OperandStack()\n\tstack.PushRef(obj)\n\n\t// call <init>\n\targs := actualConstructorArgs(obj, argArrObj, goConstructor)\n\tframe.Thread().InvokeMethodWithShim(goConstructor, args)\n}", "func TestNewStacktrace(t *testing.T) {\n\tstacktrace, err := NewStacktrace(0)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif len(stacktrace) != 4 {\n\t\tt.Error(\"Stacktrace should be 4 frames.\")\n\t}\n}", "func TestNewPoint(t *testing.T) {\n\tp := NewPoint(1, 1, 1)\n\tif (p.X != 1) || (p.X != 1) || (p.X != 1) {\n\t\tt.Log(\"Wrong assignment of the coordinates!\")\n\t\tt.Fail()\n\t}\n}", "func (p PhoneCallEmpty) construct() PhoneCallClass { return &p }", "func TestSparkling_New(t *testing.T) {\n\tsp := New(os.Stdout)\n\tif reflect.TypeOf(sp).String() != \"*sparkling.Sparkling\" {\n\t\tt.Error(\"New returned incorrect type\")\n\t}\n}", "func MustNew(worker uint8, alphabet string, seed uint64) *Shortid {\n\tsid, err := New(worker, alphabet, seed)\n\tif err == nil {\n\t\treturn sid\n\t}\n\tpanic(err)\n}", "func (s *BasePlSqlParserListener) EnterConstructor_declaration(ctx *Constructor_declarationContext) {}", "func MustNew(cfg Config) *Client {\n\tc, err := New(cfg)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn c\n}", "func Test_Init3(t *testing.T) {\n\t_, err := Load(\"\", false)\n\tassert.Equal(t, err.Error(), \"file name must be given\")\n}", "func mustNew(hash string, size int64) *repb.Digest {\n\tdigest, err := New(hash, size)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\treturn digest\n}", "func Test_GraphNodeInitialisation1(t *testing.T) {\n\n\tgn1, gn2 := newGraphNode(), newGraphNode()\n\n\tif gn1 == gn2 {\n\t\tt.Errorf(\"gn1 == gn2\")\n\t}\n}", "func (o *FakeObject) New(args ...interface{}) Object { return o.Invoke(args) }", "func IsOk(c *SingleCrawler) error {\n if c.Site == nil {\n return errors.New(\"Crawler has no Site.\")\n }\n if c.Sitemap == nil {\n return errors.New(\"Crawler has no Sitemap.\")\n }\n if c.NumPages < 0 {\n return errors.New(\"Crawler has negative # of pages.\")\n }\n if c.NumWorkers <= 0 {\n return errors.New(\"Crawler <= 0 number of workers (can't work).\")\n }\n if c.Filename == \"\" {\n return errors.New(\"Crawler has no Filename to write sitemap to.\")\n }\n return nil\n}", "func Init() error {\n\n}", "func TestBuilder1c(t *testing.T) {\n\tbuilder := NewBuilder(\"Ctx_\", nil)\n\t_, err := builder.LoadJSONFile(\"missing.json\")\n\n\tif nil == err {\n\t\tt.Error(\"LoadJSON should Failed\")\n\t}\n}", "func Constructor() Codec {\n\treturn Codec{}\n}", "func Constructor() Trie {\n\t// return new trie instance\n\treturn Trie{root: &TrieNode{map[rune]*TrieNode{}, false}}\n}", "func Test_Init(t *testing.T) {\n\tclient, err := Load(\"\", true)\n\tassert.Nil(t, err)\n\tmockClient = client\n}", "func MustCreateClient(connString string) {\n\tonce.Do(func() {\n\t\tvar err error\n\t\tInstance, err = CreateClient(connString)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t})\n}", "func (s *Saiyan) constructor(name string, power int) *Saiyan 
{\n\treturn &Saiyan{\n\t\tName: name,\n\t\tPower: power,\n\t}\n}", "func TestNew(t *testing.T) {\n\tcrawler := NewYaml([]byte(sampleYml))\n\n\t// inspect Crawler options\n\texpect, err := json.Marshal(expectedCrawl)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tgot, err := json.Marshal(crawler)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif !reflect.DeepEqual(expect, got) {\n\t\tt.Errorf(\"expecting %s, got %s\",\n\t\t\tstring(expect), string(got))\n\t}\n}", "func validateKubeScaleV1Beta2_IsConstructParameters(x interface{}) error {\n\treturn nil\n}", "func Constructor() Trie {\n\treturn Trie{\n\t\tend: false,\n\t\tedges: nil,\n\t}\n}", "func (x *fastReflection_EventCreateClass) IsValid() bool {\n\treturn x != nil\n}", "func (a StoriesAllStories) construct() StoriesAllStoriesClass { return &a }", "func (s *Suite) TestLengthZero(c *gc.C) {\n q := NewQueue()\n c.Assert(q.Len(), gc.Equals, 0)\n}", "func (n *mockAgent) init(ctx context.Context, sandbox *Sandbox, config KataAgentConfig) (bool, error) {\n\treturn false, nil\n}", "func Constructor() Trie {\n\n}", "func Constructor() RandomizedSet {\n\treturn RandomizedSet{\n\t\tValue: map[int]bool{},\n\t\tKeys: []int{},\n\t}\n}", "func TestInit(t *testing.T) {\n\tl := new(Layout)\n\tif err := l.Init(nil, \"\"); err != errNoBaseTemplate {\n\t\tt.Error(errNoBaseTemplate)\n\t}\n\tif err := l.Init(nil, \"base\"); err != nil {\n\t\tt.Error(\"Init Layout with nil function map, defined baseTemplate, and no patterns\")\n\t}\n}", "func (this *NowStr) Constructor() FunctionConstructor { return NewNowStr }", "func (u UserEmpty) construct() UserClass { return &u }" ]
[ "0.6858513", "0.6666509", "0.6298899", "0.6049641", "0.59415185", "0.58098793", "0.58064705", "0.57521695", "0.5611217", "0.55934876", "0.5578441", "0.5485013", "0.54486066", "0.5383912", "0.53633654", "0.5355686", "0.5350019", "0.53485173", "0.53275627", "0.53227466", "0.5302956", "0.5299442", "0.529502", "0.5291972", "0.5290979", "0.52800816", "0.5272578", "0.5263029", "0.52371633", "0.5228575", "0.5225774", "0.5225774", "0.5225774", "0.5225774", "0.5225004", "0.51984715", "0.51781785", "0.5169528", "0.5162026", "0.5154958", "0.51414037", "0.5126744", "0.5119794", "0.51141185", "0.5109829", "0.510926", "0.51086324", "0.51086324", "0.51086324", "0.51086324", "0.51086324", "0.51086324", "0.5101376", "0.5080959", "0.50804925", "0.50762475", "0.50762475", "0.5074712", "0.5069126", "0.50492847", "0.5047968", "0.5042029", "0.50237453", "0.5019317", "0.501924", "0.5011143", "0.50090384", "0.50031555", "0.5000284", "0.5000074", "0.49986655", "0.4997222", "0.4990476", "0.49834123", "0.49804634", "0.49774677", "0.49726775", "0.49718374", "0.49637508", "0.49635053", "0.49628317", "0.49600798", "0.4955625", "0.49534303", "0.49483445", "0.49333894", "0.4932691", "0.4927495", "0.49257347", "0.4922067", "0.49154538", "0.49127397", "0.49127063", "0.4912132", "0.49056625", "0.49045652", "0.49032703", "0.4903259", "0.4895078", "0.48943436", "0.4893946" ]
0.0
-1
Check that the constructor rejects unsupported URLs.
func TestBadUrl(t *testing.T) { _, err := NewTransport("ftp://www.example.com", nil, nil, nil, nil) if err == nil { t.Error("Expected error") } _, err = NewTransport("https://www.example", nil, nil, nil, nil) if err == nil { t.Error("Expected error") } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func validURL(url string) bool {\n\treturn true\n}", "func URL(data ValidationData) error {\n\tv, err := helper.ToString(data.Value)\n\tif err != nil {\n\t\treturn ErrInvalid{\n\t\t\tValidationData: data,\n\t\t\tFailure: \"is not a string\",\n\t\t\tMessage: data.Message,\n\t\t}\n\t}\n\n\tparsed, err := url.Parse(v)\n\tif err != nil {\n\t\treturn ErrInvalid{\n\t\t\tValidationData: data,\n\t\t\tFailure: \"is not a valid URL\",\n\t\t\tMessage: data.Message,\n\t\t}\n\t}\n\n\tif parsed.Scheme != \"http\" && parsed.Scheme != \"https\" {\n\t\treturn ErrInvalid{\n\t\t\tValidationData: data,\n\t\t\tFailure: fmt.Sprintf(\"has an invalid scheme '%s'\", parsed.Scheme),\n\t\t\tMessage: data.Message,\n\t\t}\n\t}\n\n\tif parsed.Host == \"\" || strings.IndexRune(parsed.Host, '\\\\') > 0 {\n\t\treturn ErrInvalid{\n\t\t\tValidationData: data,\n\t\t\tFailure: fmt.Sprintf(\"has an invalid host ('%s')\", parsed.Host),\n\t\t\tMessage: data.Message,\n\t\t}\n\t}\n\n\treturn nil\n}", "func ValidURL(urlStr string) bool {\n\treturn len(urlStr) < 8192\n}", "func validateURL(rawurl string) error {\n\tu, err := url.ParseRequestURI(rawurl)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(u.Scheme) == 0 {\n\t\treturn fmt.Errorf(\"Invalid scheme: %s\", rawurl)\n\t}\n\n\tif len(u.Host) == 0 {\n\t\treturn fmt.Errorf(\"Invalid host: %s\", rawurl)\n\t}\n\n\treturn nil\n}", "func MustParseURLs(urls ...string) FixedEndpoints {\n\tfe, err := ParseURLs(urls...)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn fe\n}", "func Test_GetImageFromUrl_badUrl(t *testing.T) {\n\tb, err := GetImageFromUrl(\"some-bad-url\")\n\n\tassert.Equal(t, `Error getting image: Get some-bad-url: unsupported protocol scheme \"\"`, err.Error())\n\tassert.Equal(t, []byte(nil), b)\n}", "func validURL(s string) bool {\n\tif s == \"\" {\n\t\treturn false\n\t}\n\tu, err := url.Parse(s)\n\tif err != nil {\n\t\treturn false\n\t}\n\tif u.Host == \"\" {\n\t\treturn false\n\t}\n\tswitch u.Scheme {\n\tcase \"http\", \"https\":\n\tdefault:\n\t\treturn false\n\t}\n\tfor _, r := range u.RawQuery {\n\t\t// https://tools.ietf.org/html/rfc3986#section-3.4 defines:\n\t\t//\n\t\t//\tquery = *( pchar / \"/\" / \"?\" )\n\t\t//\tpchar = unreserved / pct-encoded / sub-delims / \":\" / \"@\"\n\t\t//\tunreserved = ALPHA / DIGIT / \"-\" / \".\" / \"_\" / \"~\"\n\t\t//\tpct-encoded = \"%\" HEXDIG HEXDIG\n\t\t//\tsub-delims = \"!\" / \"$\" / \"&\" / \"'\" / \"(\" / \")\"\n\t\t//\t\t\t/ \"*\" / \"+\" / \",\" / \";\" / \"=\"\n\t\t//\n\t\t// check for these\n\t\tswitch {\n\t\tcase r >= '0' && r <= '9':\n\t\tcase r >= 'A' && r <= 'Z':\n\t\tcase r >= 'a' && r <= 'z':\n\t\tdefault:\n\t\t\tswitch r {\n\t\t\tcase '/', '?',\n\t\t\t\t':', '@',\n\t\t\t\t'-', '.', '_', '~',\n\t\t\t\t'%', '!', '$', '&', '\\'', '(', ')', '*', '+', ',', ';', '=':\n\t\t\tdefault:\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}", "func isValidHostURL(hostURL string) bool {\n\tif strings.TrimSpace(hostURL) == \"\" {\n\t\treturn false\n\t}\n\turl := client.NewURL(hostURL)\n\tif url.Scheme != \"https\" && url.Scheme != \"http\" {\n\t\treturn false\n\t}\n\tif url.Path != \"\" && url.Path != \"/\" {\n\t\treturn false\n\t}\n\treturn true\n}", "func URLValidator(message ...string) regexpValidator {\n\tregex := `^(https?|ftp)(:\\/\\/[-_.!~*\\'()a-zA-Z0-9;\\/?:\\@&=+\\$,%#]+)$`\n\tif len(message) > 0 {\n\t\treturn RegexpValidator(regex, message[0])\n\t} else {\n\t\treturn RegexpValidator(regex, \"Enter a valid url.\")\n\t}\n}", "func IsURLValid(value string) bool {\n\tcheck := value != \"\" && 
!strings.Contains(value, \".gif\") && !strings.Contains(value, \"logo\") && !strings.Contains(value, \"mobilebanner\")\n\n\tif check {\n\t\treturn strings.HasPrefix(value, \"http\") || strings.HasPrefix(value, \"https\")\n\t}\n\n\treturn check\n}", "func (h *Handlers) ValidateURL(input string) bool {\n\tu, err := url.Parse(input)\n\n\tfmt.Println(err, u.Scheme, u.Host)\n\tif err != nil || u.Scheme == \"\" || !strings.Contains(u.Host, \".\") {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (s *htmlState) checkURL(raw string) {\n\tif s.ignore&issueURL != 0 {\n\t\treturn\n\t}\n\n\tif strings.HasPrefix(raw, \"mailto:\") {\n\t\tif strings.Index(raw, \"@\") == -1 {\n\t\t\ts.err(fmt.Errorf(\"not an email address\"))\n\t\t}\n\t\treturn\n\t}\n\n\tu, err := url.Parse(raw)\n\tif err != nil {\n\t\ts.err(fmt.Errorf(\"bad URL '%s': %s\", raw, err.Error()))\n\t\treturn\n\t}\n\tif u.Opaque != \"\" {\n\t\ts.err(fmt.Errorf(\"bad URL part '%s'\", u.Opaque))\n\t\treturn\n\t}\n\n\tif strings.Index(raw, \" \") != -1 {\n\t\ts.err(fmt.Errorf(\"unencoded space in URL\"))\n\t}\n}", "func ValidateURL(u url.URL) error {\n\t_, err := url.ParseRequestURI(u.String())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\texp := regexp.MustCompile(`http(s)?://(www.)?.*..*/`)\n\tif !exp.MatchString(u.String()) {\n\t\treturn errors.New(\"Invalid URL or wrong scheme\")\n\t}\n\n\texp2 := regexp.MustCompile(`.jp(e?)g$|.css$|.ico$`)\n\tif exp2.MatchString(u.String()) {\n\t\treturn errors.New(\"Bad document type\")\n\t}\n\n\treturn nil\n}", "func validateHTTPURL(url string) error {\n\tresp, err := http.Head(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif resp.StatusCode >= 400 {\n\t\treturn fmt.Errorf(\"Got a non 200- or 300- HTTP response code: %s\", resp.Status)\n\t}\n\treturn nil\n}", "func URLValidator(u string) {\n\t_, err := url.ParseRequestURI(u)\n\tif err != nil {\n\t\terror := fmt.Errorf(\"%v\", aurora.Index(197, err))\n\t\tfmt.Println(error.Error())\n\t\tos.Exit(1)\n\t}\n}", "func ValidateURL(typeName, urlStr string) (*url.URL, error) {\n\tvar u *url.URL\n\tvar err error\n\tswitch strings.ToLower(typeName) {\n\tcase \"mssql\":\n\t\tu, err = mssql.ParseURL(urlStr)\n\tdefault:\n\t\tlogger.Debug(\"Applying default URL parsing for this data source type\", \"type\", typeName, \"url\", urlStr)\n\n\t\t// Make sure the URL starts with a protocol specifier, so parsing is unambiguous\n\t\tif !reURL.MatchString(urlStr) {\n\t\t\tlogger.Debug(\n\t\t\t\t\"Data source URL doesn't specify protocol, so prepending it with http:// in order to make it unambiguous\",\n\t\t\t\t\"type\", typeName, \"url\", urlStr)\n\t\t\turlStr = fmt.Sprintf(\"http://%s\", urlStr)\n\t\t}\n\t\tu, err = url.Parse(urlStr)\n\t}\n\tif err != nil {\n\t\treturn nil, URLValidationError{Err: err, URL: urlStr}\n\t}\n\n\treturn u, nil\n}", "func ErrInvalidSourceURL(url string) sdk.Error {\r\n\treturn sdk.NewError(\r\n\t\tDefaultCodespace,\r\n\t\tErrorCodeInvalidSourceURL,\r\n\t\t\"Invalid source URL: \"+url)\r\n}", "func isValidURL(str string) bool {\n\tu, err := url.Parse(str)\n\treturn err == nil && u.Scheme != \"\" && u.Host != \"\"\n}", "func isValidURL(toTest string) bool {\n\t_, err := url.ParseRequestURI(toTest)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tu, err := url.Parse(toTest)\n\tif err != nil || u.Scheme == \"\" || u.Host == \"\" {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func CheckURLByProduct(urls []string) []string {\n\tvar validURLs = []string{}\n\tfor _, url := range urls {\n\t\tif len(validURLs) == 3 {\n\t\t\tbreak\n\t\t} else if 
URLChecker(url) {\n\t\t\tvalidURLs = append(validURLs, url)\n\t\t}\n\t}\n\treturn validURLs\n}", "func (o *URLCreateOptions) Validate() (err error) {\n\texists, err := url.Exists(o.Client, o.urlName, o.Component(), o.Application)\n\tif exists {\n\t\treturn fmt.Errorf(\"The url %s already exists in the application: %s\\n%v\", o.urlName, o.Application, err)\n\t}\n\n\treturn\n}", "func (m *URL) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateAnalytics(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (raw *rawSyncCmdArgs) validateURLIsNotServiceLevel(url string, location common.Location) error {\n\tsrcLevel, err := DetermineLocationLevel(url, location, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif srcLevel == ELocationLevel.Service() {\n\t\treturn fmt.Errorf(\"service level URLs (%s) are not supported in sync: \", url)\n\t}\n\n\treturn nil\n}", "func isValidURL(s string) bool {\n\tu, err := url.Parse(s)\n\treturn err == nil && u.IsAbs()\n}", "func isValidURL(toTest string) bool {\n\t_, err := url.ParseRequestURI(toTest)\n\tif err != nil {\n\t\treturn false\n\t} else {\n\t\treturn true\n\t}\n}", "func SimpleURLChecks(t *testing.T, scheme string, host string, port uint16) mapval.Validator {\n\n\thostPort := host\n\tif port != 0 {\n\t\thostPort = fmt.Sprintf(\"%s:%d\", host, port)\n\t}\n\n\tu, err := url.Parse(fmt.Sprintf(\"%s://%s\", scheme, hostPort))\n\trequire.NoError(t, err)\n\n\treturn mapval.MustCompile(mapval.Map{\n\t\t\"url\": wrappers.URLFields(u),\n\t})\n}", "func validateURL(ctx *validation.Context, s string) (*url.URL, bool) {\n\t// url.Parse considers \"example.com/a/b\" to be a path, so ensure a scheme.\n\tif !strings.HasPrefix(s, \"https://\") {\n\t\ts = \"https://\" + s\n\t}\n\tu, err := url.Parse(s)\n\tif err != nil {\n\t\tctx.Error(errors.Annotate(err, \"not a valid URL\").Err())\n\t\treturn nil, false\n\t}\n\tvalid := true\n\tif !strings.HasSuffix(u.Host, \".googlesource.com\") {\n\t\tctx.Errorf(\"isn't at *.googlesource.com %q\", u.Host)\n\t\tvalid = false\n\t}\n\tif u.Scheme != \"\" && u.Scheme != \"https\" {\n\t\tctx.Errorf(\"scheme must be https\")\n\t\tvalid = false\n\t}\n\tif strings.HasSuffix(u.Host, \"-review.googlesource.com\") {\n\t\tctx.Errorf(\"must not be a Gerrit host (try without '-review')\")\n\t\tvalid = false\n\t}\n\treturn u, valid\n}", "func (v URL) IsValid() bool {\n\tif v == \"\" || len(v) >= 2083 || len(v) <= 3 || strings.HasPrefix(v.String(), \".\") {\n\t\treturn false\n\t}\n\tu, err := netURL.Parse(v.String())\n\tif err != nil {\n\t\treturn false\n\t}\n\tif strings.HasPrefix(u.Host, \".\") {\n\t\treturn false\n\t}\n\tif u.Host == \"\" && (u.Path != \"\" && !strings.Contains(u.Path, \".\")) {\n\t\treturn false\n\t}\n\ttmp := strings.Split(u.Host, \":\")\n\tif tmp[0] == \"localhost\" {\n\t\treturn true\n\t}\n\treturn rxURL.Match([]byte(v.String()))\n}", "func ValidateURI(in string) error {\n\tin = strings.ToLower(in)\n\t// make sure uri starts with cpe:/\n\tif !strings.HasPrefix(in, \"cpe:/\") {\n\t\treturn errors.Wrapf(ErrParse, \"Error: URI must start with 'cpe:/'. Given: %s\", in)\n\t}\n\t// make sure uri doesn't contain more than 7 colons\n\tif count := strings.Count(in, \":\"); count > 7 {\n\t\treturn errors.Wrapf(ErrParse, \"Error parsing URI. 
Found %d extra components in: %s\", count-7, in)\n\t}\n\treturn nil\n}", "func ValidateNonDefault(args []string) error {\n\n\tif len(args) == 0 {\n\t\treturn errors.New(missingArguments)\n\t}\n\n\tif err := validateMethod(args[0]); err != nil {\n\t\treturn err\n\t}\n\n\tif err := validateURL(args[1]); err != nil {\n\t\treturn err\n\t}\n\n\tif err := validateExtension(args[2]); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (o *URLCreateOptions) Validate() (err error) {\n\tif !util.CheckOutputFlag(o.OutputFlag) {\n\t\treturn fmt.Errorf(\"given output format %s is not supported\", o.OutputFlag)\n\t}\n\n\t// if experimental mode is enabled, and devfile is provided.\n\terrorList := make([]string, 0)\n\tif o.isDevFile {\n\t\tif !o.isDocker && o.tlsSecret != \"\" && (o.urlType != envinfo.INGRESS || !o.secureURL) {\n\t\t\terrorList = append(errorList, \"TLS secret is only available for secure URLs of Ingress kind\")\n\t\t}\n\n\t\t// check if a host is provided for route based URLs\n\t\tif len(o.host) > 0 {\n\t\t\tif o.urlType == envinfo.ROUTE {\n\t\t\t\terrorList = append(errorList, \"host is not supported for URLs of Route Kind\")\n\t\t\t}\n\t\t\tif err := validation.ValidateHost(o.host); err != nil {\n\t\t\t\terrorList = append(errorList, err.Error())\n\t\t\t}\n\t\t} else if o.urlType == envinfo.INGRESS {\n\t\t\terrorList = append(errorList, \"host must be provided in order to create URLS of Ingress Kind\")\n\t\t}\n\t\tfor _, localURL := range o.EnvSpecificInfo.GetURL() {\n\t\t\tif o.urlName == localURL.Name {\n\t\t\t\terrorList = append(errorList, fmt.Sprintf(\"URL %s already exists\", o.urlName))\n\t\t\t}\n\t\t}\n\t} else {\n\t\tfor _, localURL := range o.LocalConfigInfo.GetURL() {\n\t\t\tif o.urlName == localURL.Name {\n\t\t\t\terrorList = append(errorList, fmt.Sprintf(\"URL %s already exists in application: %s\", o.urlName, o.Application))\n\t\t\t}\n\t\t}\n\t}\n\t// Check if url name is more than 63 characters long\n\tif len(o.urlName) > 63 {\n\t\terrorList = append(errorList, \"URL name must be shorter than 63 characters\")\n\t}\n\n\tif !o.isExperimental {\n\t\tif o.now {\n\t\t\tif err = o.ValidateComponentCreate(); err != nil {\n\t\t\t\terrorList = append(errorList, err.Error())\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(errorList) > 0 {\n\t\tfor i := range errorList {\n\t\t\terrorList[i] = fmt.Sprintf(\"\\t- %s\", errorList[i])\n\t\t}\n\t\treturn fmt.Errorf(\"URL creation failed:\\n%s\", strings.Join(errorList, \"\\n\"))\n\t}\n\treturn\n}", "func newURLs(isRaw bool, versionID string, isAllVersions bool, sources ...string) ([]*url.URL, error) {\n\tvar urls []*url.URL\n\tfor _, src := range sources {\n\t\tsrcurl, err := url.New(src, url.WithRaw(isRaw), url.WithVersion(versionID),\n\t\t\turl.WithAllVersions(isAllVersions))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif err := checkVersinoningURLRemote(srcurl); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\turls = append(urls, srcurl)\n\t}\n\treturn urls, nil\n}", "func sanitizeUrl(href string, domain string) (url.URL, bool){\n\tif strings.Trim(href, \" \") == \"\"{\n\t\treturn url.URL{}, false\n\t}\n\n\tu, err := url.Parse(href)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn url.URL{}, false\n\t}\n\n\tif u.Host == \"\"{\n\t\tu.Host = domain\n\t} else if u.Host != domain || u.Path == \"/\" || u.Path == \"\"{\n\t\treturn url.URL{}, false\n\t}\n\n\tif u.Scheme == \"\"{\n\t\tu.Scheme = \"https\"\n\t}\n\n\t// Ignore alien schemas [ mailto, ftp, etc ]\n\tif !strings.Contains(u.Scheme, \"http\") {\n\t\treturn url.URL{}, 
false\n\t}\n\n\t// TODO: Check URL is accessible\n\n\treturn *u, true\n}", "func (s *ScraperGeneric) WantsURL(url string) bool {\n\treturn true\n}", "func isURL(v string) bool {\n\tvalGen := pflagValueFuncMap[urlFlag]\n\treturn valGen().Set(v) == nil\n}", "func (v *URLIsPresent) IsValid(errors *validate.Errors) {\n\tif v.Field == \"http://\" || v.Field == \"https://\" {\n\t\tif v.Message == \"\" {\n\t\t\tv.Message = fmt.Sprintf(\"%s url is empty\", v.Name)\n\t\t}\n\t\terrors.Add(GenerateKey(v.Name), v.Message)\n\t}\n\tparsedUrl, err := url.ParseRequestURI(v.Field)\n\tif err != nil {\n\t\tif v.Message == \"\" {\n\t\t\tv.Message = fmt.Sprintf(\"%s does not match url format. Err: %s\", v.Name,\n\t\t\t\terr)\n\t\t}\n\t\terrors.Add(GenerateKey(v.Name), v.Message)\n\t} else {\n\t\tif parsedUrl.Scheme != \"\" && parsedUrl.Scheme != \"http\" && parsedUrl.Scheme != \"https\" {\n\t\t\tif v.Message == \"\" {\n\t\t\t\tv.Message = fmt.Sprintf(\"%s invalid url scheme\", v.Name)\n\t\t\t}\n\t\t\terrors.Add(GenerateKey(v.Name), v.Message)\n\t\t}\n\t}\n}", "func checkURL(u string) bool {\n\tcheck, _ := regexp.MatchString(\"^(http://skypolaris.org/wp-content/uploads/IGS%20Files/)(.*?)(%20)(.*?)(.igc)$\", u)\n\tif check == true {\n\t\treturn true\n\t}\n\treturn false\n}", "func (ie *CommonIE) ValidURL() string {\n\treturn ie.VALIDURL\n}", "func (p *para) checkUrl(s string) error {\n\tr := regexp.MustCompile(`google\\.com\\/(\\w.+)\\/d\\/(\\w.+)\\/`)\n\tif r.MatchString(s) {\n\t\tres := r.FindAllStringSubmatch(s, -1)\n\t\tp.Kind = res[0][1]\n\t\tp.Id = res[0][2]\n\t\tif p.Kind == \"file\" {\n\t\t\tp.Url = anyurl + \"&id=\" + p.Id\n\t\t} else {\n\t\t\tif p.Kind == \"presentation\" {\n\t\t\t\tp.Url = docutl + p.Kind + \"/d/\" + p.Id + \"/export/\" + p.Ext\n\t\t\t} else {\n\t\t\t\tp.Url = docutl + p.Kind + \"/d/\" + p.Id + \"/export?format=\" + p.Ext\n\t\t\t}\n\t\t}\n\t} else {\n\t\treturn errors.New(\"Error: URL is wrong.\")\n\t}\n\treturn nil\n}", "func ParseAndValidateURIs(uriList string) (res []*url.URL, err error) {\n\tif len(uriList) > 0 {\n\t\turis := strings.Split(uriList, \",\")\n\t\tfor _, uri := range uris {\n\t\t\tparsedURI, err := url.Parse(uri)\n\t\t\tif err != nil {\n\t\t\t\tparsedURI = nil\n\t\t\t}\n\t\t\tif parsedURI == nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Invalid URI: %s\", uri)\n\t\t\t}\n\t\t\tif !parsedURI.IsAbs() {\n\t\t\t\treturn nil, fmt.Errorf(\"Invalid URI: %s\", uri)\n\t\t\t}\n\t\t\tres = append(res, parsedURI)\n\t\t}\n\t}\n\treturn\n}", "func CheckURL(config *parsers.WebsiteConfig) error {\n\tjar, err := cookiejar.New(nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tclient := &http.Client{\n\t\tTimeout: 30 * time.Second,\n\t\tJar: jar,\n\t}\n\n\trequest, err := http.NewRequest(\"GET\", config.URL, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trequest.Header.Set(\"User-Agent\", \"Mozilla/5.0 (X11; Linux x86_64; rv:83.0) Gecko/20100101 Firefox/83.0\")\n\trequest.Header.Set(\"Accept\", \"image/webp,*/*\")\n\t//request.Header.Set(\"Accept-Encoding\", \"gzip, deflate, br\")\n\trequest.Header.Set(\"Accept-Language\", \"fr,fr-FR;q=0.8,en-US;q=0.5,en;q=0.3\")\n\n\tresponse, err := client.Do(request)\n\tif response.StatusCode == 403 {\n\t\treturn errors.New(fmt.Sprintf(\"Impossible d'accéder à la page %s\", config.URL))\n\t}\n\n\tfor _, assertion := range config.Assertions {\n\t\tif assertion.Selector != \"\" {\n\t\t\tdoc, err := goquery.NewDocumentFromReader(response.Body)\n\t\t\tdefer response.Body.Close()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tselectorText := 
doc.Find(assertion.Selector).First().Text()\n\t\t\tfor _, text := range assertion.Contains {\n\t\t\t\tif !strings.Contains(selectorText, text) {\n\t\t\t\t\treturn errors.New(\"Le selecteur ne contient pas le texte recherché\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif assertion.Status != 0 {\n\t\t\tif assertion.Status != response.StatusCode {\n\t\t\t\treturn errors.New(fmt.Sprintf(\"Mauvais code HTTP %d (attendu : %d)\", response.StatusCode, assertion.Status))\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (check *Check) Url(url string) *Check {\n\tcheck.url = &url\n\treturn check\n}", "func ValidateURL(fl validator.FieldLevel) bool {\n\tstr := fl.Field().String()\n\tu, err := url.Parse(str)\n\tif err != nil {\n\t\treturn false\n\t}\n\tif strings.HasPrefix(u.Host, \".\") {\n\t\treturn false\n\t}\n\tif u.Host == \"\" && (u.Path != \"\" && !strings.Contains(u.Path, \".\")) {\n\t\treturn false\n\t}\n\treturn urlRegex.MatchString(str)\n}", "func validateOtherExternalServiceConnection(c *schema.OtherExternalServiceConnection) error {\n\tparseRepo := url.Parse\n\tif c.Url != \"\" {\n\t\t// We ignore the error because this already validated by JSON Schema.\n\t\tbaseURL, _ := url.Parse(c.Url)\n\t\tparseRepo = baseURL.Parse\n\t}\n\n\tfor i, repo := range c.Repos {\n\t\tcloneURL, err := parseRepo(repo)\n\t\tif err != nil {\n\t\t\treturn errors.Errorf(`repos.%d: %s`, i, err)\n\t\t}\n\n\t\tswitch cloneURL.Scheme {\n\t\tcase \"git\", \"http\", \"https\", \"ssh\":\n\t\t\tcontinue\n\t\tdefault:\n\t\t\treturn errors.Errorf(\"repos.%d: scheme %q not one of git, http, https or ssh\", i, cloneURL.Scheme)\n\t\t}\n\t}\n\n\treturn nil\n}", "func validateFSConnectionParams(p *dataConnParam) error {\n\tif len(p.connectionParams) < 1 {\n\t\treturn errors.New(\"the url path must be in the form https://host/fs/project/collection/doc/collection/doc\")\n\t}\n\treturn nil\n}", "func (me TdtypeType) IsUrl() bool { return me.String() == \"url\" }", "func BlacklistURL(urls ...string) {\n\tfor _, u := range urls {\n\t\turlBlacklist[u] = struct{}{}\n\t}\n}", "func URL(str string) bool {\n\tif str == \"\" || len(str) >= 2083 || len(str) <= 3 || strings.HasPrefix(str, \".\") {\n\t\treturn false\n\t}\n\tu, err := url.Parse(str)\n\tif err != nil {\n\t\treturn false\n\t}\n\tif strings.HasPrefix(u.Host, \".\") {\n\t\treturn false\n\t}\n\tif u.Host == \"\" && (u.Path != \"\" && !strings.Contains(u.Path, \".\")) {\n\t\treturn false\n\t}\n\treturn rxURL.MatchString(str)\n\n}", "func (p *Page) MustSetBlockedURLs(urls ...string) *Page {\n\tp.e(p.SetBlockedURLs(urls))\n\treturn p\n}", "func AssertBasicURLEscaperExceptPercent(t *testing.T, e escape.Escaper) {\n\t// All URL escapers should leave 0-9, A-Z, a-z unescaped\n\tAssertUnescapedByte(t, e, 'a')\n\tAssertUnescapedByte(t, e, 'z')\n\tAssertUnescapedByte(t, e, 'A')\n\tAssertUnescapedByte(t, e, 'Z')\n\tAssertUnescapedByte(t, e, '0')\n\tAssertUnescapedByte(t, e, '9')\n\n\t// Unreserved characters\n\tAssertUnescapedByte(t, e, '-')\n\tAssertUnescapedByte(t, e, '_')\n\tAssertUnescapedByte(t, e, '.')\n\tAssertUnescapedByte(t, e, '*')\n\n\tAssertEscapingByte(t, e, \"%00\", '\\u0000') // nul\n\tAssertEscapingByte(t, e, \"%7F\", '\\u007f') // del\n\tAssertEscapingByte(t, e, \"%C2%80\", '\\u0080') // xx-00010,x-000000\n\tAssertEscapingRune(t, e, \"%DF%BF\", '\\u07ff') // xx-11111,x-111111\n\tAssertEscapingRune(t, e, \"%E0%A0%80\", '\\u0800') // xxx-0000,x-100000,x-00,0000\n\tAssertEscapingRune(t, e, \"%EF%BF%BF\", '\\uffff') // xxx-1111,x-111111,x-11,1111\n\tAssertEscapingUnicode(t, e, 
\"%F0%90%80%80\", MinHighSurrogate, MinLowSurrogate)\n\tAssertEscapingUnicode(t, e, \"%F4%8F%BF%BF\", MaxHighSurrogate, MaxLowSurrogate)\n\n\trequire.Equal(t, \"\", e.Escape(\"\"))\n\trequire.Equal(t, \"safestring\", e.Escape(\"safestring\"))\n\trequire.Equal(t, \"embedded%00null\", e.Escape(\"embedded\\u0000null\"))\n\trequire.Equal(t, \"max%EF%BF%BFchar\", e.Escape(\"max\\uffffchar\"))\n}", "func isUrl(u string) bool {\n\tresult, err := url.Parse(u)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn result.Scheme == \"http\" || result.Scheme == \"https\"\n}", "func isURL(fl FieldLevel) bool {\n\tfield := fl.Field()\n\n\tswitch field.Kind() {\n\tcase reflect.String:\n\n\t\ts := field.String()\n\n\t\tif len(s) == 0 {\n\t\t\treturn false\n\t\t}\n\n\t\turl, err := url.Parse(s)\n\t\tif err != nil || url.Scheme == \"\" {\n\t\t\treturn false\n\t\t}\n\n\t\tif url.Host == \"\" && url.Fragment == \"\" && url.Opaque == \"\" {\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\t}\n\n\tpanic(fmt.Sprintf(\"Bad field type %T\", field.Interface()))\n}", "func checkArguments(url, dir string) {\n\tif !httputil.IsURL(url) {\n\t\tprintErrorAndExit(\"Url %s doesn't look like valid url\", url)\n\t}\n\n\tif !fsutil.IsExist(dir) {\n\t\tprintErrorAndExit(\"Directory %s does not exist\", dir)\n\t}\n\n\tif !fsutil.IsDir(dir) {\n\t\tprintErrorAndExit(\"Target %s is not a directory\", dir)\n\t}\n\n\tif !fsutil.IsReadable(dir) {\n\t\tprintErrorAndExit(\"Directory %s is not readable\", dir)\n\t}\n\n\tif !fsutil.IsExecutable(dir) {\n\t\tprintErrorAndExit(\"Directory %s is not executable\", dir)\n\t}\n}", "func MustParseURL(rawURL string) *url.URL {\n\turl, err := url.Parse(rawURL)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn url\n}", "func isUrl(str string) bool {\n\tu, err := url.Parse(str)\n\treturn err == nil && u.Scheme != \"\" && u.Host != \"\"\n}", "func validHTTP(req *http.Request, resp *http.Response) error {\r\n\tswitch {\r\n\tcase resp.StatusCode >= 400:\r\n\t\treturn errNotFound\r\n\tcase resp.StatusCode >= 300 && resp.StatusCode != http.StatusNotModified:\r\n\t\tl := resp.Header.Get(\"Location\")\r\n\t\tu, err := url.Parse(l)\r\n\t\tif err != nil {\r\n\t\t\treturn err\r\n\t\t}\r\n\t\tif u.Host == \"alert.scansafe.net\" {\r\n\t\t\treturn errFiltered\r\n\t\t}\r\n\t}\r\n\treturn nil\r\n}", "func MustParseURL(target string) *url.URL {\n\tu, err := url.Parse(target)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Error parsing target(%s): %v\", target, err))\n\t}\n\treturn u\n}", "func validateFileOrURL(v string) error {\n\tvalGen := pflagValueFuncMap[fileFlag]\n\tif valGen().Set(v) == nil {\n\t\treturn nil\n\t}\n\tvalGen = pflagValueFuncMap[urlFlag]\n\treturn valGen().Set(v)\n}", "func ValidateURL(c *Config) error {\n\t_, err := url.Parse(c.MetricsPath)\n\treturn err\n}", "func validateEndPoint(endPointPointer *string) error {\n\tendPoint := *endPointPointer\n\n\tif endPoint == \"\" {\n\t\tendPoint = defaultEndPointURI\n\t}\n\n\tmatch, err := regexp.MatchString(`^\\w+://`, endPoint)\n\tif err != nil {\n\t\treturn util.NewPersistentError(\"InvalidEndpoint\",\n\t\t\tfmt.Sprintf(\"Invalid endpoint url %q: %v\", endPoint, err))\n\t}\n\tif !match {\n\t\tendPoint = \"https://\" + endPoint\n\t}\n\tu, err := url.Parse(endPoint)\n\tif err != nil {\n\t\treturn util.NewPersistentError(\"InvalidEndpoint\",\n\t\t\tfmt.Sprintf(\"Invalid endpoint url %q: %v\", endPoint, err))\n\t}\n\tif u.Scheme == \"\" {\n\t\tu.Scheme = \"https\"\n\t}\n\n\t*endPointPointer = u.String()\n\n\treturn nil\n}", "func MustParseEndpointURL(url 
string, options ...Option) Endpoint {\n\td, err := ParseEndpointURL(url, options...)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn d\n}", "func ValidateNFailRequest(page *rod.Page, e *proto.FetchRequestPaused) error {\n\treqURL := e.Request.URL\n\tnormalized := strings.ToLower(reqURL) // normalize url to lowercase\n\tnormalized = strings.TrimSpace(normalized) // trim leading & trailing whitespaces\n\tif !allowLocalFileAccess && stringsutil.HasPrefixI(normalized, \"file:\") {\n\t\treturn multierr.Combine(FailWithReason(page, e), ErrURLDenied.Msgf(reqURL, \"use of file:// protocol disabled use '-lfa' to enable\"))\n\t}\n\t// validate potential invalid schemes\n\t// javascript protocol is allowed for xss fuzzing\n\tif HasPrefixAnyI(normalized, \"ftp:\", \"externalfile:\", \"chrome:\", \"chrome-extension:\") {\n\t\treturn multierr.Combine(FailWithReason(page, e), ErrURLDenied.Msgf(reqURL, \"protocol blocked by network policy\"))\n\t}\n\tif !isValidHost(reqURL) {\n\t\treturn multierr.Combine(FailWithReason(page, e), ErrURLDenied.Msgf(reqURL, \"address blocked by network policy\"))\n\t}\n\treturn nil\n}", "func FilterUrl(inUrl *url.URL) bool {\n\t// Check if allowed scheme\n\tallowedSchemes := []string{\"http\", \"https\"}\n\n\tif !util.Conatins(allowedSchemes, inUrl.Scheme) {\n\t\tlog.Printf(\"Filtering scheme %v\\n\", inUrl.Scheme)\n\t\treturn true\n\t}\n\n\t// Host / Domain checks\n\n\thost, _ := publicsuffix.EffectiveTLDPlusOne(inUrl.Host)\n\n\t// Check for no domain\n\tif host == \"\" {\n\t\tlog.Println(\"Filtering empty host\")\n\t\treturn true\n\t}\n\n\t// Check for allowed domains\n\t/*\n\t\t_, icann := publicsuffix.PublicSuffix(host)\n\t\tif !icann {\n\t\t\tlog.Println(\"filtering bad host\")\n\t\t\treturn true\n\t\t}\n\t*/\n\n\t// Check port\n\t_, port, err := net.SplitHostPort(inUrl.Host)\n\tif err != nil {\n\t\t//log.Println(err)\n\t} else if port != \"80\" || port != \"443\" {\n\t\tlog.Println(\"Filtering port \", port)\n\t\treturn true\n\t}\n\n\treturn false\n}", "func isURL(str string) bool {\n\tu, err := url.Parse(str)\n\treturn err == nil && u.Scheme != \"\" && u.Host != \"\"\n}", "func TestParseURIWithValidURL(t *testing.T) {\n\tconst (\n\t\thttpPort = \"8086\"\n\t\thttpsPort = \"433\"\n\t\thttpScheme = \"http\"\n\t\thttpsScheme = \"https\"\n\t\thostname = \"accounts.couchbase.com\"\n\t)\n\thttpURL := fmt.Sprintf(\"%s://%s:%s\", httpScheme, hostname, httpPort)\n\thttpsURL := fmt.Sprintf(\"%s://%s:%s\", httpsScheme, hostname, httpsPort)\n\thttpHost := fmt.Sprintf(\"%s:%s\", hostname, httpPort)\n\thttpsHost := fmt.Sprintf(\"%s:%s\", hostname, httpsPort)\n\n\tconfig := &OidcProviderConfiguration{}\n\n\turl, err := config.parseURI(httpURL)\n\tassert.Nil(t, err)\n\tassert.Equal(t, httpURL, url.String())\n\tassert.Equal(t, httpScheme, url.Scheme)\n\tassert.Equal(t, httpHost, url.Host)\n\tassert.Equal(t, hostname, url.Hostname())\n\tassert.Equal(t, httpPort, url.Port())\n\n\turl, err = config.parseURI(httpsURL)\n\tassert.Nil(t, err)\n\tassert.Equal(t, httpsURL, url.String())\n\tassert.Equal(t, httpsScheme, url.Scheme)\n\tassert.Equal(t, httpsHost, url.Host)\n\tassert.Equal(t, hostname, url.Hostname())\n\tassert.Equal(t, httpsPort, url.Port())\n\n\t// Blank or empty URL\n\turl, err = config.parseURI(\"\")\n\tassert.Nil(t, err)\n\tassert.Nil(t, url)\n}", "func IsValidURL(obj string) bool {\n\treturn urlPtn.MatchString(obj)\n}", "func TestInvalidUrl(t *testing.T) {\n\n\tassert := assert.New(t)\n\n\tw := webhook.W{\n\t\tUntil: time.Now().Add(60 * time.Second),\n\t\tEvents: 
[]string{\"iot\"},\n\t}\n\tw.Config.URL = \"invalid\"\n\tw.Config.ContentType = \"application/json\"\n\n\tobs, err := OutboundSenderFactory{\n\t\tListener: w,\n\t\tClient: &http.Client{},\n\t\tNumWorkers: 10,\n\t\tQueueSize: 10,\n\t\tProfilerFactory: testServerProfilerFactory,\n\t\tLogger: getLogger(),\n\t}.New()\n\tassert.Nil(obs)\n\tassert.NotNil(err)\n\n\tw2 := webhook.W{\n\t\tUntil: time.Now().Add(60 * time.Second),\n\t\tEvents: []string{\"iot\"},\n\t}\n\tw2.Config.ContentType = \"application/json\"\n\n\tobs, err = OutboundSenderFactory{\n\t\tListener: w2,\n\t\tClient: &http.Client{},\n\t\tNumWorkers: 10,\n\t\tQueueSize: 10,\n\t\tProfilerFactory: testServerProfilerFactory,\n\t\tLogger: getLogger(),\n\t}.New()\n\tassert.Nil(obs)\n\tassert.NotNil(err)\n\n}", "func IsValidUrl(str string) bool {\n\tu, err := url.Parse(str)\n\treturn err == nil && u.Scheme != \"\" && u.Host != \"\"\n}", "func isValidHost(targetUrl string) bool {\n\tif !stringsutil.HasPrefixAny(targetUrl, \"http:\", \"https:\") {\n\t\treturn true\n\t}\n\tif networkPolicy == nil {\n\t\treturn true\n\t}\n\turlx, err := urlutil.Parse(targetUrl)\n\tif err != nil {\n\t\t// not a valid url\n\t\treturn false\n\t}\n\ttargetUrl = urlx.Hostname()\n\t_, ok := networkPolicy.ValidateHost(targetUrl)\n\treturn ok\n}", "func RejectNonHTTPS() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tlg.Info(\"[RejectNonHTTPS]\")\n\n\t\t//TODO:check protocol of url\n\t\t//if strings.Index(c.url, \"https://\") == -1 {\n\t\t//\tc.AbortWithStatus(403)\n\t\t//}\n\t\tc.Next()\n\t}\n}", "func IsURL(str string) bool {\n\tif str == \"\" || utf8.RuneCountInString(str) >= maxURLRuneCount || len(str) <= minURLRuneCount || strings.HasPrefix(str, \".\") {\n\t\treturn false\n\t}\n\tstrTemp := str\n\tif strings.Contains(str, \":\") && !strings.Contains(str, \"://\") {\n\t\t// support no indicated urlscheme but with colon for port number\n\t\t// http:// is appended so url.Parse will succeed, strTemp used so it does not impact rxURL.MatchString\n\t\tstrTemp = \"http://\" + str\n\t}\n\tu, err := url.Parse(strTemp)\n\tif err != nil {\n\t\treturn false\n\t}\n\tif strings.HasPrefix(u.Host, \".\") {\n\t\treturn false\n\t}\n\tif u.Host == \"\" && (u.Path != \"\" && !strings.Contains(u.Path, \".\")) {\n\t\treturn false\n\t}\n\treturn rxURL.MatchString(str)\n}", "func IsURL(str string) bool {\n\tif str == \"\" || utf8.RuneCountInString(str) >= maxURLRuneCount || len(str) <= minURLRuneCount || strings.HasPrefix(str, \".\") {\n\t\treturn false\n\t}\n\tstrTemp := str\n\tif strings.Contains(str, \":\") && !strings.Contains(str, \"://\") {\n\t\t// support no indicated urlscheme but with colon for port number\n\t\t// http:// is appended so url.Parse will succeed, strTemp used so it does not impact rxURL.MatchString\n\t\tstrTemp = \"http://\" + str\n\t}\n\tu, err := url.Parse(strTemp)\n\tif err != nil {\n\t\treturn false\n\t}\n\tif strings.HasPrefix(u.Host, \".\") {\n\t\treturn false\n\t}\n\tif u.Host == \"\" && (u.Path != \"\" && !strings.Contains(u.Path, \".\")) {\n\t\treturn false\n\t}\n\treturn rxURL.MatchString(str)\n}", "func MustParseURL(s string) *URL {\n\tu, err := ParseURL(s)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn u\n}", "func mustPU(rawurl string) *url.URL {\n\tif u, err := url.Parse(rawurl); err != nil {\n\t\tpanic(err)\n\t} else {\n\t\treturn u\n\t}\n}", "func TestValidateURL(t *testing.T) {\n\n\tplataforms := [4]string{\n\t\t\"facebook\",\n\t\t\"twitter\",\n\t\t\"youtube\",\n\t\t\"instagram\",\n\t}\n\n\tfor _, plataform := range plataforms 
{\n\n\t\tURL := \"www.\" + plataform + \".com/teste\"\n\n\t\tresult := validateURL(URL)\n\n\t\tif result != plataform {\n\t\t\tt.Errorf(\"Plataform was incorrect, got: %s, want: %s.\", result, plataform)\n\t\t}\n\t}\n}", "func TestSimpleParser_baseUrl_err(t *testing.T) {\n\tif baseUrl(\"1234\") != \"1234\" {\n\t\tt.Logf(\"baseUrl method is expected to return original link on error\")\n\t\tt.FailNow()\n\t}\n}", "func (p PathParser) validatePath(value string) error {\n\tif p.ValidSchemes != nil {\n\t\tvar u, err = url.Parse(value)\n\t\tif err != nil {\n\t\t\tlogrus.WithError(err).WithField(\"url\", value).Error(\"PathParser: invalid URL\")\n\t\t\treturn err\n\t\t}\n\n\t\tif u.RawQuery != \"\" {\n\t\t\tlogrus.WithField(\"url\", value).Error(\"URL can't have a query string\")\n\t\t\treturn errors.New(\"URL query string unsupported\")\n\t\t}\n\t\tif u.Fragment != \"\" {\n\t\t\tlogrus.WithField(\"url\", value).Error(\"URL can't have a fragment\")\n\t\t\treturn errors.New(\"URL fragment unsupported\")\n\t\t}\n\n\t\tvar scheme = strings.ToLower(u.Scheme)\n\t\tif !p.ValidSchemes[scheme] {\n\t\t\tlogrus.WithField(\"url\", value).Error(\"URL scheme not supported\")\n\t\t\treturn errors.New(\"unsupported URL scheme: \" + u.Scheme)\n\t\t}\n\n\t\tswitch scheme {\n\t\tcase \"http\", \"https\":\n\t\t\tif u.Path != \"\" || u.RawPath != \"\" || u.Opaque != \"\" {\n\t\t\t\tif u.Path != \"/\" {\n\t\t\t\t\tlogrus.WithField(\"url\", value).Errorf(\"%s URLs should only have a hostPort, but no path\", strings.ToUpper(scheme))\n\t\t\t\t\treturn errors.New(\"PathParser: got path in HTTP(s) URL\")\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"\", \"file\", \"unix\":\n\t\t\tif u.Host != \"\" {\n\t\t\t\tlogrus.WithField(\"url\", value).Error(\"file paths can't have a hostname portion\")\n\t\t\t\tlogrus.Info(\" you may have used something like file://etc/motd instead of file:/etc/motd (or file:///etc/motd)\")\n\t\t\t\treturn errors.New(\"PathParser: got host in file URL\")\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n\n\t// simple path: assume it's valid\n\treturn nil\n}", "func URLNotIn(vs ...string) predicate.Token {\n\tv := make([]interface{}, len(vs))\n\tfor i := range v {\n\t\tv[i] = vs[i]\n\t}\n\treturn predicate.Token(func(s *sql.Selector) {\n\t\t// if not arguments were provided, append the FALSE constants,\n\t\t// since we can't apply \"IN ()\". 
This will make this predicate falsy.\n\t\tif len(vs) == 0 {\n\t\t\ts.Where(sql.False())\n\t\t\treturn\n\t\t}\n\t\ts.Where(sql.NotIn(s.C(FieldURL), v...))\n\t},\n\t)\n}", "func IsValidURL(address string) bool {\n\tif IsEmptyStr(address) {\n\t\treturn false\n\t}\n\n\treturn govalidator.IsURL(address)\n}", "func ValidateURI(id *url.URL, mode ValidationMode) error {\n\toptions := mode.validationOptions()\n\n\tvalidationError := func(format string, args ...interface{}) error {\n\t\tvar kind string\n\t\tswitch options.idType {\n\t\tcase trustDomainId:\n\t\t\tkind = \"trust domain \"\n\t\tcase workloadId:\n\t\t\tkind = \"workload \"\n\t\t}\n\t\tvar idStr string\n\t\tif id != nil {\n\t\t\tidStr = id.String()\n\t\t}\n\t\treturn fmt.Errorf(\"invalid %sSPIFFE ID %q: \"+format,\n\t\t\tappend([]interface{}{kind, idStr}, args...)...)\n\t}\n\n\tif id == nil || *id == (url.URL{}) {\n\t\treturn validationError(\"SPIFFE ID is empty\")\n\t}\n\n\t// General validation\n\tswitch {\n\tcase strings.ToLower(id.Scheme) != \"spiffe\":\n\t\treturn validationError(\"invalid scheme\")\n\tcase id.User != nil:\n\t\treturn validationError(\"user info is not allowed\")\n\tcase id.Host == \"\":\n\t\treturn validationError(\"trust domain is empty\")\n\tcase id.Port() != \"\":\n\t\treturn validationError(\"port is not allowed\")\n\tcase id.Fragment != \"\":\n\t\treturn validationError(\"fragment is not allowed\")\n\tcase id.RawQuery != \"\":\n\t\treturn validationError(\"query is not allowed\")\n\t}\n\n\t// trust domain validation\n\tif options.trustDomainRequired {\n\t\tif options.trustDomain == \"\" {\n\t\t\treturn errors.New(\"trust domain to validate against cannot be empty\")\n\t\t}\n\t\tif id.Host != options.trustDomain {\n\t\t\treturn fmt.Errorf(\"%q does not belong to trust domain %q\", id, options.trustDomain)\n\t\t}\n\t}\n\n\t// id type validation\n\tswitch options.idType {\n\tcase anyId:\n\tcase trustDomainId:\n\t\tif id.Path != \"\" {\n\t\t\treturn validationError(\"path is not empty\")\n\t\t}\n\tcase workloadId:\n\t\tif id.Path == \"\" {\n\t\t\treturn validationError(\"path is empty\")\n\t\t}\n\tdefault:\n\t\treturn validationError(\"internal error: unhandled id type %v\", options.idType)\n\t}\n\n\treturn nil\n}", "func ValidateRemote(remote *Remote) bool {\n\turl, err := url.Parse(remote.Urlbase)\n\tif err != nil {\n\t\tlog.Printf(\"%s is not a URL!\\n\", remote.Urlbase)\n\t\treturn false\n\t} else if !url.IsAbs() {\n\t\tlog.Printf(\"%s is not an absolute URL!\\n\", remote.Urlbase)\n\t\treturn false\n\t}\n\tswitch url.Scheme {\n\tcase \"git\":\n\t\tfallthrough\n\tcase \"http\":\n\t\tfallthrough\n\tcase \"https\":\n\t\tif url.User != nil {\n\t\t\tlog.Printf(\"Please don't embed userinfo in your http(s) or git URL!\\n\")\n\t\t\tlog.Printf(\"Instead, modify your .netrc to include it for %s\\n\", url.Host)\n\t\t\tlog.Printf(\"Example:\\n\")\n\t\t\tlog.Printf(\" machine %s login <username> password <password>\\n\", url.Host)\n\t\t\treturn false\n\t\t}\n\tcase \"ssh\":\n\t\tif url.User == nil {\n\t\t\tlog.Printf(\"%s does not include an embedded username!\", remote.Urlbase)\n\t\t\treturn false\n\t\t}\n\tdefault:\n\t\tlog.Printf(\"URL scheme %s is not supported by the dev tool for now.\", url.Scheme)\n\t\treturn false\n\t}\n\tif remote.Name == \"\" {\n\t\tremote.Name = filepath.Base(url.Path)\n\t}\n\tif !validRemoteName(remote.Name) {\n\t\treturn false\n\t}\n\tif remote.Priority < 1 || remote.Priority > 100 {\n\t\tlog.Printf(\"Priority must be a number between 1 and 100 (currently %d)!\\n\", 
remote.Priority)\n\t\treturn false\n\t}\n\treturn true\n}", "func AssertBasicURLEscaper(t *testing.T, e escape.Escaper) {\n\tAssertBasicURLEscaperExceptPercent(t, e)\n\t// The escape character must always be escaped\n\tAssertEscapingByte(t, e, \"%25\", '%')\n}", "func notSupported(w http.ResponseWriter, r *http.Request, body []byte, creds auth.Creds, vars map[string]string, req_id string) {\n\tglog.Warningf(\"Docker pattern not accepted, URI=%s\", r.RequestURI)\n\tNoEndpointHandler(w, r)\n}", "func validateUrl(urlString string) (string, error) {\n\t// oauthKey = \"TABzAdrAofgB9Vw7NIffXgSl\"\n\t// client, err := buildOAuthHTTPClient(oauthKey)\n\t// if err != nil {\n\t// \treturn\n\t// }\n\t// service, err := youtube.New(client)\n\t// if err != nil {\n\t// \treturn\n\t// }\n\turl, err := url.Parse(urlString)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Invalid url: Cannot parse\")\n\t}\n\tq := url.Query()\n\thost := url.Host\n\tif !strings.Contains(host, \"youtube.com\") {\n\t\treturn \"\", fmt.Errorf(\"Invalid url: Host not youtube\")\n\t}\n\tid := q.Get(\"v\")\n\t// call := service.VideoListCall.Id(id)\n\t// results, err := call.Do()\n\t// if err != nil {\n\t// \tlog.Printf(\"swag\")\n\t// \treturn\n\t// }\n\treturn \"//\" + host + \"/embed/\" + id, nil\n}", "func checkUrl(writer http.ResponseWriter, request *http.Request) {\n\tvars := mux.Vars(request)\n\thostname := vars[\"hostname\"]\n\tquerypath := vars[\"querypath\"]\n\n\tresponse := APIResponse{}\n\terr := utils.ValidateUrl(hostname)\n\tif err != nil {\n\t\tresponse.BadRequest(err)\n\t\thttp_respond(response, writer)\n\t\treturn\n\t}\n\n\tdecodedPath, err := utils.URLDecode(querypath)\n\tif err != nil {\n\t\tresponse.BadRequest(err)\n\t\thttp_respond(response, writer)\n\t\treturn\n\t}\n\n\t// Generate URL service for querying the URL\n\turlService, err := services.NewUrlService(hostname, decodedPath, config.DBType, config.CacheType)\n\n\tif err != nil {\n\t\tutils.LogError(utils.LogFields{\"hostname\": hostname, \"path\": decodedPath}, err, \"Error getting URL\")\n\t\tresponse.InternalError(errors.New(\"An error occurred\"))\n\t\thttp_respond(response, writer)\n\t\treturn\n\t}\n\n\turlStatus, err := urlService.FindUrl()\n\tif err != nil {\n\t\tutils.LogError(utils.LogFields{\"hostname\": hostname, \"path\": decodedPath}, err, \"Error getting URL\")\n\t\tresponse.InternalError(errors.New(\"An error occurred\"))\n\t} else {\n\t\tresponse.Success(urlStatus)\n\t}\n\n\thttp_respond(response, writer)\n}", "func IsValidUrl(toTest string) bool {\n\t_, err := url.ParseRequestURI(toTest)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tu, err := url.Parse(toTest)\n\tif err != nil || u.Scheme == \"\" || u.Host == \"\" {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func IsValidUrl(toTest string) bool {\n\t_, err := url.ParseRequestURI(toTest)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tu, err := url.Parse(toTest)\n\tif err != nil || u.Scheme == \"\" || u.Host == \"\" {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (s *WebMethodStep) Validate() []error {\n\terrs := []error{}\n\tif _, ok := validMethods[s.Method]; !ok {\n\t\terrs = append(errs, fmt.Errorf(\"%s is not a valid method\", s.Method))\n\t}\n\n\tif u, err := url.Parse(s.URL); err != nil {\n\t\terrs = append(errs, fmt.Errorf(\"%s is not a valid URL: %s\", s.URL, err.Error()))\n\t} else if !u.IsAbs() {\n\t\terrs = append(errs, fmt.Errorf(\"%s is not an absolute URL\", s.URL))\n\t}\n\treturn errs\n}", "func (p Page) validateUrl(url *Url) error {\n\t// valid if link has 
querystring\n\tif len(url.Query()) > 0 {\n\t\treturn errQueryLink\n\t}\n\n\t// additional host validation\n\tif url.Host != p.Url.Host {\n\t\treturn errExternalLink\n\t}\n\n\t// remove duplicate http & https\n\tif url.Scheme != p.Url.Scheme {\n\t\treturn errInvalidScheme\n\t}\n\n\t// check if parent page already have this sub page\n\tcontain := p.inPage(url.String())\n\tif contain {\n\t\treturn errAlreadyInParent\n\t}\n\treturn nil\n}", "func (m *FileUploadURLInfo) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func isValidHTTPS(urlStr string) bool {\n\tu, err := url.ParseRequestURI(urlStr)\n\tif err != nil {\n\t\treturn false\n\t}\n\t// If a valid url is in form without slashes after scheme consider it invalid.\n\t// If a valid url doesn't have https as a scheme consider it invalid\n\tif u.Host == \"\" || u.Scheme != \"https\" {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func validateReferer(r *http.Request) bool {\n\turlPath := path.Clean(r.URL.Path)\n\tif strings.HasPrefix(urlPath, \"/assets/\") {\n\t\treturn true\n\t}\n\tif r.Method == http.MethodGet {\n\t\tallowedGets := []string{\"/general\", \"/rules\", \"/tls\", \"/\", \"/backlog\", \"/edit_task\",\n\t\t\t\"/add_task\", \"/login\"}\n\t\tfor _, path := range allowedGets {\n\t\t\tif path == urlPath {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\t// Get the Referer\n\treferer := r.Referer()\n\tu, err := url.Parse(referer)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn u.Host == originalHost(r)\n}", "func SupportedURL(repoURL string) bool {\n\treturn strings.HasSuffix(repoURL, \".git\") ||\n\t\tstrings.HasPrefix(repoURL, \"http\")\n}", "func (e ExternalID) MustURL() string {\n\tret, err := e.URL()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn ret\n}", "func verifyURL(myUrl string) string {\n\tu, _ := url.Parse(myUrl)\n\n\tif u.Scheme != \"\" {\n\t\treturn myUrl\n\t}\n\treturn \"http://\" + myUrl\n}", "func configValid(config UrlConfig) bool {\n\t// Get list of network interfaces\n\tvar ifaceNames map[string]bool\n\tinterfaces, err := net.Interfaces()\n\tif err != nil {\n\t\tlog.Printf(\"Could not get interfaces list: %s\", err)\n\t\treturn false\n\t}\n\tifaceNames = make(map[string]bool, len(interfaces))\n\tfor _, iface := range interfaces {\n\t\tifaceNames[iface.Name] = true\n\t\t// Find out biggest MTU\n\t\tif iface.MTU > MaxMTU {\n\t\t\tMaxMTU = iface.MTU\n\t\t}\n\t}\n\tvar validPath = regexp.MustCompile(VALID_PATH)\n\tfor path, url := range config {\n\t\tif !validPath.MatchString(path) {\n\t\t\tlog.Printf(\"Invalid path found: %s\", path)\n\t\t\treturn false\n\t\t}\n\t\turlParts, err := netUrl.Parse(url.Source)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Could not parse source url %s\", url.Source)\n\t\t\treturn false\n\t\t}\n\t\tif urlParts.Scheme == \"udp\" {\n\t\t\t// url.Source was like udp://123.4.5.6:123\n\t\t\thost, port, err := net.SplitHostPort(urlParts.Host)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Could not parse host:port source pair of path %s: %s\", path, urlParts.Host)\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tipAddr := net.ParseIP(host)\n\t\t\tif ipAddr == nil {\n\t\t\t\tlog.Printf(\"Invalid ip address in source %s: %s\", path, host)\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tdPort, err := strconv.Atoi(port)\n\t\t\tif dPort == 0 || err != nil {\n\t\t\t\tlog.Printf(\"Invalid port in source %s: %s\", path, port)\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif _, ok := ifaceNames[url.Interface]; !ok 
{\n\t\t\t\tlog.Printf(\"Interface for source %s not found: %s\", path, url.Interface)\n\t\t\t\treturn false\n\t\t\t}\n\t\t} else if urlParts.Scheme == \"http\" {\n\t\t\t// Anything will work if URL was parsed with http scheme\n\t\t} else {\n\t\t\tlog.Printf(\"Invalid address in source %s: %s\", path, url.Source)\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func ValidateURL(uri, host string) (*url.URL, error) {\n\tvar finalURI *url.URL\n\tvar err error\n\tif strings.Contains(uri, host) {\n\t\tif finalURI, err = url.Parse(uri); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Couldn't parse the url inside href argument: %s\", err.Error())\n\t\t}\n\t\treturn finalURI, nil\n\t}\n\tif string(uri[0]) == \"/\" {\n\t\tif finalURI, err = url.Parse(fmt.Sprintf(\"https://%s%s\", host, uri)); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Couldn't parse the url inside href argument: %s\", err.Error())\n\t\t}\n\t\treturn finalURI, nil\n\t}\n\treturn nil, nil\n}", "func CheckAPI(url string) bool {\n\treturn true\n}", "func InvalidOpSrc(t *testing.T) {\n\tt.Run(\"invalid-endpoint\", testOpSrcWithInvalidEndpoint)\n\tt.Run(\"invalid-url\", testOpSrcWithInvalidURL)\n\tt.Run(\"nonexistent-registry-namespace\", testOpSrcWithNonexistentRegistryNamespace)\n}", "func isHttpURL(fl FieldLevel) bool {\n\tif !isURL(fl) {\n\t\treturn false\n\t}\n\n\tfield := fl.Field()\n\tswitch field.Kind() {\n\tcase reflect.String:\n\n\t\ts := strings.ToLower(field.String())\n\n\t\turl, err := url.Parse(s)\n\t\tif err != nil || url.Host == \"\" {\n\t\t\treturn false\n\t\t}\n\n\t\treturn url.Scheme == \"http\" || url.Scheme == \"https\"\n\t}\n\n\tpanic(fmt.Sprintf(\"Bad field type %T\", field.Interface()))\n}" ]
[ "0.61628383", "0.60171264", "0.5781071", "0.56981534", "0.566405", "0.56014615", "0.5594309", "0.557612", "0.5561288", "0.55422866", "0.5511598", "0.55095285", "0.5507065", "0.5505966", "0.54857326", "0.54577285", "0.54563826", "0.54048747", "0.5387832", "0.5376122", "0.5342915", "0.5334014", "0.53227633", "0.53095585", "0.5296246", "0.5283971", "0.5274121", "0.5262112", "0.52591556", "0.5249164", "0.52384037", "0.52254814", "0.52205193", "0.52131546", "0.52056557", "0.51937157", "0.5191545", "0.51885444", "0.51608557", "0.51586515", "0.5106536", "0.5088233", "0.5087956", "0.50806105", "0.50798213", "0.5072281", "0.5065651", "0.50654495", "0.5051352", "0.5033815", "0.5017831", "0.5017184", "0.50103503", "0.50008696", "0.4995883", "0.49946353", "0.4987563", "0.4983645", "0.49766624", "0.49699634", "0.49525803", "0.4947006", "0.49370328", "0.49316823", "0.49252012", "0.49182034", "0.49176982", "0.49170038", "0.49067366", "0.4901701", "0.489169", "0.489169", "0.4881032", "0.48798624", "0.48764658", "0.48753586", "0.48691028", "0.48640725", "0.48616907", "0.48613515", "0.48429933", "0.48233733", "0.48203683", "0.4820214", "0.48201293", "0.48163342", "0.48163342", "0.48117578", "0.48091677", "0.4809051", "0.48069605", "0.48059875", "0.48059526", "0.47906646", "0.4789213", "0.47881988", "0.47771358", "0.47759563", "0.47697666", "0.4767706" ]
0.65448236
0
Check for failure when the query is too short to be valid.
func TestShortQuery(t *testing.T) {
	var qerr *queryError
	doh, _ := NewTransport(testURL, ips, nil, nil, nil)
	_, err := doh.Query([]byte{})
	if err == nil {
		t.Error("Empty query should fail")
	} else if !errors.As(err, &qerr) {
		t.Errorf("Wrong error type: %v", err)
	} else if qerr.status != BadQuery {
		t.Errorf("Wrong error status: %d", qerr.status)
	}

	_, err = doh.Query([]byte{1})
	if err == nil {
		t.Error("One byte query should fail")
	} else if !errors.As(err, &qerr) {
		t.Errorf("Wrong error type: %v", err)
	} else if qerr.status != BadQuery {
		t.Errorf("Wrong error status: %d", qerr.status)
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func IsOverQueryLimit(err error) bool {\n\tif e, ok := err.(*apiError); ok {\n\t\treturn e.Status == \"OVER_QUERY_LIMIT\"\n\t}\n\treturn false\n}", "func check_args(parsed_query []string, num_expected int) bool {\n\treturn (len(parsed_query) >= num_expected)\n}", "func (v *verifier) MinLength(length int) *verifier {\n\treturn v.addVerification(\"MinLength\", len(v.Query) >= length)\n}", "func validateTimeout(timeout time.Duration) error {\n\tif timeout < time.Millisecond {\n\t\treturn nosqlerr.NewIllegalArgument(\"Timeout must be greater than or equal to 1 millisecond\")\n\t}\n\n\treturn nil\n}", "func hasValidTopValuesQuery(query interface{}) error {\n\tqueryConverted := query.(map[string]interface{})\n\t// check query limit\n\tif len(queryConverted) > 5 {\n\t\treturn errors.New(\"Top Values Validator: the top values query has a limit of 5 queries by request.\")\n\t}\n\t// check column limit\n\tfor _, value := range queryConverted {\n\t\tif len(value.(map[string]interface{})) > 6 {\n\t\t\treturn errors.New(\"Top Values Validator: the query exceeds the limit of columns per query in request\")\n\t\t}\n\t}\n\treturn nil\n}", "func validateNeighborsQuery(value string) (string, error) {\n\tif len(value) < 3 {\n\t\t// Maybe make configurable,\n\t\t// A length of 3 would be sufficient for \"DFN\" and\n\t\t// other shorthands.\n\t\treturn \"\", ErrQueryTooShort\n\t}\n\treturn value, nil\n}", "func ValidateQuery(query string) (bool, error) {\n\n\t// simple sql pattern\n\tpattern := \"(select|SELECT) ([a-zA-Z]+(,[a-zA-Z]+)*) (from|FROM) [a-zA-Z]+(\\\\.[a-zA-Z]+)* ((limit|LIMIT) [0-9]+)? ((orderby|ORDERBY) (asc|desc|ASC|DESC))?;\"\n\tmatched, err := regexp.MatchString(pattern, query)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn matched, nil\n}", "func hasValidCountQuery(query interface{}) error {\n\tswitch query.(type) {\n\tcase []interface{}:\n\t\tquerySize := len(query.([]interface{}))\n\t\tif querySize > 10 {\n\t\t\treturn errors.New(\"Count Query Validator: the query count entity has a limit of 10 queries by request.\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func validatePrefixQuery(value string) (string, error) {\n\t// We should at least provide 2 chars\n\tif len(value) < 2 {\n\t\treturn \"\", ErrQueryTooShort\n\t}\n\tif !strings.Contains(value, \":\") && !strings.Contains(value, \".\") {\n\t\treturn \"\", ErrQueryIncomplete\n\t}\n\treturn value, nil\n}", "func TooShort(name, in string, min int64, value interface{}) *Validation {\n\tvar msg string\n\tif in == \"\" {\n\t\tmsg = fmt.Sprintf(tooShortMessageNoIn, name, min)\n\t} else {\n\t\tmsg = fmt.Sprintf(tooShortMessage, name, in, min)\n\t}\n\n\treturn &Validation{\n\t\tcode: TooShortFailCode,\n\t\tName: name,\n\t\tIn: in,\n\t\tValue: value,\n\t\tValid: min,\n\t\tmessage: msg,\n\t}\n}", "func (l Limit) Valid() error {\n\t_, err := strconv.Atoi(string(l))\n\tif err != nil {\n\t\tlog.Println(\"error while validating query param: limit\")\n\t\tlog.Printf(\"value: %s, error: %s\", string(l), err.Error())\n\t\treturn errors.New(\"invalid query param: limit\")\n\t}\n\treturn nil\n}", "func hasValidDataExtractionQuery(query interface{}) error {\n\tqueryConverted := query.(map[string]interface{})\n\tif val, ok := queryConverted[\"columns\"]; ok {\n\t\tcolumns := reflect.ValueOf(val)\n\t\tif columns.Len() > 10 {\n\t\t\treturn errors.New(\"Data Extraction Validator: The key 'columns' in data extraction result must have up to 10 columns.\")\n\t\t}\n\t}\n\treturn nil\n}", "func (q *Query) Validate() error {\n\tif q.rang.Start > q.rang.End 
{\n\t\treturn fmt.Errorf(\"range error: start time is greater than end time\")\n\t}\n\tif ok := tsdb.VerifyChainPathExists(q.dbPath); !ok {\n\t\treturn fmt.Errorf(\"dbpath error: path doesn't exists\")\n\t}\n\n\tif q.typ != TypeFirst && q.typ != TypeRange {\n\t\treturn fmt.Errorf(\"typ error: invalid query type\")\n\t}\n\treturn nil\n}", "func validateShortID(id string) error {\n\tmaxIDLen := 53\n\tif len(id) > maxIDLen {\n\t\treturn fmt.Errorf(IDLengthExceeded, maxIDLen)\n\t}\n\treturn validation.ValidateID(id)\n}", "func (s SQLQuery) Validate() error {\n\tstr := strings.ToLower(string(s))\n\tif !strings.HasPrefix(str, \"select\") {\n\t\treturn ErrNotSQLQuery\n\t}\n\treturn nil\n}", "func isInsufficientSpace(err error) bool {\n\treturn strings.Contains(strings.ToLower(err.Error()), \"insufficient free space\")\n}", "func CheckUnlimitedSize(size int) error {\n\treturn nil\n}", "func CheckLimitNameLen(name string) error {\n\tif utf8.RuneCountInString(name) > 1000 {\n\t\treturn fmt.Errorf(\"limit up to 1000 sign for name of key\")\n\t}\n\treturn nil\n}", "func checkUserIDShort(id string) (err error) {\n\tif l := len(id); l < minUserChars {\n\t\treturn ErrorUserIDShort\n\t}\n\treturn\n}", "func invalidLength(offset, length, sliceLength int) bool {\n\treturn offset+length < offset || offset+length > sliceLength\n}", "func isInsufficientSpace(err error) bool {\n\treturn strings.Contains(err.Error(), \"insufficient free space\")\n}", "func IsExceedsLimit(err error) bool {\n\treturn errors2.IsCausedBy(err, ErrExceedsLimit)\n}", "func TestTimeoutTooShort(t *testing.T) {\n\t_, err := delayedBatcher.SendRequestWithTimeout(\n\t\t&nonEmptyRequestBody,\n\t\tdelayedBatcher.BatchTimeout,\n\t)\n\tif err == nil {\n\t\tt.Errorf(\n\t\t\t\"Expecting error when timeout too short %v\",\n\t\t\tdelayedBatcher.BatchTimeout,\n\t\t)\n\t}\n}", "func validateQueryParameter(field *surface_v1.Field) {\n\t_, isScalar := protoBufScalarTypes[field.NativeType]\n\tif !(field.Kind == surface_v1.FieldKind_SCALAR ||\n\t\t(field.Kind == surface_v1.FieldKind_ARRAY && isScalar) ||\n\t\t(field.Kind == surface_v1.FieldKind_REFERENCE)) {\n\t\tlog.Println(\"The query parameter with the Name \" + field.Name + \" is invalid. \" +\n\t\t\t\"Note that fields which are mapped to URL query parameters must have a primitive type or\" +\n\t\t\t\" a repeated primitive type or a non-repeated message type. 
\" +\n\t\t\t\"See: https://github.com/googleapis/googleapis/blob/master/google/api/http.proto#L118 for more information.\")\n\t}\n\n}", "func IsInvalidQuery(err error) bool {\n\treturn unwrapError(err) == ErrInvalidQuery\n}", "func TestValidateMaxQueryLength(t *testing.T) {\n\tt.Parallel()\n\tctx := context.Background()\n\tnow := time.Now()\n\tfor _, tc := range []struct {\n\t\tname string\n\t\tstart time.Time\n\t\tend time.Time\n\t\texpectedStartMs int64\n\t\texpectedEndMs int64\n\t\tmaxQueryLength time.Duration\n\t\texceedQueryLength bool\n\t}{\n\t\t{\n\t\t\tname: \"normal params, not hit max query length\",\n\t\t\tstart: now.Add(-time.Hour),\n\t\t\tend: now,\n\t\t\texpectedStartMs: util.TimeToMillis(now.Add(-time.Hour)),\n\t\t\texpectedEndMs: util.TimeToMillis(now),\n\t\t\tmaxQueryLength: 24 * time.Hour,\n\t\t\texceedQueryLength: false,\n\t\t},\n\t\t{\n\t\t\tname: \"normal params, hit max query length\",\n\t\t\tstart: now.Add(-100 * time.Hour),\n\t\t\tend: now,\n\t\t\texpectedStartMs: util.TimeToMillis(now.Add(-100 * time.Hour)),\n\t\t\texpectedEndMs: util.TimeToMillis(now),\n\t\t\tmaxQueryLength: 24 * time.Hour,\n\t\t\texceedQueryLength: true,\n\t\t},\n\t\t{\n\t\t\tname: \"negative start\",\n\t\t\tstart: time.Unix(-1000, 0),\n\t\t\tend: now,\n\t\t\texpectedStartMs: 0,\n\t\t\texpectedEndMs: util.TimeToMillis(now),\n\t\t\tmaxQueryLength: 24 * time.Hour,\n\t\t\texceedQueryLength: true,\n\t\t},\n\t} {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\t//parallel testing causes data race\n\t\t\tlimits := DefaultLimitsConfig()\n\t\t\toverrides, err := validation.NewOverrides(limits, nil)\n\t\t\trequire.NoError(t, err)\n\t\t\tstartMs, endMs, err := validateQueryTimeRange(ctx, \"test\", util.TimeToMillis(tc.start), util.TimeToMillis(tc.end), overrides, 0)\n\t\t\trequire.NoError(t, err)\n\t\t\tstartTime := model.Time(startMs)\n\t\t\tendTime := model.Time(endMs)\n\t\t\tif tc.maxQueryLength > 0 {\n\t\t\t\trequire.Equal(t, tc.exceedQueryLength, endTime.Sub(startTime) > tc.maxQueryLength)\n\t\t\t}\n\t\t})\n\t}\n}", "func (r *QueryRequest) Validate() error {\n\tif r.SQL == \"\" {\n\t\treturn fmt.Errorf(\"SQL was empty\")\n\t}\n\treturn nil\n}", "func (v *verifier) MaxLength(length int) *verifier {\n\treturn v.addVerification(\"MaxLength\", len(v.Query) <= length)\n}", "func validateStatement(q string) error {\n\tif strings.TrimSpace(q) == \"\" {\n\t\treturn ERR_STMT_EMPTY\n\t}\n\treturn nil\n}", "func queryFailed(t *testing.T, err error) {\n\tt.Fatalf(\"Failed to query tree: %s\\n\", err.Error())\n}", "func (q *Query) validate(timeFirstBlock, timeLastBlock int64, l int) []byte {\n\tif q.Range == nil {\n\t\treturn nil\n\t}\n\tif q.Range.Start < q.Range.End {\n\t\treturn q.ReturnMessageResponse(\"ERROR_fromTimestamp_LESS_THAN_tillTimestamp\")\n\t}\n\tif q.Range.Start < timeLastBlock || q.Range.End > timeFirstBlock {\n\t\treturn q.ReturnNILResponse()\n\t}\n\tif l == 0 {\n\t\treturn q.ReturnNILResponse()\n\t}\n\treturn nil\n}", "func CheckTrunc(raw *Raw) error {\n\tif raw.Trunc && raw.TruncSize < 0 {\n\t\treturn errors.Wrapf(cdnerrors.ErrInvalidValue, \"the truncSize: %d is a negative integer\", raw.Length)\n\t}\n\treturn nil\n}", "func TestQueryError(t *testing.T) {\n\tdb, mock := newMock(t)\n\tmock.ExpectQuery(initialQueryRegex()).WillReturnError(errors.New(\"Query failed.\"))\n\n\tevs := LoadAll(context.Background(), db, initialQuery)\n\tsave := <-evs.Saves()\n\tassertMapLength(t, 0, save.Requests)\n\tassertMapLength(t, 0, save.Imps)\n\tassertExpectationsMet(t, mock)\n}", "func (q *Query) Validate() 
error {\n\tif q.Project == \"\" {\n\t\treturn errors.New(\"Invalid query: missing project\")\n\t}\n\tif q.Cluster == \"\" {\n\t\treturn errors.New(\"Invalid query: missing cluster\")\n\t}\n\tif q.Namespace == \"\" {\n\t\treturn errors.New(\"Invalid query: missing namespace\")\n\t}\n\tif q.BuildID == \"\" {\n\t\treturn errors.New(\"Invalid query: missing build id\")\n\t}\n\treturn nil\n}", "func (v *verifier) Length(length int) *verifier {\n\treturn v.addVerification(\"Length\", len(v.Query) == length)\n}", "func (qb QueryBuilder) validate() error {\n\tif qb.translator == nil {\n\t\treturn apierr.NewInternalError(fmt.Errorf(\"QueryBuilder tries to build with translator value: nil\"))\n\t}\n\n\tif !qb.nodes.isNodeValuesEmpty() {\n\t\t// node metric\n\t\tif !qb.nodes.isNodeValuesValid() {\n\t\t\treturn apierr.NewInternalError(fmt.Errorf(\"invalid nodes parameter is set to QueryBuilder\"))\n\t\t}\n\t\tif qb.namespace != \"\" {\n\t\t\treturn apierr.NewInternalError(fmt.Errorf(\"both nodes and namespace are provided, expect only one of them.\"))\n\t\t}\n\t\tif !qb.pods.isPodValuesEmpty() {\n\t\t\treturn apierr.NewInternalError(fmt.Errorf(\"both nodes and pods are provided, expect only one of them.\"))\n\t\t}\n\t} else {\n\t\t// pod metric\n\t\tif qb.pods.isPodValuesEmpty() {\n\t\t\treturn apierr.NewInternalError(fmt.Errorf(\"no resources are specified for QueryBuilder, expected one of nodes or pods should be used\"))\n\t\t}\n\t\tif !qb.pods.isPodValuesValid() {\n\t\t\treturn apierr.NewInternalError(fmt.Errorf(\"invalid pods parameter is set to QueryBuilder\"))\n\t\t}\n\t\tnumPods := len(qb.pods.getQuotedPodNames())\n\t\tif numPods > MaxNumOfArgsInOneOfFilter {\n\t\t\treturn apierr.NewInternalError(fmt.Errorf(\"QueryBuilder tries to build with %v pod list, but allowed limit is %v pods\", numPods, MaxNumOfArgsInOneOfFilter))\n\t\t}\n\t}\n\n\tif qb.metricValueType == \"DISTRIBUTION\" && !qb.translator.supportDistributions {\n\t\treturn apierr.NewBadRequest(\"distributions are not supported\")\n\t}\n\n\tif qb.enforceContainerType && !qb.translator.useNewResourceModel {\n\t\treturn apierr.NewInternalError(fmt.Errorf(\"illegal state! 
Container metrics works only with new resource model\"))\n\t}\n\n\treturn nil\n}", "func isQueryExpired(expires int64) bool {\n\tif expires < time.Now().Unix() {\n\t\tlog.Info(\"Query expired\", \"expirationTime\", expires, \"now\", time.Now().Unix())\n\t\treturn true\n\t}\n\tlog.Info(\"Query is not expired\")\n\treturn false\n}", "func validateExecution(execution *Execution) error {\n\tif execution.ShardID < 0 {\n\t\treturn fmt.Errorf(\"invalid ShardID: %v\", execution.ShardID)\n\t}\n\tif len(execution.DomainID) == 0 {\n\t\treturn errors.New(\"empty DomainID\")\n\t}\n\tif len(execution.WorkflowID) == 0 {\n\t\treturn errors.New(\"empty WorkflowID\")\n\t}\n\tif len(execution.RunID) == 0 {\n\t\treturn errors.New(\"empty RunID\")\n\t}\n\tif execution.State < persistence.WorkflowStateCreated || execution.State > persistence.WorkflowStateCorrupted {\n\t\treturn fmt.Errorf(\"unknown workflow state: %v\", execution.State)\n\t}\n\treturn nil\n}", "func simplifyResultsMismatchedQuery(t *testing.T, query string) string {\n\tt.Helper()\n\tmcmp, closer := start(t)\n\tdefer closer()\n\n\t_, err := mcmp.ExecAllowAndCompareError(query)\n\tif err == nil {\n\t\tt.Fatalf(\"query (%s) does not error\", query)\n\t} else if !strings.Contains(err.Error(), \"mismatched\") {\n\t\tt.Fatalf(\"query (%s) does not error with results mismatched\\nError: %v\", query, err)\n\t}\n\n\trequire.NoError(t, utils.WaitForAuthoritative(t, keyspaceName, \"emp\", clusterInstance.VtgateProcess.ReadVSchema))\n\trequire.NoError(t, utils.WaitForAuthoritative(t, keyspaceName, \"dept\", clusterInstance.VtgateProcess.ReadVSchema))\n\n\tformal, err := vindexes.LoadFormal(\"svschema.json\")\n\trequire.NoError(t, err)\n\tvSchema := vindexes.BuildVSchema(formal)\n\tvSchemaWrapper := &vschemawrapper.VSchemaWrapper{\n\t\tV: vSchema,\n\t\tVersion: planbuilder.Gen4,\n\t}\n\n\tstmt, err := sqlparser.Parse(query)\n\trequire.NoError(t, err)\n\n\tsimplified := simplifier.SimplifyStatement(\n\t\tstmt.(sqlparser.SelectStatement),\n\t\tvSchemaWrapper.CurrentDb(),\n\t\tvSchemaWrapper,\n\t\tfunc(statement sqlparser.SelectStatement) bool {\n\t\t\tq := sqlparser.String(statement)\n\t\t\t_, newErr := mcmp.ExecAllowAndCompareError(q)\n\t\t\tif newErr == nil {\n\t\t\t\treturn false\n\t\t\t} else {\n\t\t\t\treturn strings.Contains(newErr.Error(), \"mismatched\")\n\t\t\t}\n\t\t},\n\t)\n\n\treturn sqlparser.String(simplified)\n}", "func (q *TimeTableQuery) Validate() error {\n\tif q.OriginCountryCode == \"\" {\n\t\treturn errors.New(\"OriginCountryCode is required\")\n\t}\n\n\tif q.OriginPostCode == \"\" {\n\t\treturn errors.New(\"OriginPostCode is required\")\n\t}\n\n\tif q.DestinationCountryCode == \"\" {\n\t\treturn errors.New(\"DestinationCountryCode is required\")\n\t}\n\n\tif q.DestinationPostCode == \"\" {\n\t\treturn errors.New(\"DestinationPostCode is required\")\n\t}\n\n\tif q.Date == \"\" {\n\t\treturn errors.New(\"Date is required\")\n\t}\n\n\treturn nil\n}", "func (handler *AllowAllHandler) CheckQuery(sqlQuery string, parsedQuery sqlparser.Statement) (bool, error) {\n\t// allow any query and stop further checks\n\thandler.logger.Infof(\"Query has been allowed by Allowall handler\")\n\treturn false, nil\n}", "func validateString(fl validator.FieldLevel) bool {\n\tvar err error\n\n\tlimit := 255\n\tparam := strings.Split(fl.Param(), `:`)\n\tif len(param) > 0 {\n\t\tlimit, err = strconv.Atoi(param[0])\n\t\tif err != nil {\n\t\t\tlimit = 255\n\t\t}\n\t}\n\n\tif lengthOfString := utf8.RuneCountInString(fl.Field().String()); lengthOfString > limit 
{\n\t\treturn false\n\t}\n\n\treturn true\n}", "func validateTimeout(v string) error {\n\tduration, err := time.ParseDuration(v)\n\tif err != nil {\n\t\treturn err\n\t}\n\td := struct {\n\t\tDuration time.Duration `validate:\"min=0\"`\n\t}{duration}\n\treturn useValidator(timeoutFlag, d)\n}", "func (e QueryValidationError) Cause() error { return e.cause }", "func isPacketTooBig(err error) bool {\n\treturn false\n}", "func CheckSizeMin(buf []byte, min int, descrip string) {\n\tif len(buf) < min {\n\t\tpanic(fmt.Sprintf(\"Incorrect %s buffer size, expected (>%d), got (%d).\", descrip, min, len(buf)))\n\t}\n}", "func (q *queue) checkDataSize() error {\n\tif q.dataPageFct.Size()+q.indexPageFct.Size() > q.dataSizeLimit {\n\t\treturn ErrExceedingTotalSizeLimit\n\t}\n\treturn nil\n}", "func CheckArgsLength(args []string, expectedLength int) error {\r\n\tif len(args) != expectedLength {\r\n\t\treturn fmt.Errorf(\"invalid number of arguments. Expected %v, got %v\", expectedLength, len(args))\r\n\t}\r\n\treturn nil\r\n}", "func isCursorExhausted(err error) bool {\n\treturn strings.Contains(err.Error(), \"EXHAUSTED_CURSOR\")\n}", "func IsLimitExceeded(data string) bool {\n\tdata = strings.ToLower(data)\n\treturn strings.Contains(data, \"limit exceeded\")\n}", "func ValidateSql(val string) (int, error) {\n\tstmt, err := sqlparser.Parse(val)\n\tif err != nil {\n\t\treturn BAD, fmt.Errorf(\"SQL Error in %s\", val)\n\t}\n\tswitch smt := stmt.(type) {\n\tcase *sqlparser.DDL:\n\t\tif smt.Action == \"drop\" {\n\t\t\treturn DROP, fmt.Errorf(\"DROP statements are not allowed\")\n\t\t}\n\tcase *sqlparser.Select:\n\t\treturn SELECT, nil\n\t}\n\treturn OTHER, nil\n}", "func (cmd *Command) checkArgs(args []string) {\n\tif len(args) < cmd.MinArgs {\n\t\tsyntaxError()\n\t\tfmt.Fprintf(os.Stderr, \"Command %s needs %d arguments mininum\\n\", cmd.Name, cmd.MinArgs)\n\t\tos.Exit(1)\n\t} else if len(args) > cmd.MaxArgs {\n\t\tsyntaxError()\n\t\tfmt.Fprintf(os.Stderr, \"Command %s needs %d arguments maximum\\n\", cmd.Name, cmd.MaxArgs)\n\t\tos.Exit(1)\n\t}\n}", "func (pq *PatientQuery) Validate() error {\n\tswitch pq.Kind() {\n\tcase KindCitizen:\n\t\tif len(pq.ID) != 10 && strings.HasPrefix(pq.ID, citizenPrefix) {\n\t\t\treturn ErrBadNationalID\n\t\t}\n\t\tif len(pq.BirthDate) <= 4 {\n\t\t\treturn ErrBadBirthDate\n\t\t}\n\tcase KindExpat:\n\t\t// @TODO(kl): find the proper number\n\t\tif len(pq.ID) <= 3 && strings.HasPrefix(pq.ID, expatPrefix) {\n\t\t\treturn ErrBadIqamaID\n\t\t}\n\t\tif len(pq.BirthDate) <= 4 {\n\t\t\treturn ErrBadBirthDate\n\t\t}\n\tdefault:\n\t\treturn ErrUnknownPatientType\n\t}\n\n\treturn nil\n}", "func isInvalidSchemaName(err error) bool {\n\tif pqErr, ok := err.(*pq.Error); ok && pqErr.Code == \"3F000\" {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (a *Api) queryAPITokenIsValid() (qry string) {\n\treturn `SELECT EXISTS(SELECT 1 FROM apis WHERE token = $1 AND is_active = true)`\n}", "func (o Offset) Valid() error {\n\t_, err := strconv.Atoi(string(o))\n\tif err != nil {\n\t\tlog.Println(\"error while validating query param: offset\")\n\t\tlog.Printf(\"value: %s, error: %s\", string(o), err.Error())\n\t\treturn errors.New(\"invalid query param: offset\")\n\t}\n\treturn nil\n}", "func (e QueryValidationError) Reason() string { return e.reason }", "func checkUserIDLong(id string) (err error) {\n\tif l := len(id); l > maxUserChars {\n\t\treturn ErrorUserIDLong\n\t}\n\treturn\n}", "func verifyQuery(query rainslib.MessageSectionQuery, msgSender msgSectionSender) {\n\tif 
contextInvalid(query.GetContext()) {\n\t\tsendNotificationMsg(msgSender.Token, msgSender.Sender, rainslib.NTRcvInconsistentMsg, \"invalid context\")\n\t\treturn //already logged, that context is invalid\n\t}\n\tif !isQueryExpired(query.GetExpiration()) {\n\t\tprocessQuery(msgSender)\n\t}\n}", "func checkNumberOfArgs(name string, nargs, nresults, min, max int) error {\n\tif min == max {\n\t\tif nargs != max {\n\t\t\treturn ExceptionNewf(TypeError, \"%s() takes exactly %d arguments (%d given)\", name, max, nargs)\n\t\t}\n\t} else {\n\t\tif nargs > max {\n\t\t\treturn ExceptionNewf(TypeError, \"%s() takes at most %d arguments (%d given)\", name, max, nargs)\n\t\t}\n\t\tif nargs < min {\n\t\t\treturn ExceptionNewf(TypeError, \"%s() takes at least %d arguments (%d given)\", name, min, nargs)\n\t\t}\n\t}\n\n\tif nargs > nresults {\n\t\treturn ExceptionNewf(TypeError, \"Internal error: not enough arguments supplied to Unpack*/Parse*\")\n\t}\n\treturn nil\n}", "func LimitsTimeoutValidation(timeout *int) bool {\n\tif timeout == nil {\n\t\treturn true\n\t}\n\tif *timeout < 100 || *timeout > 300000 {\n\t\twskprint.PrintlnOpenWhiskWarning(wski18n.T(wski18n.ID_WARN_LIMITS_TIMEOUT))\n\t\treturn false\n\t}\n\treturn true\n}", "func CheckQueryExist(query string, crntDate string) int {\n\tdb := dbConn()\n\tdefer db.Close()\n\tqryExist := 0\n\tselStmt := db.QueryRow(\"SELECT COUNT(*) FROM query WHERE QueryName=? AND DATE(CreatedDate)=DATE(?)\", query, crntDate)\n\terr := selStmt.Scan(&qryExist)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\treturn qryExist\n}", "func (pr *prepareResult) check(qd *queryDescr) error {\n\tcall := qd.kind == qkCall\n\tif call != pr.fc.IsProcedureCall() {\n\t\treturn fmt.Errorf(\"function code mismatch: query descriptor %s - function code %s\", qd.kind, pr.fc)\n\t}\n\n\tif !call {\n\t\t// only input parameters allowed\n\t\tfor _, f := range pr.parameterFields {\n\t\t\tif f.Out() {\n\t\t\t\treturn fmt.Errorf(\"invalid parameter %s\", f)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (a AuthService) ValidateQueryPerms(q string) (string, error) {\n\tvar newQ CDBQuery\n\t// Unmarshal in to a CDBQuery\n\tif err := json.Unmarshal([]byte(q), &newQ); err != nil {\n\t\treturn \"\", errQueryMarshal(err)\n\t}\n\n\t// Pick out the doctype from the query\n\tresource := newQ.Selector[\"docType\"]\n\n\tif resource == nil || resource == \"\" {\n\t\treturn \"\", errQueryDocType()\n\t}\n\n\tfor _, role := range a.userRoles {\n\t\t// Lookup permissions\n\t\truleFunc, ok := a.rolePermissions[role].QueryPermissions[resource.(string)]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Construct rules from the ruleFunc callback\n\t\trules := ruleFunc(a.userID, a.userRoles)\n\t\tif !rules.Allow {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Enforce any selector appends\n\t\tfor k, v := range rules.SelectorAppend {\n\t\t\tnewQ.Selector[k] = v\n\t\t}\n\n\t\t// Enforce any filter queries (no need to check for nil first)\n\t\tnewQ.Fields = rules.FieldFilter\n\n\t\t// Marshal back to json bytes so it can be sent back as a string\n\t\tnewQBytes, err := json.Marshal(newQ)\n\t\tif err != nil {\n\t\t\treturn \"\", errMarshal(err)\n\t\t}\n\n\t\treturn string(newQBytes), nil\n\t}\n\n\treturn \"\", errQuery(resource.(string))\n}", "func TooLong(name, in string, max int64, value interface{}) *Validation {\n\tvar msg string\n\tif in == \"\" {\n\t\tmsg = fmt.Sprintf(tooLongMessageNoIn, name, max)\n\t} else {\n\t\tmsg = fmt.Sprintf(tooLongMessage, name, in, max)\n\t}\n\treturn &Validation{\n\t\tcode: TooLongFailCode,\n\t\tName: 
name,\n\t\tIn: in,\n\t\tValue: value,\n\t\tValid: max,\n\t\tmessage: msg,\n\t}\n}", "func CheckNumberIsOkOrDie(limit uint64) {\n\n\tif limit > maxUint64 {\n\t\tpanicMsg := GeneratePanicMsg(limit)\n\t\tpanic(panicMsg)\n\t}\n}", "func (o *GetFetchParams) validateIndex(formats strfmt.Registry) error {\n\n\tif err := validate.MinimumInt(\"index\", \"query\", int64(*o.Index), 1, false); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (r *QueryRequest) Valid() error {\n\tif !r.OrganizationID.Valid() {\n\t\treturn &errors.Error{\n\t\t\tMsg: \"organization_id is not valid\",\n\t\t\tCode: errors.EInvalid,\n\t\t}\n\t}\n\treturn r.Authorization.Valid()\n}", "func (c *QueryClient) Validate() error {\n\tif correct, _ := regexp.MatchString(\"api.newrelic.com/v1/accounts/[0-9]+/query\", c.URL.String()); !correct {\n\t\treturn fmt.Errorf(\"invalid query endpoint %s\", c.URL)\n\t}\n\n\tif len(c.QueryKey) < 1 {\n\t\treturn fmt.Errorf(\"not a valid license key: %s\", c.QueryKey)\n\t}\n\treturn nil\n}", "func (v *verifier) IsntLength(length int) *verifier {\n\treturn v.addVerification(\"Length\", len(v.Query) != length)\n}", "func TestHangAfterError(t *testing.T) {\n\tconnDB := openConnection(t)\n\tdefer closeConnection(t, connDB)\n\n\trows, err := connDB.QueryContext(ctx, \"SELECT 1\")\n\tdefer rows.Close()\n\n\tassertNoErr(t, err)\n\tassertNext(t, rows)\n\tassertNoNext(t, rows)\n\n\trows, err = connDB.QueryContext(ctx, \"SELECT 1+'abcd'\")\n\tverr, ok := err.(*VError)\n\tif !ok {\n\t\tt.Fatalf(\"failed to extract error VError: %v\", err)\n\t}\n\tassertEqual(t, verr.SQLState, \"22V02\")\n\tassertEqual(t, verr.Severity, \"ERROR\")\n\tassertEqual(t, verr.Routine, \"scanint8\")\n\tassertEqual(t, verr.ErrorCode, \"3681\")\n\tassertErr(t, err, \"Invalid input syntax for integer\")\n\n\trows, err = connDB.QueryContext(ctx, \"SELECT 2\")\n\tdefer rows.Close()\n\n\tassertNoErr(t, err)\n\tassertNext(t, rows)\n\tassertNoNext(t, rows)\n}", "func TruncateTooLongNumber(number *PhoneNumber) bool {\n\tif IsValidNumber(number) {\n\t\treturn true\n\t}\n\tnumberCopy := &PhoneNumber{}\n\tproto.Merge(numberCopy, number)\n\tnationalNumber := number.GetNationalNumber()\n\tnationalNumber /= 10\n\tnumberCopy.NationalNumber = proto.Uint64(nationalNumber)\n\tif IsPossibleNumberWithReason(numberCopy) == TOO_SHORT || nationalNumber == 0 {\n\t\treturn false\n\t}\n\tfor !IsValidNumber(numberCopy) {\n\t\tnationalNumber /= 10\n\t\tnumberCopy.NationalNumber = proto.Uint64(nationalNumber)\n\t\tif IsPossibleNumberWithReason(numberCopy) == TOO_SHORT ||\n\t\t\tnationalNumber == 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tnumber.NationalNumber = proto.Uint64(nationalNumber)\n\treturn true\n}", "func (m *MongoSearchSuite) TestInvalidSearchParameterPanics(c *C) {\n\tq := Query{\"Condition\", \"abatement=2012\"}\n\tc.Assert(func() { m.MongoSearcher.CreateQuery(q) }, Panics, createInvalidSearchError(\"SEARCH_NONE\", \"Error: no processable search found for Condition search parameters \\\"abatement\\\"\"))\n}", "func Length(param string, min int, max int) error {\n\tlength := len(param)\n\tif min != -1 && length < min {\n\t\treturn fmt.Errorf(\"length of string %s %d, expected > %d\", param, length, min)\n\t}\n\tif max != -1 && length > max {\n\t\treturn fmt.Errorf(\"length of string %s %d, expected < %d\", param, length, max)\n\t}\n\treturn nil\n}", "func ValidateQueryRequest(request *QueryVisibilityRequest) error {\n\tif request.NamespaceID == \"\" {\n\t\treturn errEmptyNamespaceID\n\t}\n\tif request.PageSize == 0 {\n\t\treturn 
errInvalidPageSize\n\t}\n\treturn nil\n}", "func (q *QueryTop) IsValid() (bool, error) {\n\treturn true, nil\n}", "func hasValidSavedQuery(query interface{}) error {\n\tqueryConverted := query.(map[string]interface{})\n\tlistQueryTypes := []string{\"count/entity\", \"count/event\", \"count/entity/total\", \"aggregation\", \"top_values\"}\n\tif !stringInSlice(queryConverted[\"type\"].(string), listQueryTypes) {\n\t\treturn errors.New(\"Saved Query Validator: this dictionary don't have query type valid.\")\n\t}\n\treturn nil\n}", "func TestDnsMessageUncompressedQueryConfidenceCheck(t *testing.T) {\n\tm := mustUnpack(uncompressedQueryBytes)\n\tpackedBytes := mustPack(m)\n\tif len(packedBytes) >= len(uncompressedQueryBytes) {\n\t\tt.Errorf(\"Compressed query is not smaller than uncompressed query\")\n\t}\n}", "func (v *Validator) MinLength(field string, d int) {\n\tvalue := v.model[field]\n\n\tif value == \"\" {\n\t\treturn\n\t}\n\tif utf8.RuneCountInString(value) < d {\n\t\tv.errors[\"errors\"] = append(v.errors[\"errors\"], ErrFieldTooShort(d))\n\t}\n}", "func ValidateTransactionIndex(transactionIndex uint32) (bool) {\n if transactionIndex < MaxReasonableTransactionIndex || transactionIndex == SatoshiConst {\n return true\n }\n return false\n}", "func IsValidArgsLength(args []string, n int) bool {\n\tif args == nil && n == 0 {\n\t\treturn true\n\t}\n\tif args == nil {\n\t\treturn false\n\t}\n\n\tif n < 0 {\n\t\treturn false\n\t}\n\n\targsNr := len(args)\n\tif argsNr < n || argsNr > n {\n\t\treturn false\n\t}\n\treturn true\n}", "func ValidateQuery(astarteInterface AstarteInterface, queryPath string) error {\n\tif queryPath == \"/\" {\n\t\t// It is always allowed.\n\t\treturn nil\n\t}\n\n\t// Trailing slash (single) is a valid query (albeit not recommended). 
Trim it for\n\t// validation reasons.\n\tqueryPath = strings.TrimSuffix(queryPath, \"/\")\n\n\tif astarteInterface.Aggregation == ObjectAggregation {\n\t\treturn validateAggregateQuery(astarteInterface, queryPath)\n\t}\n\n\treturn validateIndividualQuery(astarteInterface, queryPath)\n}", "func isENOBUFS(err error) bool {\n\treturn errors.Is(err, unix.ENOBUFS)\n}", "func (tg Timing) validate() error {\n\tvar err error\n\tif tg.length != len(tg.ts) {\n\t\terr = errors.New(\"ts is wrong length\")\n\t}\n\tif tg.length != len(tg.te) {\n\t\terr = errors.New(\"te is wrong length\")\n\t}\n\tif tg.length != len(tg.Td) {\n\t\terr = errors.New(\"Td is wrong length\")\n\t}\n\tfor i, _ := range tg.te {\n\t\tif tg.te[i].Sub(tg.ts[i]) < 0 {\n\t\t\terr = errors.New(\"time travel detected\")\n\t\t}\n\t}\n\treturn err\n}", "func (o *GetRefPlantsParams) validateLimit(formats strfmt.Registry) error {\n\n\tif err := validate.MinimumInt(\"limit\", \"query\", o.Limit, 1, false); err != nil {\n\t\treturn err\n\t}\n\n\tif err := validate.MaximumInt(\"limit\", \"query\", o.Limit, 100, false); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (ur UnicodeRange) check() {\n\tif ur.Last < ur.First {\n\t\tpanic(\"The last encoding must be greater than the first one.\")\n\t}\n}", "func validateUserName(c *gin.Context) {\n\tuserName := c.Param(\"userName\")\n\n\tif len(userName) < 5 {\n\t\tc.Error(errors.NewCustomError(400, \"userName debe tener al menos 5 caracteres\"))\n\t\tc.Abort()\n\t\treturn\n\t}\n}", "func TooFewItems(name, in string, min int64, value interface{}) *Validation {\n\tmsg := fmt.Sprintf(minItemsFail, name, in, min)\n\tif in == \"\" {\n\t\tmsg = fmt.Sprintf(minItemsFailNoIn, name, min)\n\t}\n\treturn &Validation{\n\t\tcode: MinItemsFailCode,\n\t\tName: name,\n\t\tIn: in,\n\t\tValue: value,\n\t\tValid: min,\n\t\tmessage: msg,\n\t}\n}", "func (f *Form) CheckLength(field string, min, max int) {\n\tvalue := f.Get(field)\n\tif max < 0 {\n\t\tif len(value) < min {\n\t\t\tf.Errors.Add(field, fmt.Sprintf(\"%s min. 
length is %d\", field, min))\n\t\t}\n\t} else {\n\t\tif len(value) < min || len(value) > max {\n\t\t\tf.Errors.Add(field, fmt.Sprintf(\"%s min.length: %d and max.length: %d\", field, min, max))\n\t\t}\n\t}\n}", "func (q *queue) validateSequence(sequence int64) error {\n\tq.rwMutex.RLock()\n\tdefer q.rwMutex.RUnlock()\n\n\tif sequence > q.appendedSeq.Load() || sequence <= q.acknowledgedSeq.Load() {\n\t\treturn fmt.Errorf(\"%w: get %d, range [%d~%d]\", ErrOutOfSequenceRange,\n\t\t\tsequence, q.appendedSeq.Load(), q.acknowledgedSeq.Load())\n\t}\n\n\treturn nil\n}", "func (q queryManager) checkQueryNeedsTransaction(qp dbquery.QueryParsed) (bool, error) {\n\n\tif qp.IsSelect() {\n\t\treturn false, nil\n\t}\n\t// transaction for any update\n\treturn true, nil\n}", "func validateTransactionSize(next http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\n\t\ttransactions, ctx, err := getTransactions(r)\n\t\tif err != nil {\n\t\t\tlogFailure(err.Error(), w, r, 0)\n\t\t\treturn\n\t\t}\n\n\t\tfor _, transaction := range transactions {\n\t\t\tfor _, action := range transaction.Actions {\n\t\t\t\tif len(action.Data) > appConfig.MaxTransactionSize {\n\t\t\t\t\tlogFailure(\"INVALID_TRANSACTION_SIZE\", w, r, 0)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tnext.ServeHTTP(w, r.WithContext(ctx))\n\t}\n}", "func (m *Query) Validate() error {\n\treturn m.validate(false)\n}", "func HasExceeded(err error) bool {\n\tif err == nil {\n\t\treturn false\n\t}\n\treturn errors.As(err, &exceeded{})\n}", "func newParamLenErr(u, l int) error {\n\treturn fmt.Errorf(\"%w: got %d; requires %d\", ErrInvalidParamLen, u, l)\n}", "func (f *StringSetFilter) ItemShorterThan(length int) *StringSetFilter {\r\n\tf.AddValidator(func(paramName string, paramValue []string) *Error {\r\n\t\tfor _, v := range paramValue {\r\n\t\t\tif len(v) >= length {\r\n\t\t\t\treturn NewError(ErrorInvalidParam, paramName, \"ItemTooLong\")\r\n\t\t\t}\r\n\t\t}\r\n\t\treturn nil\r\n\t})\r\n\treturn f\r\n}", "func checkEmpty(t *testing.T, q *availableUnits) {\n\tassert.Zero(t, q.Len())\n}", "func validateTableLimits(limits *TableLimits) error {\n\tif limits == nil {\n\t\treturn nosqlerr.NewIllegalArgument(\"TableLimits must be non-nil\")\n\t}\n\n\treturn limits.validate()\n}", "func isTimeOK(d time.Duration) bool {\n\treturn d < 500*time.Millisecond\n}", "func validateQueryValidatorFlags(flagSet *pflag.FlagSet) error {\n\tmoniker, err := flagSet.GetString(FlagMoniker)\n\tif err != nil {\n\t\treturn err\n\t}\n\taddr, err := flagSet.GetString(FlagAddr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvalAddr, err := flagSet.GetString(FlagValAddr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif moniker == \"\" && addr == \"\" && valAddr == \"\" {\n\t\treturn fmt.Errorf(\"at least one of flags (--moniker, --val-addr, --addr) needs to be set\")\n\t}\n\n\treturn nil\n}" ]
[ "0.60837597", "0.58889705", "0.58200884", "0.58081704", "0.5772958", "0.5753801", "0.5691889", "0.5673438", "0.5651879", "0.5646494", "0.5557274", "0.55330944", "0.5494559", "0.5465842", "0.5409435", "0.5400918", "0.53961563", "0.5393424", "0.53896415", "0.5370335", "0.5316485", "0.530201", "0.5298309", "0.52938646", "0.52857566", "0.52752244", "0.524629", "0.52317953", "0.5229125", "0.5174601", "0.5135998", "0.51083505", "0.51031893", "0.50756997", "0.50424385", "0.5037132", "0.5036355", "0.50104564", "0.50072396", "0.5000919", "0.49967495", "0.49886665", "0.49864113", "0.49759838", "0.49727798", "0.49722362", "0.4971022", "0.4962663", "0.49578667", "0.49485123", "0.49479267", "0.494499", "0.49448746", "0.49426845", "0.49373034", "0.4913", "0.49093974", "0.4898095", "0.48906422", "0.48851648", "0.48796135", "0.48794907", "0.48768225", "0.4874438", "0.4869352", "0.48623666", "0.48607093", "0.4854272", "0.48451972", "0.48392218", "0.4824984", "0.482103", "0.48185834", "0.48101234", "0.48092744", "0.48061106", "0.4801565", "0.47967428", "0.47914353", "0.47870883", "0.47735718", "0.47729135", "0.47704136", "0.47666964", "0.47647375", "0.4753011", "0.47379518", "0.47351658", "0.47349462", "0.47248396", "0.47244492", "0.47193182", "0.4715299", "0.47150382", "0.47017553", "0.46915823", "0.4688543", "0.46847725", "0.46815062", "0.46798167" ]
0.5915572
1
Send a DoH query to an actual DoH server
func TestQueryIntegration(t *testing.T) {
	queryData := []byte{
		111, 222, // [0-1]   query ID
		1, 0, // [2-3]   flags, RD=1
		0, 1, // [4-5]   QDCOUNT (number of queries) = 1
		0, 0, // [6-7]   ANCOUNT (number of answers) = 0
		0, 0, // [8-9]   NSCOUNT (number of authoritative answers) = 0
		0, 0, // [10-11] ARCOUNT (number of additional records) = 0
		// Start of first query
		7, 'y', 'o', 'u', 't', 'u', 'b', 'e',
		3, 'c', 'o', 'm',
		0,    // null terminator of FQDN (DNS root)
		0, 1, // QTYPE = A
		0, 1, // QCLASS = IN (Internet)
	}

	testQuery := func(queryData []byte) {
		doh, err := NewTransport(testURL, ips, nil, nil, nil)
		if err != nil {
			t.Fatal(err)
		}
		resp, err2 := doh.Query(queryData)
		if err2 != nil {
			t.Fatal(err2)
		}
		if resp[0] != queryData[0] || resp[1] != queryData[1] {
			t.Error("Query ID mismatch")
		}
		if len(resp) <= len(queryData) {
			t.Error("Response is short")
		}
	}

	testQuery(queryData)

	paddedQueryBytes, err := AddEdnsPadding(simpleQueryBytes)
	if err != nil {
		t.Fatal(err)
	}
	testQuery(paddedQueryBytes)
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *Client) Do(query interface{}) (interface{}, error) {\n\n\t// TODO: make sure it's a real query message - the proto will allow responses as well, and we don't want that\n\n\tmsg, err := c.proto.WriteMessage(query)\n\tif err != nil {\n\t\treturn nil, errors.NewError(\"Could not send query: %s\", err)\n\t}\n\n\tif msg, err = c.roundtrip(msg); err != nil {\n\t\tlogging.Error(\"Could not roundtrip: %s\", errors.Sprint(err))\n\t\treturn nil, err\n\t}\n\n\treturn c.proto.ReadMessage(msg)\n\n}", "func (d *dht) onDHTRequest(req *types.DHTQueryRequest, from types.SwitchPorts) {\n\t// Build a response.\n\tres := types.DHTQueryResponse{\n\t\tRequestID: req.RequestID,\n\t}\n\tcopy(res.PublicKey[:], d.r.public[:])\n\n\t// Look up all nodes that we know about that are closer to\n\t// the public key being searched.\n\tfor _, f := range d.getCloser(req.PublicKey) {\n\t\tnode := types.DHTNode{\n\t\t\tPublicKey: f.PublicKey(),\n\t\t\tCoordinates: f.Coordinates(),\n\t\t}\n\t\tres.Results = append(res.Results, node)\n\t}\n\n\t// Marshal the response into binary format so we can send it\n\t// back.\n\tvar buffer [MaxPayloadSize]byte\n\tn, err := res.MarshalBinary(buffer[:], d.r.private[:])\n\tif err != nil {\n\t\tfmt.Println(\"Failed to sign DHT response:\", err)\n\t\treturn\n\t}\n\n\t// Send the DHT response back to the requestor.\n\td.r.send <- types.Frame{\n\t\tSource: d.r.Coords(),\n\t\tDestination: from,\n\t\tType: types.TypeDHTResponse,\n\t\tPayload: buffer[:n],\n\t}\n}", "func DHTOfferQueryHandler(reader fcrserver.FCRServerRequestReader, writer fcrserver.FCRServerResponseWriter, request *fcrmessages.FCRReqMsg) error {\n\tlogging.Debug(\"Handle dht offer query\")\n\t// Get core structure\n\tc := core.GetSingleInstance()\n\tc.MsgSigningKeyLock.RLock()\n\tdefer c.MsgSigningKeyLock.RUnlock()\n\n\t// Message decoding\n\tnonce, senderID, pieceCID, numDHT, maxOfferRequestedPerDHT, accountAddr, voucher, err := fcrmessages.DecodeDHTOfferDiscoveryRequest(request)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error in decoding payload: %v\", err.Error())\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\n\t// Verify signature\n\tif request.VerifyByID(senderID) != nil {\n\t\terr = fmt.Errorf(\"Error in verifying request from %v: %v\", senderID, err.Error())\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\n\t// Check numDHT\n\tif numDHT > 16 {\n\t\terr = fmt.Errorf(\"Error exceeding maximum numDHT 16 from %v, got %v\", senderID, numDHT)\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\n\t// Check payment\n\trefundVoucher := \"\"\n\treceived, lane, err := c.PaymentMgr.Receive(accountAddr, voucher)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error in receiving voucher %v:\", err.Error())\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\tif lane != 0 {\n\t\terr = fmt.Errorf(\"Not correct lane received expect 0 got %v:\", lane)\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, 
c.Settings.TCPInactivityTimeout)\n\t}\n\t// expected is 1 * search price + numDHT * (search price + max offer per DHT * offer price)\n\texpected := big.NewInt(0).Add(c.Settings.SearchPrice, big.NewInt(0).Mul(big.NewInt(0).Add(c.Settings.SearchPrice, big.NewInt(0).Mul(c.Settings.OfferPrice, big.NewInt(int64(maxOfferRequestedPerDHT)))), big.NewInt(int64(numDHT))))\n\tif received.Cmp(expected) < 0 {\n\t\t// Short payment\n\t\t// Refund money\n\t\tif received.Cmp(c.Settings.SearchPrice) <= 0 {\n\t\t\t// No refund\n\t\t} else {\n\t\t\trefundVoucher, err = c.PaymentMgr.Refund(accountAddr, lane, big.NewInt(0).Sub(received, c.Settings.SearchPrice))\n\t\t\tif err != nil {\n\t\t\t\t// This should never happen\n\t\t\t\tlogging.Error(\"Error in refunding: %v\", err.Error())\n\t\t\t}\n\t\t}\n\t\terr = fmt.Errorf(\"Short payment received, expect %v got %v, refund voucher %v\", expected.String(), received.String(), refundVoucher)\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\n\t// Payment is fine, search.\n\tc.OfferMgr.IncrementCIDAccessCount(pieceCID)\n\tcidHash, err := pieceCID.CalculateHash()\n\tif err != nil {\n\t\t// Internal error in calculating cid hash\n\t\tvar ierr error\n\t\trefundVoucher, ierr := c.PaymentMgr.Refund(accountAddr, lane, big.NewInt(0).Sub(received, c.Settings.SearchPrice))\n\t\tif err != nil {\n\t\t\t// This should never happen\n\t\t\tlogging.Error(\"Error in refunding %v\", ierr.Error())\n\t\t}\n\t\terr = fmt.Errorf(\"Error in calculating cid hash: %v, refund voucher: %v\", err.Error(), refundVoucher)\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\n\t// Get gateways\n\tgws := c.PeerMgr.GetGWSNearCIDHash(hex.EncodeToString(cidHash), int(numDHT), c.NodeID)\n\tif err != nil {\n\t\t// Internal error in getting near gateways\n\t\tvar ierr error\n\t\trefundVoucher, ierr := c.PaymentMgr.Refund(accountAddr, lane, received)\n\t\tif ierr != nil {\n\t\t\t// This should never happen\n\t\t\tlogging.Error(\"Error in refunding %v\", ierr.Error())\n\t\t}\n\t\terr = fmt.Errorf(\"Internal error in getting near gateways to requested cid: %v with hash: %v, refund voucher: %v\", pieceCID.ToString(), hex.EncodeToString(cidHash), refundVoucher)\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\n\t// TODO: Concurrency\n\tsupposed := big.NewInt(0).Set(c.Settings.SearchPrice)\n\tcontacted := make(map[string]*fcrmessages.FCRACKMsg)\n\tfor _, gw := range gws {\n\t\tresp, err := c.P2PServer.Request(gw.NetworkAddr, fcrmessages.StandardOfferDiscoveryRequestType, gw.NodeID, pieceCID, maxOfferRequestedPerDHT)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tfound := int64(maxOfferRequestedPerDHT)\n\t\t_, offers, _, _ := fcrmessages.DecodeStandardOfferDiscoveryResponse(resp)\n\t\tif len(offers) < int(maxOfferRequestedPerDHT) {\n\t\t\tfound = int64(len(offers))\n\t\t}\n\t\tsupposed.Add(c.Settings.SearchPrice, big.NewInt(0).Mul(c.Settings.OfferPrice, big.NewInt(found)))\n\t\tcontacted[gw.NodeID] = resp\n\t}\n\tif supposed.Cmp(expected) < 0 {\n\t\tvar ierr error\n\t\trefundVoucher, ierr = c.PaymentMgr.Refund(accountAddr, lane, big.NewInt(0).Sub(expected, supposed))\n\t\tif ierr != nil {\n\t\t\t// This should never 
happen\n\t\t\tlogging.Error(\"Error in refunding %v\", ierr.Error())\n\t\t}\n\t}\n\n\t// Respond\n\tresponse, err := fcrmessages.EncodeDHTOfferDiscoveryResponse(nonce, contacted, refundVoucher)\n\tif err != nil {\n\t\t// Internal error in encoding\n\t\tvar ierr error\n\t\trefundVoucher, ierr = c.PaymentMgr.Refund(accountAddr, lane, received)\n\t\tif ierr != nil {\n\t\t\t// This should never happen\n\t\t\tlogging.Error(\"Error in refunding %v\", ierr.Error())\n\t\t}\n\t\terr = fmt.Errorf(\"Internal error in encoding response: %v, refund voucher %v\", err.Error(), refundVoucher)\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\n\treturn writer.Write(response, c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n}", "func (h *Handler) SendQuery(ctx context.Context, ip net.IP) (err error) {\n\n\tpacket := nodeStatusRequestWireFormat(`* `)\n\t// packet.printHeader()\n\n\tif ip == nil || ip.Equal(net.IPv4zero) {\n\t\treturn fmt.Errorf(\"invalid IP=%v\", ip)\n\t}\n\t// ip[3] = 255 // Network broadcast\n\n\t// To broadcast, use network broadcast i.e 192.168.0.255 for example.\n\ttargetAddr := &net.UDPAddr{IP: ip, Port: 137}\n\tif _, err = h.conn.WriteToUDP(packet, targetAddr); err != nil {\n\t\tif ctx.Err() == nil { // not cancelled\n\t\t\treturn fmt.Errorf(\"nbns failed to send packet: %w\", err)\n\t\t}\n\t}\n\treturn nil\n}", "func (c Client) SendQuery(message dns.Msg) (dns.Msg, error) {\n\t// Open a new QUIC stream\n\tlog.Debugln(\"opening new quic stream\")\n\tstream, err := c.Session.OpenStream()\n\tif err != nil {\n\t\treturn dns.Msg{}, errors.New(\"quic stream open: \" + err.Error())\n\t}\n\n\t// Pack the DNS message for transmission\n\tlog.Debugln(\"packing dns message\")\n\tpacked, err := message.Pack()\n\tif err != nil {\n\t\t_ = stream.Close()\n\t\treturn dns.Msg{}, errors.New(\"dns message pack: \" + err.Error())\n\t}\n\n\t// Send the DNS query over QUIC\n\tlog.Debugln(\"writing packed format to the stream\")\n\t_, err = stream.Write(packed)\n\t_ = stream.Close()\n\tif err != nil {\n\t\treturn dns.Msg{}, errors.New(\"quic stream write: \" + err.Error())\n\t}\n\n\t// Read the response\n\tlog.Debugln(\"reading server response\")\n\tresponse, err := ioutil.ReadAll(stream)\n\tif err != nil {\n\t\treturn dns.Msg{}, errors.New(\"quic stream read: \" + err.Error())\n\t}\n\n\t// Unpack the DNS message\n\tlog.Debugln(\"unpacking response dns message\")\n\tvar msg dns.Msg\n\terr = msg.Unpack(response)\n\tif err != nil {\n\t\treturn dns.Msg{}, errors.New(\"dns message unpack: \" + err.Error())\n\t}\n\n\treturn msg, nil // nil error\n}", "func (cl *DoHClient) Query(msg *Message) (*Message, error) {\n\treturn cl.Get(msg)\n}", "func (q *query) sendResponse(data []byte, err error) {\n\tq.response <- queryResponse{data: data, err: err}\n}", "func (e *Engine) Do(ctx context.Context, query string, response interface{}) error {\n\tpayload := GQLRequest{\n\t\tQuery: query,\n\t\tVariables: map[string]interface{}{},\n\t}\n\n\tbody, err := e.Request(ctx, \"POST\", \"/\", &payload)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Request failed: %w\", err)\n\t}\n\n\t// TODO temporary hack, actually parse the response\n\tif str := string(body); strings.Contains(str, \"errors: \\\"[{\\\"error\") {\n\t\treturn fmt.Errorf(\"pql error: %s\", str)\n\t}\n\n\terr = json.Unmarshal(body, response)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"json unmarshal: %w\", 
err)\n\t}\n\n\treturn nil\n}", "func (this *Device) Query(query api.IQuery) error {\n if this.queryProcessor == nil {\n return errors.New(fmt.Sprintf(ERR_NO_QUERY_PROCESSOR, this.Info().String()))\n }\n\n var err error\n var queryString string\n\n if queryString, err = this.queryProcessor(this.Info().Mapify(), query); err == nil {\n err = this.Send([]byte(queryString))\n }\n\n return err\n}", "func (h *Handler) serveQuery(w http.ResponseWriter, r *http.Request) {\n\t// TODO: Authentication.\n\n\t// Parse query from query string.\n\tvalues := r.URL.Query()\n\tqueries, err := parser.ParseQuery(values.Get(\"q\"))\n\tif err != nil {\n\t\th.error(w, \"parse error: \"+err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Retrieve database from server.\n\tdb := h.server.Database(values.Get(\":db\"))\n\tif db == nil {\n\t\th.error(w, ErrDatabaseNotFound.Error(), http.StatusNotFound)\n\t\treturn\n\t}\n\n\t// Parse the time precision from the query params.\n\tprecision, err := parseTimePrecision(values.Get(\"time_precision\"))\n\tif err != nil {\n\t\th.error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Create processor for writing data out.\n\tvar p engine.Processor\n\tif r.URL.Query().Get(\"chunked\") == \"true\" {\n\t\tp = &chunkWriterProcessor{w, precision, false, (values.Get(\"pretty\") == \"true\")}\n\t} else {\n\t\tp = &pointsWriterProcessor{make(map[string]*protocol.Series), w, precision, (values.Get(\"pretty\") == \"true\")}\n\t}\n\n\t// Execute query against the database.\n\tfor _, q := range queries {\n\t\tif err := db.ExecuteQuery(nil, q, p); err != nil {\n\t\t\th.error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Mark processor as complete. Print error, if applicable.\n\tif err := p.Close(); err != nil {\n\t\th.error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}", "func (o *PluginDnsClient) Query(queries []utils.DnsQueryParams, socket transport.SocketApi) error {\n\tif o.IsNameServer() {\n\t\treturn fmt.Errorf(\"Querying is not permitted for Dns Name Servers!\")\n\t}\n\tquestions, err := utils.BuildQuestions(queries)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(questions) > 0 {\n\t\tdata := o.dnsPktBuilder.BuildQueryPkt(questions, o.Tctx.Simulation)\n\t\tif socket == nil {\n\t\t\treturn fmt.Errorf(\"Invalid Socket in Query!\")\n\t\t}\n\t\ttransportErr, _ := o.socket.Write(data)\n\t\tif transportErr != transport.SeOK {\n\t\t\to.stats.socketWriteError++\n\t\t\treturn transportErr.Error()\n\t\t}\n\t\to.stats.pktTxDnsQuery++ // successfully sent query\n\t\to.stats.txBytes += uint64(len(data)) // number of bytes sent\n\t}\n\treturn nil\n}", "func SendQueryRequest(requestData RequestData) (string, error) {\n\treq, errNewRequest := http.NewRequest(\"GET\", requestData.Url, strings.NewReader(requestData.Data))\n\tif errNewRequest != nil {\n\t\treturn \"\", errNewRequest\n\t}\n\treq.Header.Set(\"Authorization\", requestData.Token.TokenType+\" \"+requestData.Token.AccessToken)\n\n\tresponse, errDo := DoRequest(req)\n\tif errDo != nil {\n\t\treturn \"\", errDo\n\t}\n\n\tdefer response.Body.Close()\n\tbody, err2 := ioutil.ReadAll(response.Body)\n\tif err2 != nil {\n\t\treturn \"\", err2\n\t}\n\tif response.StatusCode == http.StatusOK || response.StatusCode == http.StatusNoContent {\n\t\treturn string(body), nil\n\t} else {\n\t\treturn \"\", errors.New(\"heartbeat request failed, status is \" + strconv.Itoa(response.StatusCode))\n\t}\n}", "func (s *Session) doQuery(ctx context.Context, name Name, f func(ctx 
context.Context, conn *grpc.ClientConn, header *headers.RequestHeader) (*headers.ResponseHeader, interface{}, error)) (interface{}, error) {\n\theader := s.getQueryHeader(getPrimitiveID(name))\n\treturn s.doRequest(ctx, header, func(conn *grpc.ClientConn) (*headers.ResponseHeader, interface{}, error) {\n\t\treturn f(ctx, conn, header)\n\t})\n}", "func (c *Client) QueryHouston(query string) (*HoustonResponse, error) {\n\tdoOpts := httputil.DoOptions{\n\t\tData: GraphQLQuery{query},\n\t\tHeaders: map[string]string{\n\t\t\t\"Accept\": \"application/json\",\n\t\t},\n\t}\n\n\tcl, err := cluster.GetCurrentCluster()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// set headers\n\tif cl.Token != \"\" {\n\t\tdoOpts.Headers[\"authorization\"] = cl.Token\n\t}\n\n\tvar response httputil.HTTPResponse\n\thttpResponse, err := c.HTTPClient.Do(\"POST\", cl.GetAPIURL(), &doOpts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer httpResponse.Body.Close()\n\n\t// strings.NewReader(jsonStream)\n\tbody, err := ioutil.ReadAll(httpResponse.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse = httputil.HTTPResponse{\n\t\tRaw: httpResponse,\n\t\tBody: string(body),\n\t}\n\tdecode := HoustonResponse{}\n\terr = json.NewDecoder(strings.NewReader(response.Body)).Decode(&decode)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Failed to JSON decode Houston response\")\n\t}\n\n\t// Houston Specific Errors\n\tif decode.Errors != nil {\n\t\treturn nil, errors.New(decode.Errors[0].Message)\n\t}\n\n\treturn &decode, nil\n}", "func StatsdSend(metricName string) {\n\tif DogStatsd {\n\t\tLog(fmt.Sprintf(\"DogStatsd='true' %s='true' Exec='%s'\", metricName, Exec), \"debug\")\n\t\tstatsd, _ := godspeed.NewDefault()\n\t\tdefer statsd.Conn.Close()\n\t\ttags := makeTags()\n\t\tmetric := fmt.Sprintf(\"shudi.%s\", metricName)\n\t\tstatsd.Incr(metric, tags)\n\t}\n}", "func (p *BeeswaxServiceClient) Query(query *Query) (r *QueryHandle, err error) {\n\tif err = p.sendQuery(query); err != nil {\n\t\treturn\n\t}\n\treturn p.recvQuery()\n}", "func hostDockerQuery() {\n\tlog.Println(\"hostDockerQuery\")\n\tfor {\n\t\ttime.Sleep(2 * time.Second)\n\t\tc, err := net.Dial(\"unix\", \"/var/run/dockerConnection/hostconnection.sock\")\n\t\tif err != nil {\n\t\t\tcontinue;\n\t\t}\n\t\t// send to socket\n\t\tlog.Println(\"sending request to server\")\n\t\tfmt.Fprintf(c, \"hi\" + \"\\n\")\n\t\t// listen for reply\n\t\tmessage, _ := bufio.NewReader(c).ReadString('\\n')\n\t\t//log.Println(\"Message from server: \" + message)\n\t\tlog.Println(\"Received update from host server\")\n\n\t\t// set this to be the latest response\n\t\tlatestHostServerResponse = message\n\t}\n}", "func query(object string, server string, tcpport string) (string, error) {\r\n\t// open connnection\r\n\tloggers.Info.Printf(\"whois.query() setup connection\")\r\n\tconn, err := net.DialTimeout(\"tcp\", net.JoinHostPort(server, tcpport), time.Second*30)\r\n\tif err != nil {\r\n\t\treturn \"\", fmt.Errorf(\"whois: connect to whois server failed: %v\", err)\r\n\t}\r\n\tdefer conn.Close()\r\n\t// set connection write timeout\r\n\t_ = conn.SetWriteDeadline(time.Now().Add(time.Second * 30))\r\n\t_, err = conn.Write([]byte(object + \"\\r\\n\"))\r\n\tif err != nil {\r\n\t\treturn \"\", fmt.Errorf(\"whois: send to whois server failed: %v\", err)\r\n\t}\r\n\t// set connection read timeout\r\n\t_ = conn.SetReadDeadline(time.Now().Add(time.Second * 30))\r\n\tbuffer, err := ioutil.ReadAll(conn)\r\n\tif err != nil {\r\n\t\treturn \"\", fmt.Errorf(\"whois: read from whois 
server failed: %v\", err)\r\n\t}\r\n\t// return result\r\n\treturn string(buffer), nil\r\n}", "func (c *Client) writeQuery(conn net.Conn, query []byte) error {\n\tvar err error\n\n\tif c.Timeout > 0 {\n\t\t_ = conn.SetWriteDeadline(time.Now().Add(c.Timeout))\n\t}\n\n\t// Write to the connection\n\tif _, ok := conn.(*net.TCPConn); ok {\n\t\tl := make([]byte, 2)\n\t\tbinary.BigEndian.PutUint16(l, uint16(len(query)))\n\t\t_, err = (&net.Buffers{l, query}).WriteTo(conn)\n\t} else {\n\t\t_, err = conn.Write(query)\n\t}\n\n\treturn err\n}", "func (ch *clientHandle) Send(q Query) ([]byte, error) {\n\tif nil == ch.queryQueue {\n\t\treturn nil, fmt.Errorf(\"ClientHandle has been closed\")\n\t}\n\tch.queryQueue <- query{Query: q, response: ch.response}\n\tres := <-ch.response\n\treturn res.data, res.err\n}", "func (h *HaproxyInstace) q(query string) (string, error) {\n\tc, err := net.Dial(h.Network, h.Address)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer c.Close()\n\n\tc.Write([]byte(query + \"\\n\"))\n\n\tvar buf bytes.Buffer\n\tio.Copy(&buf, c)\n\n\treturn strings.TrimSpace(buf.String()), nil\n}", "func (db *DB) simple(query jdh.Query) error {\n\tconn, err := net.Dial(\"tcp\", db.port)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\tenc := json.NewEncoder(conn)\n\treq := &Request{Query: query}\n\tenc.Encode(req)\n\tdec := json.NewDecoder(conn)\n\tans := &Answer{}\n\tif err := dec.Decode(ans); err != nil {\n\t\treturn err\n\t}\n\tif _, err := ans.GetMessage(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (c *HTTPClient) Query(q *Query, timeout time.Duration) (\n res *QueryResponse, err error) {\n\n if q == nil {\n err = fmt.Errorf(\"kairosdb: nil query passed\")\n return\n }\n payload, err := json.Marshal(q)\n if err != nil {\n return\n }\n\n res = &QueryResponse{}\n glog.V(3).Infof(\"querying metric: %s\", string(payload))\n reader := ioutil.NopCloser(bytes.NewReader(payload))\n err = c.backend.Call(\"POST\", c.url+\"/api/v1/datapoints/query\", reader,\n timeout, http.StatusOK, res)\n if err != nil {\n return\n }\n\n glog.V(3).Infof(\"response from query: %+v\", res)\n return\n}", "func QueryHandler(w http.ResponseWriter, r *http.Request) {\n\tdb := Connect()\n\tdefer db.Close()\n\n\tcanAccess, account := ValidateAuth(db, r, w)\n\tif !canAccess {\n\t\treturn\n\t}\n\n\tconnection, err := GetConnection(db, account.Id)\n\tif err != nil {\n\t\tif isBadConn(err, false) {\n\t\t\tpanic(err);\n\t\t\treturn;\n\t\t}\n\t\tstateResponse := &StateResponse{\n\t\t\tPeerId: 0,\n\t\t\tStatus: \"\",\n\t\t\tShouldFetch: false,\n\t\t\tShouldPeerFetch: false,\n\t\t}\n\t\tif err := json.NewEncoder(w).Encode(stateResponse); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn;\n\t}\n\n\tpeerId := connection.GetPeerId(account.Id)\n\tstatus := \"\"\n\tif connection.Status == PENDING {\n\t\tif connection.InviteeId == account.Id {\n\t\t\tstatus = \"pendingWithMe\"\n\t\t} else {\n\t\t\tstatus = \"pendingWithPeer\"\n\t\t}\n\t} else {\n\t\tstatus = \"connected\"\n\t}\n\n\tstateResponse := &StateResponse{\n\t\tPeerId: peerId,\n\t\tStatus: status,\n\t}\n\terr = CompleteFetchResponse(stateResponse, db, connection, account)\n\tif err != nil {\n\t\tlog.Printf(\"QueryPayload failed: %s\", err)\n\t\thttp.Error(w, \"could not query payload\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif err := json.NewEncoder(w).Encode(stateResponse); err != nil {\n\t\tpanic(err)\n\t}\n}", "func Query(host, domain string) (string, error) {\n\tvar (\n\t\td net.Dialer\n\t\tout string\n\t\terr error\n\t)\n\n\tctx, 
cancel := context.WithTimeout(context.Background(),\n\t\tTotalTimeout*time.Second)\n\tdefer cancel()\n\n\thostport := net.JoinHostPort(host, PortNumber)\n\tconn, err := d.DialContext(ctx, \"tcp\", hostport)\n\tif err != nil {\n\t\treturn out, err\n\t}\n\tdefer conn.Close()\n\n\terr = conn.SetWriteDeadline(time.Now().Add(WriteTimeout *\n\t\ttime.Second))\n\tif err != nil {\n\t\treturn out, err\n\t}\n\n\tif _, err := conn.Write([]byte(domain + \"\\r\\n\")); err != nil {\n\t\treturn out, err\n\t}\n\n\terr = conn.SetReadDeadline(time.Now().Add(ReadTimeout *\n\t\ttime.Second))\n\tif err != nil {\n\t\treturn out, err\n\t}\n\n\toutput, err := ioutil.ReadAll(conn)\n\tif err != nil {\n\t\treturn out, err\n\t}\n\n\tout = string(output)\n\treturn out, nil\n}", "func MsgExchangeOverGoogleDOH(req *dns.Msg, rt http.RoundTripper) (resp *dns.Msg, err error) {\n\tqtype := req.Question[0].Qtype\n\tname := req.Question[0].Name\n\n\tvar ecs net.IP\n\topt := req.IsEdns0()\n\tif opt != nil {\n\t\tfor _, s := range opt.Option {\n\t\t\tif _ecs, ok := s.(*dns.EDNS0_SUBNET); ok {\n\t\t\t\tecs = _ecs.Address\n\t\t\t}\n\t\t}\n\t}\n\tdohresp, err := google.Query(rt, qtype, name, ecs.String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Parse the google Questions to DNS RRs\n\tquestions := []dns.Question{}\n\tfor i, c := range dohresp.Question {\n\t\tquestions = append(questions, dns.Question{\n\t\t\tName: c.Name,\n\t\t\tQtype: uint16(c.Type),\n\t\t\tQclass: req.Question[i].Qclass,\n\t\t})\n\t}\n\n\t// Parse google RRs to DNS RRs\n\tanswers := []dns.RR{}\n\tfor _, a := range dohresp.Answer {\n\t\tanswers = append(answers, RRNewFromGoogleDohRR(a))\n\t}\n\n\t// Parse google RRs to DNS RRs\n\tauthorities := []dns.RR{}\n\tfor _, ns := range dohresp.Authority {\n\t\tauthorities = append(authorities, RRNewFromGoogleDohRR(ns))\n\t}\n\n\t// Parse google RRs to DNS RRs\n\textras := []dns.RR{}\n\tfor _, a := range dohresp.Additional {\n\t\textras = append(extras, RRNewFromGoogleDohRR(a))\n\t}\n\tresp = &dns.Msg{\n\t\tMsgHdr: dns.MsgHdr{\n\t\t\tId: req.Id,\n\t\t\tResponse: (dohresp.Status == 0),\n\t\t\tOpcode: dns.OpcodeQuery,\n\t\t\tAuthoritative: false,\n\t\t\tTruncated: dohresp.TC,\n\t\t\tRecursionDesired: dohresp.RD,\n\t\t\tRecursionAvailable: dohresp.RA,\n\t\t\t//Zero: false,\n\t\t\tAuthenticatedData: dohresp.AD,\n\t\t\tCheckingDisabled: dohresp.CD,\n\t\t\tRcode: int(dohresp.Status),\n\t\t},\n\t\tCompress: req.Compress,\n\t\tQuestion: questions,\n\t\tAnswer: answers,\n\t\tNs: authorities,\n\t\tExtra: extras,\n\t}\n\n\tif ecs != nil {\n\t\tMsgSetECSWithAddr(resp, ecs)\n\t}\n\treturn resp, nil\n}", "func (c *Conn) handleQuery(query string, args []driver.Value) (driver.Rows, error) {\n\tvar (\n\t\tb []byte\n\t\terr error\n\t)\n\n\t// reset the protocol packet sequence number\n\tc.resetSeqno()\n\n\tif b, err = c.createComQuery(replacePlaceholders(query, args)); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// send COM_QUERY to the server\n\tif err := c.writePacket(b); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn c.handleQueryResponse()\n}", "func (o *PluginDnsNs) SendQuery(mac *core.MACKey, domain string) bool {\n\n\tclient := o.Ns.GetClient(mac)\n\n\tif client == nil {\n\t\t// No such client ...\n\t\to.stats.autoPlayClientNotFound++\n\t\treturn true // Restart, next one can be ok\n\t}\n\n\tplug := client.PluginCtx.Get(DNS_PLUG)\n\tif plug == nil {\n\t\t// given client doesn't have Dns\n\t\to.stats.clientNoDns++\n\t\treturn false // Don't restart timer, stop!\n\t}\n\n\tdnsPlug := 
plug.Ext.(*PluginDnsClient)\n\n\tqueries := o.getClientQueries(mac, domain)\n\n\terr := dnsPlug.Query(queries, dnsPlug.socket)\n\tif err != nil {\n\t\t// Couldn't query properly\n\t\to.stats.autoPlayBadQuery++\n\t} else {\n\t\to.stats.autoPlayQueries++ // one more auto play query sent\n\t}\n\t// If the query amount is not reached, we can restart the timer.\n\tinfiniteQueries := (o.autoPlayParams.QueryAmount == 0)\n\treturn o.stats.autoPlayQueries < o.autoPlayParams.QueryAmount || infiniteQueries\n}", "func sendMsg(conn *net.UDPConn, raddr net.UDPAddr, query interface{}) {\n\ttotalSent.Add(1)\n\tvar b bytes.Buffer\n\tif err := bencode.Marshal(&b, query); err != nil {\n\t\treturn\n\t}\n\tif n, err := conn.WriteToUDP(b.Bytes(), &raddr); err != nil {\n\t\tlogger.Infof(\"DHT: node write failed to %+v, error=%s\", raddr, err)\n\t} else {\n\t\ttotalWrittenBytes.Add(int64(n))\n\t}\n\treturn\n}", "func (r *CortexClient) Query(ctx context.Context, query string) (*http.Response, error) {\n\n\tquery = fmt.Sprintf(\"query=%s&time=%d\", query, time.Now().Unix())\n\tescapedQuery := url.PathEscape(query)\n\n\tres, err := r.doRequest(ctx, \"/api/prom/api/v1/query?\"+escapedQuery, \"GET\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res, nil\n}", "func QueryWhoisServer(domain, server string) (response string, err error) {\n\tconn, err := net.Dial(\"tcp\", server+\":43\")\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer conn.Close()\n\n\tfmt.Fprintf(conn, \"%s\\r\\n\", domain)\n\tif buf, err := ioutil.ReadAll(conn); err == nil {\n\t\tresponse = string(buf)\n\t}\n\n\treturn\n}", "func (d *dht) onDHTResponse(res *types.DHTQueryResponse, from types.SwitchPorts) {\n\t/*\n\t\tctx, cancel := context.WithCancel(d.r.context)\n\t\td.insertNode(&dhtNode{\n\t\t\tctx: ctx,\n\t\t\tcancel: cancel,\n\t\t\tpublic: res.PublicKey,\n\t\t\tcoords: from.Copy(),\n\t\t\tlastseen: time.Now(),\n\t\t})\n\t*/\n\n\treq, ok := d.requests.Load(res.RequestID)\n\tif ok {\n\t\tdhtReq, ok := req.(*dhtRequestContext)\n\t\tif ok && dhtReq.id == res.RequestID {\n\t\t\tdhtReq.ch <- *res\n\t\t\td.requests.Delete(res.RequestID)\n\t\t}\n\t}\n}", "func NewDoHClient(nameserver string, allowInsecure bool) (*DoHClient, error) {\n\tnsURL, err := url.Parse(nameserver)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif nsURL.Scheme != \"https\" {\n\t\terr = fmt.Errorf(\"DoH name server must be HTTPS\")\n\t\treturn nil, err\n\t}\n\n\ttr := &http.Transport{\n\t\tMaxIdleConns: 1,\n\t\tIdleConnTimeout: 30 * time.Second,\n\t\tTLSClientConfig: &tls.Config{\n\t\t\tInsecureSkipVerify: allowInsecure,\n\t\t},\n\t}\n\n\tcl := &DoHClient{\n\t\taddr: nsURL,\n\t\theaders: http.Header{\n\t\t\t\"accept\": []string{\n\t\t\t\t\"application/dns-message\",\n\t\t\t},\n\t\t},\n\t\tquery: nsURL.Query(),\n\t\tconn: &http.Client{\n\t\t\tTransport: tr,\n\t\t\tTimeout: clientTimeout,\n\t\t},\n\t}\n\n\tcl.req = &http.Request{\n\t\tMethod: http.MethodGet,\n\t\tURL: nsURL,\n\t\tProto: \"HTTP/2\",\n\t\tProtoMajor: 2,\n\t\tProtoMinor: 0,\n\t\tHeader: cl.headers,\n\t\tBody: nil,\n\t\tHost: nsURL.Hostname(),\n\t}\n\n\treturn cl, nil\n}", "func (api *OsctrlAPI) RunQuery(env, uuid, query string, hidden bool) (types.ApiQueriesResponse, error) {\n\tq := types.ApiDistributedQueryRequest{\n\t\tUUID: uuid,\n\t\tQuery: query,\n\t\tHidden: hidden,\n\t}\n\tvar r types.ApiQueriesResponse\n\treqURL := fmt.Sprintf(\"%s%s%s/%s\", api.Configuration.URL, APIPath, APIQueries, env)\n\tjsonMessage, err := json.Marshal(q)\n\tif err != nil {\n\t\tlog.Printf(\"error marshaling data %s\", 
err)\n\t}\n\tjsonParam := strings.NewReader(string(jsonMessage))\n\trawQ, err := api.PostGeneric(reqURL, jsonParam)\n\tif err != nil {\n\t\treturn r, fmt.Errorf(\"error api request - %v - %s\", err, string(rawQ))\n\t}\n\tif err := json.Unmarshal(rawQ, &r); err != nil {\n\t\treturn r, fmt.Errorf(\"can not parse body - %v\", err)\n\t}\n\treturn r, nil\n}", "func (client *Client) SendQuery(query *Query) (*Response, error) {\n\tif client.XMLMode {\n\t\treturn nil, fmt.Errorf(\"XML mode not yet supported\")\n\t}\n\n\tif !client.IsLoggedIn() && query.Action() != ActionLogin {\n\t\treturn nil, fmt.Errorf(\"need to log in before sending action %s\", query.Action())\n\t}\n\tif client.IsLoggedIn() && query.Action() == ActionLogin {\n\t\treturn nil, fmt.Errorf(\"already logged in\")\n\t}\n\n\tif query.Action() == ActionLogout {\n\t\tdefer func() {\n\t\t\t// after action logout the connection and session are closed\n\t\t\tclient.connection = nil\n\t\t\tclient.currentUser = \"\"\n\t\t\tclient.lastUser = \"\"\n\t\t\tclient.lastPass = \"\"\n\t\t}()\n\t}\n\n\trawResponse, err := client.SendRaw(query.EncodeKV())\n\tif err != nil {\n\t\tif err == io.EOF && query.Action() == ActionLogout {\n\t\t\t// the server will immediately close the connection once LOGOUT is received\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\n\tresponse, err := ParseResponse(rawResponse)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"received malformed response: %s\", err.Error())\n\t}\n\n\tif query.Action() == ActionLogin && response.IsSuccessful() {\n\t\tclient.currentUser = query.FirstField(QueryFieldNameUser)\n\t\t// save credentials to restore session after lost connections\n\t\tclient.lastUser = client.currentUser\n\t\tpwField := query.Field(QueryFieldNamePassword)\n\t\tif len(pwField) > 0 {\n\t\t\tclient.lastPass = query.Field(QueryFieldNamePassword)[0]\n\t\t} else {\n\t\t\tclient.lastPass = \"\"\n\t\t}\n\t}\n\n\treturn response, nil\n}", "func (c *Client) Do(ctx context.Context, req *Request, resp *Response) error {\n\tr := graphql.Response{}\n\tif resp != nil {\n\t\tr.Data = resp.Data\n\t\tr.Errors = resp.Errors\n\t\tr.Extensions = resp.Extensions\n\t}\n\treturn c.gql.MakeRequest(ctx, &graphql.Request{\n\t\tQuery: req.Query,\n\t\tVariables: req.Variables,\n\t\tOpName: req.OpName,\n\t}, &r)\n}", "func (c *Client) Do(query string, vars interface{}, key string, res interface{}) error {\n\treturn c.DoOperation(query, \"\", vars, key, res)\n}", "func ClickHouseQuery(data *[][]string, sql string, hostname string) error {\n\turl, err := neturl.Parse(fmt.Sprintf(chQueryUrlPattern, hostname))\n\tif err != nil {\n\t\treturn err\n\t}\n\tencodeQuery(url, sql)\n\thttpCall(data, url.String())\n\t// glog.Infof(\"Loaded %d rows\", len(*data))\n\treturn nil\n}", "func (c *Client) Do(m *Msg, a string) {\n\tc.QueryChan <- &Request{Client: c, Addr: a, Request: m}\n}", "func SendQuery(queryName string, payload interface{}) {\n\tjsonPayload, err := json.Marshal(payload)\n\tif err != nil {\n\t\tlogger.Errorf(err.Error())\n\t\treturn\n\t}\n\tq := &serfclient.QueryParam{Name: queryName, Payload: jsonPayload}\n\terr = Serfer.Query(q)\n\tif err != nil {\n\t\tlogger.Debugf(err.Error())\n\t}\n\treturn\n}", "func (c Conn) QueryHandler(q Query, h string) (*QueryResponse, error) {\n\tcl := c.HTTPClient\n\tif cl == nil {\n\t\tcl = http.DefaultClient\n\t}\n\n\tu, err := c.URL.Parse(h)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// copy the query into a url.Values because it's rude to modify someone elses map\n\tp := url.Values{}\n\tfor k, 
v := range q {\n\t\tp[k] = v\n\t}\n\n\tp.Set(\"wt\", \"json\")\n\tp.Set(\"json.nl\", \"arrarr\")\n\n\tu.RawQuery = p.Encode()\n\n\tresp, err := cl.Get(u.String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer flush(resp.Body)\n\n\tif resp.StatusCode >= http.StatusBadRequest {\n\t\treturn nil, ErrHTTPStatus(resp.Status)\n\t}\n\n\tqr := new(QueryResponse)\n\tdec := json.NewDecoder(resp.Body)\n\terr = dec.Decode(qr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn qr, err\n}", "func Query(addr string, cmd []byte) ([]byte, error) {\n\tudpAddr, err := net.ResolveUDPAddr(\"udp\", addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconn, err := net.DialUDP(\"udp\", nil, udpAddr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer conn.Close()\n\n\t_, err = conn.Write(cmd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar buf [1024]byte\n\tconn.SetReadDeadline(time.Now().Add(5000 * time.Millisecond))\n\tn, err := conn.Read(buf[0:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf[0:n], nil\n}", "func (g *GraphQL) Do(ctx context.Context, command string, r io.Reader, response interface{}) error {\n\n\t// Want to capture the query being executed for logging.\n\t// The TeeReader will write the query to this buffer when\n\t// the request reads the query for the http call.\n\tvar query bytes.Buffer\n\tr = io.TeeReader(r, &query)\n\n\treq, err := http.NewRequestWithContext(ctx, http.MethodPost, g.url+command, r)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"graphql create request error: %w\", err)\n\t}\n\n\treq.Header.Set(\"Cache-Control\", \"no-cache\")\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.Header.Set(\"Accept\", \"application/json\")\n\tif g.authToken != \"\" {\n\t\treq.Header.Set(g.authHeaderName, g.authToken)\n\t}\n\n\tresp, err := g.client.Do(req)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"graphql request error: %w\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"graphql copy error: %w\", err)\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn fmt.Errorf(\"graphql op error: status code: %s\", resp.Status)\n\t}\n\n\t// fmt.Println(\"*****graphql*******>\\n\", query.String(), \"\\n\", string(data))\n\n\tresult := struct {\n\t\tData interface{}\n\t\tErrors []struct {\n\t\t\tMessage string\n\t\t}\n\t}{\n\t\tData: response,\n\t}\n\tif err := json.Unmarshal(data, &result); err != nil {\n\t\treturn fmt.Errorf(\"graphql decoding error: %w response: %s\", err, string(data))\n\t}\n\n\tif len(result.Errors) > 0 {\n\t\treturn fmt.Errorf(\"graphql op error:\\nquery:\\n%sgraphql error:\\n%s\", query.String(), result.Errors[0].Message)\n\t}\n\n\treturn nil\n}", "func TestShortQuery(t *testing.T) {\n\tvar qerr *queryError\n\tdoh, _ := NewTransport(testURL, ips, nil, nil, nil)\n\t_, err := doh.Query([]byte{})\n\tif err == nil {\n\t\tt.Error(\"Empty query should fail\")\n\t} else if !errors.As(err, &qerr) {\n\t\tt.Errorf(\"Wrong error type: %v\", err)\n\t} else if qerr.status != BadQuery {\n\t\tt.Errorf(\"Wrong error status: %d\", qerr.status)\n\t}\n\n\t_, err = doh.Query([]byte{1})\n\tif err == nil {\n\t\tt.Error(\"One byte query should fail\")\n\t} else if !errors.As(err, &qerr) {\n\t\tt.Errorf(\"Wrong error type: %v\", err)\n\t} else if qerr.status != BadQuery {\n\t\tt.Errorf(\"Wrong error status: %d\", qerr.status)\n\t}\n}", "func (c *Client) queryNode(miniProtocol multiplex.MiniProtocol, dataItems []cbor.DataItem) (*multiplex.ServiceDataUnit, error) {\n\n\t// 
Step 1: Create message for the request\n\tsdu := multiplex.NewServiceDataUnit(miniProtocol, multiplex.MessageModeInitiator, dataItems)\n\tif log.IsLevelEnabled(log.DebugLevel) {\n\t\tlog.Debug(\"Multiplexed Request:\")\n\t\tfmt.Println(\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> R E Q U E S T >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\")\n\t\tfmt.Println(sdu.Debug())\n\t}\n\n\t// Step 2: Send the request\n\tmessageResponse, err := c.socket.Write(sdu.Bytes())\n\tif err != nil && err != io.EOF {\n\t\treturn nil, fmt.Errorf(\"Error writing to socket %w\", err)\n\t}\n\tif log.IsLevelEnabled(log.DebugLevel) && messageResponse != nil {\n\t\tlog.Debug(\"Multiplexed Response:\")\n\t\tfmt.Println(\"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< R E S P O N S E <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\")\n\t\tfmt.Println(messageResponse.Debug())\n\t}\n\n\treturn messageResponse, nil\n}", "func handleQuery(msg *arbor.ProtocolMessage, out arbor.Writer, store *arbor.Store) {\n\tresult := store.Get(msg.ChatMessage.UUID)\n\tif result == nil {\n\t\tlog.Println(\"Unable to find queried id: \" + msg.ChatMessage.UUID)\n\t\treturn\n\t}\n\tmsg.ChatMessage = result\n\tmsg.Type = arbor.NewMessageType\n\terr := out.Write(msg)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\tlog.Println(\"Query response: \", msg.String())\n}", "func query(domain, server string) (string, error) {\n\tif server == \"whois.arin.net\" {\n\t\tdomain = \"n + \" + domain\n\t}\n\n\tconn, err := net.DialTimeout(\"tcp\", net.JoinHostPort(server, DEFAULT_WHOIS_PORT), time.Second*30)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"whois: connect to whois server failed: %v\", err)\n\t}\n\n\tdefer conn.Close()\n\t_ = conn.SetWriteDeadline(time.Now().Add(time.Second * 30))\n\t_, err = conn.Write([]byte(domain + \"\\r\\n\"))\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"whois: send to whois server failed: %v\", err)\n\t}\n\n\t_ = conn.SetReadDeadline(time.Now().Add(time.Second * 30))\n\tbuffer, err := ioutil.ReadAll(conn)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"whois: read from whois server failed: %v\", err)\n\t}\n\n\treturn string(buffer), nil\n}", "func Query(ctx context.Context, r *room.Room, servers chan<- *Server) error {\n\tc := make(chan *mdns.ServiceEntry)\n\tgo func() {\n\t\tfor entry := range c {\n\t\t\tvar ip net.IP\n\t\t\tif entry.AddrV4 != nil {\n\t\t\t\tip = entry.AddrV4\n\t\t\t} else if entry.AddrV6 != nil {\n\t\t\t\tip = entry.AddrV6\n\t\t\t}\n\n\t\t\ttcpaddr := &net.TCPAddr{\n\t\t\t\tIP: ip,\n\t\t\t\tPort: entry.Port,\n\t\t\t}\n\t\t\taddr := &Server{\n\t\t\t\tRoom: r,\n\t\t\t\tTCPAddr: tcpaddr,\n\t\t\t\tEntry: entry,\n\t\t\t}\n\n\t\t\tservers <- addr\n\t\t}\n\t}()\n\n\tparams := mdns.DefaultParams(r.Service)\n\tparams.WantUnicastResponse = true\n\tparams.Entries = c\n\tparams.Timeout = 10 * time.Second\n\terr := mdns.Query(params)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (s *Server) handleQuery(service *ServiceEntry, request *Request) error {\n\t// Ignore answer for now\n\tif len(request.query.Answer) > 0 {\n\t\treturn nil\n\t}\n\t// Ignore questions with Authorative section for now\n\tif len(request.query.Ns) > 0 {\n\t\treturn nil\n\t}\n\n\t// Handle each question\n\tvar (\n\t\tresp dns.Msg\n\t\terr error\n\t)\n\tif len(request.query.Question) > 0 {\n\t\tfor _, q := range request.query.Question {\n\t\t\tresp = dns.Msg{}\n\t\t\tresp.SetReply(&request.query)\n\t\t\tresp.Answer = []dns.RR{}\n\t\t\tresp.Extra = []dns.RR{}\n\t\t\tif err = s.handleQuestion(q, service, &resp); err != nil {\n\t\t\t\tlog.Printf(\"[ERR] 
bonjour: failed to handle question %v: %v\",\n\t\t\t\t\tq, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Check if there is an answer\n\t\t\tif len(resp.Answer) > 0 {\n\t\t\t\tif isUnicastQuestion(q) {\n\t\t\t\t\t// Send unicast\n\t\t\t\t\tif e := s.unicastResponse(&resp, request.from); e != nil {\n\t\t\t\t\t\terr = e\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// Send mulicast\n\t\t\t\t\tif e := s.multicastResponse(&resp); e != nil {\n\t\t\t\t\t\terr = e\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn err\n}", "func Query(w http.ResponseWriter, r *http.Request, format string) error {\n\n\treturn nil\n}", "func (t *HeroesServiceChaincode) queryone(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\n\tif len(args) < 2 {\n\t\treturn shim.Error(\"The number of arguments is insufficient.\")\n\t}\n\n\t// GetState retrieves the data from ledger using the Key\n\n\tvegAsBytes, _ := stub.GetState(args[1])\n\n\t// Transaction Response\n\n\treturn shim.Success(vegAsBytes)\n\n}", "func queryHandler(w http.ResponseWriter, r *http.Request) {\r\n\r\n\tif r.Header.Get(\"Content-Type\") != \"application/json\" {\r\n\t\tw.WriteHeader(http.StatusBadRequest)\r\n\t\treturn\r\n\t}\r\n\t//To allocate slice for request body\r\n\tlength, err := strconv.Atoi(r.Header.Get(\"Content-Length\"))\r\n\tif err != nil {\r\n\t\tw.WriteHeader(http.StatusInternalServerError)\r\n\t\treturn\r\n\t}\r\n\r\n\t//Read body data to parse json\r\n\tbody := make([]byte, length)\r\n\tlength, err = r.Body.Read(body)\r\n\tif err != nil && err != io.EOF {\r\n\t\tw.WriteHeader(http.StatusInternalServerError)\r\n\t\treturn\r\n\t}\r\n\r\n\t//parse json\r\n\tvar jsonBody map[string]interface{}\r\n\terr = json.Unmarshal(body[:length], &jsonBody)\r\n\tif err != nil {\r\n\t\tw.WriteHeader(http.StatusInternalServerError)\r\n\t\treturn\r\n\t}\r\n\r\n\tvar time_from,time_to time.Time\r\n\tif time_from, err = getTimeFromReq(jsonBody, \"from\"); err != nil{\r\n\t\tfmt.Printf(\"ERR: %v\\n\", err)\r\n\t\tw.WriteHeader(http.StatusBadRequest)\t\t\r\n\t\treturn \r\n\t}\r\n\r\n\tif time_to, err = getTimeFromReq(jsonBody, \"to\"); err != nil{\r\n\t\tfmt.Printf(\"ERR: %v\\n\", err)\r\n\t\tw.WriteHeader(http.StatusBadRequest)\t\t\r\n\t\treturn \r\n\t}\r\n\r\n\tvar targets []string\r\n\tif targets, err = getTargetFromReq(jsonBody); err != nil {\r\n\t\tfmt.Printf(\"ERR: %v\\n\", err)\r\n\t}\r\n\r\n\tjsonOut := getRedisVal(*redisHost,\r\n\t\ttargets,\r\n\t\tstrconv.FormatInt(time_from.Unix(), 10),\r\n\t\tstrconv.FormatInt(time_to.Unix(), 10),\r\n\t\tint(jsonBody[\"maxDataPoints\"].(float64)))\r\n\r\n\tw.Header().Set(\"Content-Type\", \"application/json\")\r\n\tfmt.Fprintf(w, jsonOut)\r\n\treturn\r\n}", "func (c InfluxDBClient) queryDB(cmd string) (*influx.Response, error) {\n\tdefer c.influxHTTPClient.Close()\n\n\tq := influx.Query{\n\t\tCommand: cmd,\n\t\tDatabase: databaseName,\n\t}\n\n\tresponse, err := c.influxHTTPClient.Query(q)\n\tif err != nil {\n\t\tlog.Print(\"Error while querying InfluxDB\", err)\n\t\treturn nil, err\n\t}\n\tif response.Error() != nil {\n\t\tlog.Print(\"Error in response from InfluxDB\", err)\n\t\treturn nil, response.Error()\n\t}\n\n\treturn response, nil\n}", "func (c *client) do(command string, args []interface{}) (interface{}, error) {\n\tconn, ok := c.timedBorrow()\n\tif !ok {\n\t\treturn nil, ErrNoConnection\n\t}\n\n\tresult, err := conn.Do(command, args...)\n\tc.release(conn, err)\n\treturn result, err\n}", "func (db *DB) Exec(query jdh.Query, table jdh.Table, param interface{}) (string, error) {\n\tswitch query 
{\n\tcase jdh.Add:\n\t\tif param == nil {\n\t\t\treturn \"\", errors.New(\"empty element\")\n\t\t}\n\t\tconn, err := net.Dial(\"tcp\", db.port)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tdefer conn.Close()\n\t\tenc := json.NewEncoder(conn)\n\t\treq := &Request{\n\t\t\tQuery: jdh.Add,\n\t\t\tTable: table,\n\t\t}\n\t\tenc.Encode(req)\n\t\tenc.Encode(param)\n\t\tdec := json.NewDecoder(conn)\n\t\tans := &Answer{}\n\t\tif err := dec.Decode(ans); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif id, err := ans.GetMessage(); err != nil {\n\t\t\treturn \"\", err\n\t\t} else {\n\t\t\treturn id, nil\n\t\t}\n\t\treturn \"\", nil\n\tcase jdh.Commit:\n\t\treturn \"\", db.simple(query)\n\tcase jdh.Delete:\n\t\tif param == nil {\n\t\t\treturn \"\", errors.New(\"empty argument list\")\n\t\t}\n\t\tkvs := param.(*jdh.Values)\n\t\tif len(kvs.KV) == 0 {\n\t\t\treturn \"\", errors.New(\"empty argument list\")\n\t\t}\n\t\tconn, err := net.Dial(\"tcp\", db.port)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tdefer conn.Close()\n\t\tenc := json.NewEncoder(conn)\n\t\treq := &Request{\n\t\t\tQuery: jdh.Delete,\n\t\t\tTable: table,\n\t\t\tKvs: kvs.KV,\n\t\t}\n\t\tenc.Encode(req)\n\t\tdec := json.NewDecoder(conn)\n\t\tans := &Answer{}\n\t\tif err := dec.Decode(ans); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif _, err := ans.GetMessage(); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn \"\", nil\n\tcase jdh.Set:\n\t\tif param == nil {\n\t\t\treturn \"\", errors.New(\"empty argument list\")\n\t\t}\n\t\tkvs := param.(*jdh.Values)\n\t\tif len(kvs.KV) == 0 {\n\t\t\treturn \"\", errors.New(\"empty argument list\")\n\t\t}\n\t\tconn, err := net.Dial(\"tcp\", db.port)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tdefer conn.Close()\n\t\tenc := json.NewEncoder(conn)\n\t\treq := &Request{\n\t\t\tQuery: jdh.Set,\n\t\t\tTable: table,\n\t\t\tKvs: kvs.KV,\n\t\t}\n\t\tenc.Encode(req)\n\t\tdec := json.NewDecoder(conn)\n\t\tans := &Answer{}\n\t\tif err := dec.Decode(ans); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif _, err := ans.GetMessage(); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn \"\", nil\n\t}\n\treturn \"\", errors.New(\"invalid query\")\n}", "func (client *Client) QueryDevice(request *QueryDeviceRequest) (response *QueryDeviceResponse, err error) {\n\tresponse = CreateQueryDeviceResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}", "func (t *targetrunner) daemonhdlr(w http.ResponseWriter, r *http.Request) {\n\tswitch r.Method {\n\tcase http.MethodGet:\n\t\tt.httpdaeget(w, r)\n\tcase http.MethodPut:\n\t\tt.httpdaeput(w, r)\n\tdefault:\n\t\tinvalhdlr(w, r)\n\t}\n\tglog.Flush()\n}", "func DAHDIDNDon(client Client, actionID, channel string) (Response, error) {\n\treturn send(client, \"DAHDIDNDon\", actionID, map[string]string{\n\t\t\"DAHDIChannel\": channel,\n\t})\n}", "func DAHDIDNDon(ctx context.Context, client Client, actionID, channel string) (Response, error) {\n\treturn send(ctx, client, \"DAHDIDNDon\", actionID, map[string]string{\n\t\t\"DAHDIChannel\": channel,\n\t})\n}", "func TestAccept(t *testing.T) {\n\tdoh := newFakeTransport()\n\tclient, server := makePair()\n\n\t// Start the forwarder running.\n\tgo Accept(doh, server)\n\n\tlbuf := make([]byte, 2)\n\t// Send Query\n\tqueryData := simpleQueryBytes\n\tbinary.BigEndian.PutUint16(lbuf, uint16(len(queryData)))\n\tn, err := client.Write(lbuf)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif n != 2 {\n\t\tt.Error(\"Length write problem\")\n\t}\n\tn, err = client.Write(queryData)\n\tif 
err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif n != len(queryData) {\n\t\tt.Error(\"Query write problem\")\n\t}\n\n\t// Read query\n\tqueryRead := <-doh.query\n\tif !bytes.Equal(queryRead, queryData) {\n\t\tt.Error(\"Query mismatch\")\n\t}\n\n\t// Send fake response\n\tresponseData := []byte{1, 2, 8, 9, 10}\n\tdoh.response <- responseData\n\n\t// Get Response\n\tn, err = client.Read(lbuf)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif n != 2 {\n\t\tt.Error(\"Length read problem\")\n\t}\n\trlen := binary.BigEndian.Uint16(lbuf)\n\tresp := make([]byte, int(rlen))\n\tn, err = client.Read(resp)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !bytes.Equal(responseData, resp) {\n\t\tt.Error(\"Response mismatch\")\n\t}\n\n\tclient.Close()\n}", "func (c *DentistClient) Query() *DentistQuery {\n\treturn &DentistQuery{config: c.config}\n}", "func (b *bot) query(query string, args ...interface{}) (*sql.Rows, error) {\n\treturn b.DB.client.Query(query, args...)\n}", "func queryHandler(w http.ResponseWriter, r *http.Request) {\n\tkeys := readKeys(r.Body)\n\tservs := servers()\n\tnumServers := len(servs)\n\tserverKeys := groupKeysByServer(numServers, keys)\n\tresult := make([]QueryResponse, 0)\n\tfor idx, keys := range serverKeys {\n\t\tif len(keys) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tencodedList, err := json.Marshal(keys)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error marshalling list of keys:\", err)\n\t\t\tbreak\n\t\t}\n\t\tels := fetchQueryRespFromServer(servs[idx], encodedList)\n\t\tresult = append(result, decodeQueryResponse(els)...)\n\t}\n\tif len(keys) == len(result) {\n\t\tw.WriteHeader(http.StatusOK)\n\t} else {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t}\n\tjson.NewEncoder(w).Encode(result)\n}", "func (act *QueryAction) Do() error {\n\t// business sharding db.\n\tsd, err := act.smgr.ShardingDB(act.req.BizId)\n\tif err != nil {\n\t\treturn act.Err(pbcommon.ErrCode_E_DM_ERR_DBSHARDING, err.Error())\n\t}\n\tact.sd = sd\n\n\t// query variable.\n\tif errCode, errMsg := act.queryVariable(); errCode != pbcommon.ErrCode_E_OK {\n\t\treturn act.Err(errCode, errMsg)\n\t}\n\treturn nil\n}", "func (db *DB) Query(sql string) *ResponseMessage {\n\n\turl := db.url\n\tmethod := \"POST\"\n\n\tjsonTxt := sqlStatementJSON(sql)\n\t//payload := strings.NewReader(\"{\\n \\\"statement\\\": \\\"SELECT * FROM master_erp WHERE type='login_session'\\\"\\n}\\n\\n\")\n\n\tpayload := strings.NewReader(jsonTxt)\n\n\tclient := &http.Client{}\n\treq, err := http.NewRequest(method, url, payload)\n\n\tif err != nil {\n\t\tfmt.Println(\"ERROR @ Query:\", err.Error())\n\t}\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\treq.Header.Add(\"Authorization\", db.authorization())\n\n\tres, err := client.Do(req)\n\tbody, err := ioutil.ReadAll(res.Body)\n\tdefer res.Body.Close()\n\n\t//local variable as a pointer\n\tvar resPonse ResponseMessage\n\n\tjson.Unmarshal(body, &resPonse)\n\n\treturn &resPonse\n}", "func (cc *Client) Do(resource Requester) (*Response, error) {\n\treq, err := resource.Request(cc.serverURL)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"sending ksql request: %w\", err)\n\t}\n\tctx, cancel := context.WithCancel(cc.ctx)\n\ttrace := cc.HTTPTrace()\n\tif trace != nil && trace.RequestPrepared != nil {\n\t\ttrace.RequestPrepared(req)\n\t}\n\tresp, err := cc.httpClient.Do(cc.WithClientConfig(ctx, req))\n\tif trace != nil && trace.ResponseDelivered != nil {\n\t\ttrace.ResponseDelivered(resp, err)\n\t}\n\tif err != nil {\n\t\t// Avoiding a lost cancel.\n\t\treturn &Response{cancelFunc: cancel}, 
fmt.Errorf(\"sending ksql request: %w\", err)\n\t}\n\treturn &Response{\n\t\tResponse: resp,\n\t\tContext: ctx,\n\t\tcancelFunc: cancel,\n\t}, nil\n}", "func HTTPQuery(request *http.Request) *http.Response {\n\tclient := &http.Client{\n\t\tTransport: &http.Transport {\n\t\t\t// Support for 'HTTP[S]_PROXY'/'NO_PROXY' envvars\n\t\t\tProxy: http.ProxyFromEnvironment,\n\t\t\t// Support main CLI's 'core.ssl_verify' setting\n\t\t\tTLSClientConfig: buildTLSConfig(),\n\t\t},\n\t}\n\tvar err interface{}\n\tresponse, err := client.Do(request)\n\tswitch err.(type) {\n\tcase *url.Error:\n\t\t// extract wrapped error\n\t\terr = err.(*url.Error).Err\n\t}\n\tif err != nil {\n\t\tswitch err.(type) {\n\t\tcase x509.UnknownAuthorityError:\n\t\t\t// custom suggestions for a certificate error:\n\t\t\tPrintMessage(\"HTTP %s Query for %s failed: %s\", request.Method, request.URL, err)\n\t\t\tPrintMessage(\"- Is the cluster CA certificate configured correctly? Check 'dcos config show core.ssl_verify'.\")\n\t\t\tPrintMessageAndExit(\"- To ignore the unvalidated certificate and force your command (INSECURE), use --force-insecure\")\n\t\tdefault:\n\t\t\tPrintMessage(\"HTTP %s Query for %s failed: %s\", request.Method, request.URL, err)\n\t\t\tPrintMessage(\"- Is 'core.dcos_url' set correctly? Check 'dcos config show core.dcos_url'.\")\n\t\t\tPrintMessage(\"- Is 'core.dcos_acs_token' set correctly? Run 'dcos auth login' to log in.\")\n\t\t\tPrintMessageAndExit(\"- Are any needed proxy settings set correctly via HTTP_PROXY/HTTPS_PROXY/NO_PROXY? Check with your network administrator.\")\n\t\t}\n\t}\n\treturn response\n}", "func ServeDeviceQuery(query DeviceQueryResponse) TestServerPayload {\n\treturn TestServerPayload{DeviceQuery: &query}\n}", "func (c *Client) Query(\n\tmethod, path string,\n\tdata interface{},\n\teTag string,\n) (*Response, string, error) {\n\t// Generate the URL\n\turl := fmt.Sprintf(\"%s%s\", c.httpHost, path)\n\treturn c.rawQuery(method, url, data, eTag)\n}", "func (c *MulticastController) sendJoinQuery(grpIP net.IP, members []net.IP) {\n\t// change the unique identifier for the join query packet\n\tc.packetSeqNo++\n\tjq := joinQuery{\n\t\tseqNum: c.packetSeqNo,\n\t\tttl: ODMRPDefaultTTL,\n\t\tsrcIP: c.ip,\n\t\tprevHop: c.mac,\n\t\tgrpIP: grpIP,\n\t\tdests: members,\n\t}\n\n\t// insert in cache in case it use broadcast\n\tcached := &cacheEntry{seqNo: jq.seqNum, grpIP: jq.grpIP, prevHop: jq.prevHop, cost: ODMRPDefaultTTL - jq.ttl}\n\tc.cacheTable.set(jq.srcIP, cached)\n\n\tencryptedJQ := c.msec.Encrypt(jq.marshalBinary())\n\tc.queryFlooder.Flood(encryptedJQ)\n\t// log.Println(\"sent join query to\", grpIP)\n\n\t// To keep table up to date consistantly send join query and recieve join replies to fill tables\n\t// When you wants to stop call stopSending() func\n\tc.refJoinQuery = c.timers.Add(JQRefreshTime, func() {\n\t\tc.sendJoinQuery(grpIP, members)\n\t})\n}", "func (c *HTTPClient) Query(cmd *Command) (Response, error) {\n\tif err := cmd.Check(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn c.exec(cmd.Name(), cmd.Params(), cmd.Body())\n}", "func (a *ArangoDb) DoQuery(ctx context.Context, query string) error {\n\n\tconst op = \"db.DoUpdateQuery\"\n\n\t_, err := (*a.Db).Query(ctx, query, nil)\n\tif err != nil {\n\t\t// handle error\n\t\treturn &e.Error{Code: e.EINTERNAL, Op: op, Err: err}\n\t}\n\n\treturn nil\n}", "func (d *DaemonClient) sendDataToDaemonServer(data []byte) error {\n\tbaseCmd := command.BaseCommand{}\n\terr := json.Unmarshal(data, &baseCmd)\n\tif err != nil {\n\t\treturn 
errors.Wrap(err, \"Failed to unmarshal command\")\n\t}\n\tconn, err := net.DialTimeout(\"tcp\", fmt.Sprintf(\"%s:%d\", \"127.0.0.1\", d.daemonServerListenPort), time.Second*30)\n\tif err != nil {\n\t\treturn errors.Wrap(err, fmt.Sprintf(\"%s failed to dial to daemon\", baseCmd.CommandType))\n\t}\n\tdefer conn.Close()\n\tif err = conn.SetDeadline(time.Now().Add(time.Second * 30)); err != nil {\n\t\tlog.Logf(\"set connection deadline, err: %v\", err)\n\t}\n\tif err = conn.SetWriteDeadline(time.Now().Add(time.Second * 30)); err != nil {\n\t\tlog.Logf(\"set connection write deadline, err: %v\", err)\n\t}\n\t_, err = conn.Write(data)\n\treturn errors.Wrap(err, fmt.Sprintf(\"%s failed to write to daemon\", baseCmd.CommandType))\n}", "func doPing(w http.ResponseWriter, r *http.Request) {\n\thost, _, err := net.SplitHostPort(r.RemoteAddr)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\tfmt.Fprint(w, \"PONG\\n\"+host+\"\\n\")\n}", "func (d *dht) request(coords types.SwitchPorts, pk types.PublicKey) (types.PublicKey, []types.DHTNode, error) {\n\t// Create a new request context.\n\tdhtReq, requestID := d.newRequest()\n\tdefer dhtReq.cancel()\n\tdefer close(dhtReq.ch)\n\tdefer d.requests.Delete(dhtReq.id)\n\n\t// Build the request query.\n\treq := types.DHTQueryRequest{}\n\tcopy(req.PublicKey[:], pk[:])\n\tcopy(req.RequestID[:], requestID[:])\n\n\t// Marshal it into binary so that we can send the request out\n\t// to the network.\n\tvar buffer [MaxPayloadSize]byte\n\tn, err := req.MarshalBinary(buffer[:])\n\tif err != nil {\n\t\treturn types.PublicKey{}, nil, fmt.Errorf(\"res.MarshalBinary: %w\", err)\n\t}\n\n\t// Send the request frame to the switch. The switch will then\n\t// forward it onto the destination as appropriate.\n\td.r.send <- types.Frame{\n\t\tSource: d.r.Coords(),\n\t\tDestination: coords,\n\t\tType: types.TypeDHTRequest,\n\t\tPayload: buffer[:n],\n\t}\n\n\t// Wait for a response that matches our search ID, or for the\n\t// timeout to kick in instead.\n\tselect {\n\tcase res := <-dhtReq.ch:\n\t\treturn res.PublicKey, res.Results, nil\n\tcase <-dhtReq.ctx.Done():\n\t\treturn types.PublicKey{}, nil, fmt.Errorf(\"request timed out\")\n\t}\n}", "func (h *Handler) QueryEventHandle(name string, eventHandler EventHandler) {\n\th.EventHandle(\"query\", name, eventHandler)\n}", "func Query(query string) (*client.Response, error) {\n\tc, err := NewHTTPClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer c.Close()\n\n\tlog.Debugf(\"query=\\\"%s\\\"\", query)\n\tq := client.NewQuery(query, Settings.Database, \"s\")\n\treturn c.Query(q)\n}", "func (h *Handler) serveDBUser(w http.ResponseWriter, r *http.Request) {}", "func Do(q *Question) (*Answer, time.Duration, error) {\n\treturn DefaultClient.Do(q)\n}", "func (d *Client) DoStreamQuery(ctx context.Context, q *query.Query) (*query.ResultStream, error) {\n\tresults, err := d.Transport.DoStreamQuery(ctx, q)\n\tif err != nil {\n\t\treturn nil, err\n\t} else {\n\t\treturn results, err\n\t}\n}", "func (c *QueryClient) Query(nrqlQuery string, response interface{}) (err error) {\n\tif response == nil {\n\t\treturn errors.New(\"go-insights: Invalid query response can not be nil\")\n\t}\n\n\terr = c.queryRequest(nrqlQuery, response)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (c *client) query(params *QueryParam) (dnsmessage.ResourceHeader, net.Addr, error) {\n\tctx, _ := context.WithCancel(context.Background())\n\treturn c.conn.Query(ctx, params)\n}", "func runCommand(query string) ([]byte, error) {\n\tpath, err 
:= os.Getwd()\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\tcmd := fmt.Sprintf(\"%s/dasgoclient\", path)\n\tout, err := exec.Command(cmd, \"-query\", query, \"-format\", \"json\").Output()\n\treturn out, err\n}", "func ExecuteQuery(c *clientv1.Client, query string) (*clientv1.Response, error) {\n\tlog.Infof(\"action=ExecuteQuery q=%s client=%+v\", query, c)\n\tq := clientv1.Query{\n\t\tCommand: query,\n\t\tDatabase: Settings.Database,\n\t\tChunked: true,\n\t}\n\tresponse, err := c.Query(q)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn response, nil\n}", "func runSparqlQuery(orgID, query, acceptHeader string) (body []byte, statusCode int, contentType string, err error) {\n\treq, err := http.NewRequest(\"POST\", c.Config.GetSparqlEndpoint(orgID, \"\"), http.NoBody)\n\tif err != nil {\n\t\tlog.Printf(\"%s\", fmt.Errorf(\"%s; %w \", ErrInvalidSparqlRequest, err))\n\t\treturn\n\t}\n\treq.Header.Set(\"Accept\", acceptHeader)\n\tq := req.URL.Query()\n\tq.Add(\"query\", query)\n\treq.URL.RawQuery = q.Encode()\n\n\treturn makeSparqlRequest(req)\n}", "func (c *Client) QueryServers() {\n\t// create the message. Only header, no payload when querying server\n\tvar message Message\n\n\tmessage.Objname = c.params.Objname\n\tmessage.Opnum = c.Opnum\n\tmessage.Phase = c.Phase\n\tmessage.Objparams = c.params\n\tmessage.TagValue_var = TagValue{Tag_var: Tag{Client_id: \"\", Version_num: 0}, Value: make([]byte, 0)}\n\tmessage.Sender = c.client_name\n\n\tmessage_to_send := CreateGobFromMessage(message)\n\n\tfor _, server := range c.params.Servers_names {\n\t\tc.connection[server].SendBytes(message_to_send.Bytes(), NON_BLOCKING)\n\t\t//serverMessageCountUp(server)\n\t}\n\n}", "func (a API) DeviceQuery(ctx context.Context, hardwareAddress string, variables ...string) (DeviceQueryResponse, error) {\n\tdeviceResponse := DeviceQueryResponse{}\n\n\tvar toquery []string\n\tfilter := a.Config.GetFilter()\n\tfor _, v := range variables {\n\t\tif filter.Exclude(v) {\n\t\t\tlog.Printf(\"Excluding %v due to filter: %v\", v, filter)\n\t\t\tcontinue\n\t\t}\n\n\t\ttoquery = append(toquery, v)\n\t}\n\n\tif len(toquery) < 1 {\n\t\treturn deviceResponse, fmt.Errorf(\"Post filter (%v) no variables were available: %v\", filter, variables)\n\t}\n\n\terr := a.post(ctx, NewDeviceQueryCommand(hardwareAddress, toquery...), &deviceResponse)\n\treturn deviceResponse, err\n}", "func (c *DNSBLResponseClient) QueryQuery(dr *DNSBLResponse) *DNSBLQueryQuery {\n\tquery := &DNSBLQueryQuery{config: c.config}\n\tquery.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {\n\t\tid := dr.ID\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(dnsblresponse.Table, dnsblresponse.FieldID, id),\n\t\t\tsqlgraph.To(dnsblquery.Table, dnsblquery.FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.M2O, true, dnsblresponse.QueryTable, dnsblresponse.QueryColumn),\n\t\t)\n\t\tfromV = sqlgraph.Neighbors(dr.driver.Dialect(), step)\n\t\treturn fromV, nil\n\t}\n\treturn query\n}", "func (h *ProxyHandler) HandleQuery(query string) (*go_mysql.Result, error) {\n\tfmt.Println(\"Exec Q: \", query)\n\treturn h.remoteConn.Execute(query)\n}", "func main() {\n\tcfg := config.Load()\n\tcfg.ServiceName = config.String(\"querier\")\n\n\tflusher := hypertrace.Init(cfg)\n\tdefer flusher()\n\n\tvar (\n\t\tdriver driver.Driver\n\t\tdb *sql.DB\n\t)\n\n\t// Explicitly wrap the MySQLDriver driver with hypersql.\n\tdriver = hypersql.Wrap(&mysql.MySQLDriver{})\n\n\t// Register our hypersql wrapper as a database driver.\n\tsql.Register(\"ht-mysql\", driver)\n\n\tdb, err 
:= sql.Open(\"ht-mysql\", \"root:root@tcp(localhost)/\")\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to connect the DB: %v\", err)\n\t}\n\n\tconst dbPingRetries = 5\n\tfor i := 0; i <= dbPingRetries; i++ {\n\t\tif err := db.Ping(); err != nil && i == dbPingRetries {\n\t\t\tlog.Fatalf(\"failed to ping the DB: %v\", err)\n\t\t}\n\t\ttime.Sleep(time.Second)\n\t}\n\n\trows, err := db.QueryContext(context.Background(), \"SELECT 'Hi there :)'\")\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to retrieve message: %v\", err)\n\t}\n\n\tfor rows.Next() {\n\t\tm := new(string)\n\t\trows.Scan(m)\n\t\tfmt.Println(*m)\n\t}\n}", "func (h *DNSHandler) DoUDP(w dns.ResponseWriter, req *dns.Msg) {\n\th.do(\"udp\", w, req)\n}", "func (c *queryClient) Send(body interface{}) (io.ReadCloser, error) {\n\n\tjsonBody, err := json.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar reqBody = new(bytes.Buffer)\n\t_, err = reqBody.Write(jsonBody)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := c.client.Post(c.url, \"application/json\", reqBody)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif resp.StatusCode == 200 {\n\t\treturn resp.Body, nil\n\t}\n\n\t// decode and return error if the status isn't 200\n\trespBody, err := io.ReadAll(resp.Body)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn nil, errors.New(string(respBody))\n}", "func handleDNSRequest(w dns.ResponseWriter, r *dns.Msg) {\n\tdefer w.Close()\n\n\tm := new(dns.Msg)\n\tm.SetReply(r)\n\tm.Compress = false\n\n\tswitch r.Opcode {\n\tcase dns.OpcodeQuery:\n\t\tparseQuery(m)\n\t}\n\n\tw.WriteMsg(m)\n}", "func (instance *NDiscovery) sendCommand(conn *lygo_n_net.NConn, command string, params map[string]interface{}) *lygo_n_commons.Response {\n\treturn conn.Send(command, params)\n}", "func (s *server) Query(c context.Context, req *logdog.QueryRequest) (*logdog.QueryResponse, error) {\n\t// Non-admin users may not request purged results.\n\tcanSeePurged := true\n\tswitch yes, err := coordinator.CheckAdminUser(c); {\n\tcase err != nil:\n\t\treturn nil, status.Error(codes.Internal, \"internal server error\")\n\tcase !yes:\n\t\tcanSeePurged = false\n\t\tif req.Purged == logdog.QueryRequest_YES {\n\t\t\tlog.Errorf(c, \"Non-superuser requested to see purged logs. Denying.\")\n\t\t\treturn nil, status.Errorf(codes.InvalidArgument, \"non-admin user cannot request purged log streams\")\n\t\t}\n\t}\n\n\t// Scale the maximum number of results based on the number of queries in this\n\t// request. If the user specified a maximum result count of zero, use the\n\t// default maximum.\n\t//\n\t// If this scaling results in a limit that is <1 per request, we will return\n\t// back a BadRequest error.\n\tlimit := s.resultLimit\n\tif limit == 0 {\n\t\tlimit = queryResultLimit\n\t}\n\n\t// Execute our queries in parallel.\n\tresp := logdog.QueryResponse{}\n\te := &queryRunner{\n\t\tctx: log.SetField(c, \"path\", req.Path),\n\t\treq: req,\n\t\tcanSeePurged: canSeePurged,\n\t\tlimit: limit,\n\t}\n\n\tstartTime := clock.Now(c)\n\tif err := e.runQuery(&resp); err != nil {\n\t\t// Transient errors would be handled at the \"execute\" level, so these are\n\t\t// specific failure errors. 
We must escalate individual errors to the user.\n\t\t// We will choose the most severe of the resulting errors.\n\t\tlog.WithError(err).Errorf(c, \"Failed to execute query.\")\n\t\treturn nil, err\n\t}\n\tlog.Infof(c, \"Query took: %s\", clock.Now(c).Sub(startTime))\n\treturn &resp, nil\n}", "func (c *Client) Query(ctx context.Context, q interface{}, variables map[string]interface{}) error {\n\treturn c.gql.Query(ctx, q, variables)\n}", "func (s *Server) sqlHandler(w http.ResponseWriter, req *http.Request) {\n\tstate := s.cluster.State()\n\tif state != \"primary\" {\n\t\thttp.Error(w, \"Only the primary can service queries, but this is a \"+state, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tquery, err := ioutil.ReadAll(req.Body)\n\tif err != nil {\n\t\tlog.Printf(\"Couldn't read body: %s\", err)\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t}\n\n\tlog.Debugf(\"[%s] Received query: %#v\", s.cluster.State(), string(query))\n\tresp, err := s.execute(query)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t}\n\n\tr := &Replicate{\n\t\tSelf: s.cluster.self,\n\t\tQuery: query,\n\t}\n\tfor _, member := range s.cluster.members {\n\t\tb := util.JSONEncode(r)\n\t\t_, err := s.client.SafePost(member.ConnectionString, \"/replicate\", b)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Couldn't replicate query to %v: %s\", member, err)\n\t\t}\n\t}\n\n\tlog.Debugf(\"[%s] Returning response to %#v: %#v\", s.cluster.State(), string(query), string(resp))\n\tw.Write(resp)\n}", "func (s Session) SendDH(keyInfo *api.KeyExchange) error {\n\trandomPadding, err := getRandomPading(128)\n\tif err != nil {\n\t\treturn err\n\t}\n\tkeyInfo.RandomPadding = randomPadding\n\tmarshaledKey, err := proto.Marshal(keyInfo)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error marshaling key %v\", err)\n\t}\n\tmarshLen := uint32(len(marshaledKey))\n\tlenBuf := make([]byte, 4)\n\tbinary.LittleEndian.PutUint32(lenBuf, 64+marshLen)\n\tfullKeyMsg := make([]byte, 4+64+marshLen)\n\tsigned := signMessage(hashMessage(marshaledKey))\n\tcopy(fullKeyMsg, lenBuf)\n\tcopy(fullKeyMsg[4:], signed)\n\tcopy(fullKeyMsg[4+64:], marshaledKey)\n\n\tif _, err := s.connection.Write(fullKeyMsg); err != nil {\n\t\treturn fmt.Errorf(\"Error sending dh key %v\", err)\n\t}\n\treturn nil\n}", "func (p *MyServiceThreadsafeClient) Query(s *module0.MyStruct, i *includes1.Included) (err error) {\n p.Mu.Lock()\n defer p.Mu.Unlock()\n args := MyServiceQueryArgs{\n S : s,\n I : i,\n }\n err = p.CC.SendMsg(\"query\", &args, thrift.CALL)\n if err != nil { return }\n return p.recvQuery()\n}", "func (r *Results) send(host string) error {\n\tvar payload postBody\n\tpayload.Body = *r\n\tbody, err := json.Marshal(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\turl := fmt.Sprintf(\"%s/event\", host)\n\terr = post(body, url)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (s *Server) sqlHandler(w http.ResponseWriter, req *http.Request) {\n if(s.block) {\n time.Sleep(1000000* time.Second)\n }\n\n\tquery, err := ioutil.ReadAll(req.Body)\n\tif err != nil {\n\t\tlog.Printf(\"Couldn't read body: %s\", err)\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t}\n\n\tif s.leader != s.listen {\n\n\t\tcs, errLeader := transport.Encode(s.leader)\n\t\t\n\t\tif errLeader != nil {\n\t\t\thttp.Error(w, \"Only the primary can service queries, but this is a secondary\", http.StatusBadRequest)\t\n\t\t\tlog.Printf(\"Leader ain't present?: %s\", errLeader)\n\t\t\treturn\n\t\t}\n\n\t\t//_, errLeaderHealthCheck := s.client.SafeGet(cs, 
\"/healthcheck\") \n\n //if errLeaderHealthCheck != nil {\n // http.Error(w, \"Primary is down\", http.StatusBadRequest)\t\n // return\n //}\n\n\t\tbody, errLResp := s.client.SafePost(cs, \"/sql\", bytes.NewBufferString(string(query)))\n\t\tif errLResp != nil {\n s.block = true\n http.Error(w, \"Can't forward request to primary, gotta block now\", http.StatusBadRequest)\t\n return \n\t//\t log.Printf(\"Didn't get reply from leader: %s\", errLResp)\n\t\t}\n\n formatted := fmt.Sprintf(\"%s\", body)\n resp := []byte(formatted)\n\n\t\tw.Write(resp)\n\t\treturn\n\n\t} else {\n\n\t\tlog.Debugf(\"Primary Received query: %#v\", string(query))\n\t\tresp, err := s.execute(query)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t}\n\n\t\tw.Write(resp)\n\t\treturn\n\t}\n}" ]
[ "0.6129731", "0.60646135", "0.60184616", "0.6008058", "0.5970393", "0.5874811", "0.5786087", "0.5687894", "0.56664693", "0.5643446", "0.5627055", "0.5622696", "0.562168", "0.560722", "0.5519367", "0.5510898", "0.5503905", "0.5501435", "0.54969597", "0.54964566", "0.5490829", "0.54814583", "0.5480934", "0.5464118", "0.5444667", "0.5435051", "0.54252136", "0.54125416", "0.54076236", "0.53780884", "0.5370704", "0.53536826", "0.53495353", "0.53361505", "0.5331687", "0.5283083", "0.5276815", "0.52667075", "0.5261457", "0.52553093", "0.52079725", "0.5200533", "0.5190562", "0.51838994", "0.51696366", "0.5165633", "0.51532453", "0.51417273", "0.5129028", "0.51256466", "0.512011", "0.5114434", "0.5097646", "0.5077658", "0.50766027", "0.5056672", "0.505502", "0.5044682", "0.50436425", "0.5037039", "0.503321", "0.5014294", "0.50129", "0.50119585", "0.5005497", "0.5003186", "0.49988815", "0.49969843", "0.49791366", "0.49669486", "0.4955745", "0.49514169", "0.49508998", "0.49498418", "0.4944902", "0.4941832", "0.49311256", "0.49217153", "0.4908254", "0.49054524", "0.49051982", "0.48988897", "0.48903152", "0.4873847", "0.48675552", "0.4863292", "0.4859833", "0.48497713", "0.48389304", "0.48368225", "0.48292363", "0.48251298", "0.48168442", "0.4811108", "0.48079622", "0.48055506", "0.48016658", "0.48002937", "0.4792943", "0.47914836", "0.47896582" ]
0.0
-1
Check that a DNS query is converted correctly into an HTTP query.
func TestRequest(t *testing.T) { doh, _ := NewTransport(testURL, ips, nil, nil, nil) transport := doh.(*transport) rt := makeTestRoundTripper() transport.client.Transport = rt go doh.Query(simpleQueryBytes) req := <-rt.req if req.URL.String() != testURL { t.Errorf("URL mismatch: %s != %s", req.URL.String(), testURL) } reqBody, err := ioutil.ReadAll(req.Body) if err != nil { t.Error(err) } if len(reqBody)%PaddingBlockSize != 0 { t.Errorf("reqBody has unexpected length: %d", len(reqBody)) } // Parse reqBody into a Message. newQuery := mustUnpack(reqBody) // Ensure the converted request has an ID of zero. if newQuery.Header.ID != 0 { t.Errorf("Unexpected request header id: %v", newQuery.Header.ID) } // Check that all fields except for Header.ID and Additionals // are the same as the original. Additionals may differ if // padding was added. if !queriesMostlyEqual(simpleQuery, *newQuery) { t.Errorf("Unexpected query body:\n\t%v\nExpected:\n\t%v", newQuery, simpleQuery) } contentType := req.Header.Get("Content-Type") if contentType != "application/dns-message" { t.Errorf("Wrong content type: %s", contentType) } accept := req.Header.Get("Accept") if accept != "application/dns-message" { t.Errorf("Wrong Accept header: %s", accept) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestShortQuery(t *testing.T) {\n\tvar qerr *queryError\n\tdoh, _ := NewTransport(testURL, ips, nil, nil, nil)\n\t_, err := doh.Query([]byte{})\n\tif err == nil {\n\t\tt.Error(\"Empty query should fail\")\n\t} else if !errors.As(err, &qerr) {\n\t\tt.Errorf(\"Wrong error type: %v\", err)\n\t} else if qerr.status != BadQuery {\n\t\tt.Errorf(\"Wrong error status: %d\", qerr.status)\n\t}\n\n\t_, err = doh.Query([]byte{1})\n\tif err == nil {\n\t\tt.Error(\"One byte query should fail\")\n\t} else if !errors.As(err, &qerr) {\n\t\tt.Errorf(\"Wrong error type: %v\", err)\n\t} else if qerr.status != BadQuery {\n\t\tt.Errorf(\"Wrong error status: %d\", qerr.status)\n\t}\n}", "func parseDNSQuery(req []byte) (que *dnsQuestion, err error) {\n\thead := &dnsHeader{}\n\thead.getHeader(req)\n\tif !head.isAQuery() {\n\t\treturn nil, errors.New(\"not a dns query, ignore\")\n\t}\n\tque = &dnsQuestion{\n\t\tevent: eventNothing,\n\t}\n\t// Generally, when the recursive DNS server requests upward, it may\n\t// initiate a resolution request for multiple aliases/domain names\n\t// at once, Edge DNS does not need to process a message that carries\n\t// multiple questions at a time.\n\tif head.qdCount != 1 {\n\t\tque.event = eventUpstream\n\t\treturn\n\t}\n\n\toffset := uint16(unsafe.Sizeof(dnsHeader{}))\n\t// DNS NS <ROOT> operation\n\tif req[offset] == 0x0 {\n\t\tque.event = eventUpstream\n\t\treturn\n\t}\n\tque.getQuestion(req, offset, head)\n\terr = nil\n\treturn\n}", "func HTTPQuery(request *http.Request) *http.Response {\n\tclient := &http.Client{\n\t\tTransport: &http.Transport {\n\t\t\t// Support for 'HTTP[S]_PROXY'/'NO_PROXY' envvars\n\t\t\tProxy: http.ProxyFromEnvironment,\n\t\t\t// Support main CLI's 'core.ssl_verify' setting\n\t\t\tTLSClientConfig: buildTLSConfig(),\n\t\t},\n\t}\n\tvar err interface{}\n\tresponse, err := client.Do(request)\n\tswitch err.(type) {\n\tcase *url.Error:\n\t\t// extract wrapped error\n\t\terr = err.(*url.Error).Err\n\t}\n\tif err != nil {\n\t\tswitch err.(type) {\n\t\tcase x509.UnknownAuthorityError:\n\t\t\t// custom suggestions for a certificate error:\n\t\t\tPrintMessage(\"HTTP %s Query for %s failed: %s\", request.Method, request.URL, err)\n\t\t\tPrintMessage(\"- Is the cluster CA certificate configured correctly? Check 'dcos config show core.ssl_verify'.\")\n\t\t\tPrintMessageAndExit(\"- To ignore the unvalidated certificate and force your command (INSECURE), use --force-insecure\")\n\t\tdefault:\n\t\t\tPrintMessage(\"HTTP %s Query for %s failed: %s\", request.Method, request.URL, err)\n\t\t\tPrintMessage(\"- Is 'core.dcos_url' set correctly? Check 'dcos config show core.dcos_url'.\")\n\t\t\tPrintMessage(\"- Is 'core.dcos_acs_token' set correctly? Run 'dcos auth login' to log in.\")\n\t\t\tPrintMessageAndExit(\"- Are any needed proxy settings set correctly via HTTP_PROXY/HTTPS_PROXY/NO_PROXY? 
Check with your network administrator.\")\n\t\t}\n\t}\n\treturn response\n}", "func checkUrl(writer http.ResponseWriter, request *http.Request) {\n\tvars := mux.Vars(request)\n\thostname := vars[\"hostname\"]\n\tquerypath := vars[\"querypath\"]\n\n\tresponse := APIResponse{}\n\terr := utils.ValidateUrl(hostname)\n\tif err != nil {\n\t\tresponse.BadRequest(err)\n\t\thttp_respond(response, writer)\n\t\treturn\n\t}\n\n\tdecodedPath, err := utils.URLDecode(querypath)\n\tif err != nil {\n\t\tresponse.BadRequest(err)\n\t\thttp_respond(response, writer)\n\t\treturn\n\t}\n\n\t// Generate URL service for querying the URL\n\turlService, err := services.NewUrlService(hostname, decodedPath, config.DBType, config.CacheType)\n\n\tif err != nil {\n\t\tutils.LogError(utils.LogFields{\"hostname\": hostname, \"path\": decodedPath}, err, \"Error getting URL\")\n\t\tresponse.InternalError(errors.New(\"An error occurred\"))\n\t\thttp_respond(response, writer)\n\t\treturn\n\t}\n\n\turlStatus, err := urlService.FindUrl()\n\tif err != nil {\n\t\tutils.LogError(utils.LogFields{\"hostname\": hostname, \"path\": decodedPath}, err, \"Error getting URL\")\n\t\tresponse.InternalError(errors.New(\"An error occurred\"))\n\t} else {\n\t\tresponse.Success(urlStatus)\n\t}\n\n\thttp_respond(response, writer)\n}", "func checkQueryStringHandler(w ResponseWriter, r *Request) {\n\tu := *r.URL\n\tu.Scheme = HTTP\n\tu.Host = r.Host\n\tu.RawQuery = \"\"\n\tif HttpUrlPrefix+r.URL.RawQuery == u.String() {\n\t\tw.WriteHeader(200)\n\t} else {\n\t\tw.WriteHeader(500)\n\t}\n}", "func CheckHTTP(url string, redirect, insecure bool, host string, timeout int, format, path, expectedValue, expression string) (string, int) {\n\tconst checkName = \"CheckHttp\"\n\tvar retCode int\n\tvar msg string\n\n\tacceptText, err := getAcceptText(format)\n\tif err != nil {\n\t\tmsg, _ = resultMessage(checkName, statusTextCritical, fmt.Sprintf(\"The format (--format) \\\"%s\\\" is not valid. The only valid value is \\\"json\\\".\", format))\n\n\t\treturn msg, 2\n\t}\n\n\tstatus, body, _ := statusCode(url, insecure, timeout, acceptText, host)\n\n\tretCode, responseStateText := evaluateStatusCode(status, redirect)\n\tresponseCode := strconv.Itoa(status)\n\n\tvar checkMsg = \"\"\n\tif retCode == 0 && len(format) > 0 && len(path) > 0 {\n\t\tvar queryValue string\n\n\t\tswitch {\n\t\tcase format == \"json\":\n\t\t\texpectedValueLen := len(expectedValue)\n\t\t\texpressionLen := len(expression)\n\n\t\t\tvalue := gojsonq.New().JSONString(body).Find(path)\n\n\t\t\tif value == nil {\n\t\t\t\tretCode = 2\n\t\t\t\tresponseStateText = statusTextCritical\n\t\t\t\tcheckMsg = fmt.Sprintf(\". No entry at path %s\", path)\n\t\t\t} else if expectedValueLen > 0 && expressionLen > 0 {\n\t\t\t\tretCode = 2\n\t\t\t\tresponseStateText = statusTextCritical\n\t\t\t\tcheckMsg = fmt.Sprintf(\". Both --expectedValue and --expression given but only one is used\")\n\t\t\t} else if expectedValueLen > 0 {\n\t\t\t\tqueryValue = fmt.Sprintf(\"%v\", value)\n\t\t\t\tretCode, responseStateText, checkMsg = evaluateExpectedValue(queryValue, expectedValue, path)\n\t\t\t} else if expressionLen > 0 {\n\t\t\t\tretCode, responseStateText, checkMsg = evaluateExpression(value, expression, path)\n\t\t\t} else {\n\t\t\t\tretCode = 2\n\t\t\t\tresponseStateText = statusTextCritical\n\t\t\t\tcheckMsg = fmt.Sprintf(\". 
--expectedValue or --expression not given\")\n\t\t\t}\n\t\t}\n\t}\n\n\tmsg, _ = resultMessage(checkName, responseStateText, fmt.Sprintf(\"Url %s responded with %s%s\", url, responseCode, checkMsg))\n\n\treturn msg, retCode\n}", "func queryDNS(o []byte, t, id, domain string, exfilLen int) []string {\n\tvar as []string\n\tfor _, q := range createQueries(o, t, id, domain, exfilLen) {\n\t\ta, err := net.LookupTXT(q)\n\t\tif nil != err && !strings.HasSuffix(\n\t\t\terr.Error(),\n\t\t\t\"no such host\",\n\t\t) && !strings.HasSuffix(\n\t\t\terr.Error(),\n\t\t\t\"No records found for given DNS query.\",\n\t\t) {\n\t\t\tlog.Printf(\"Query error (%v): %v\", q, err)\n\t\t}\n\t\tif nil != a {\n\t\t\tas = append(as, a...)\n\t\t}\n\t}\n\treturn as\n}", "func (h *dnsHeader) isAQuery() bool {\n\treturn h.flags&dnsQR != dnsQR\n}", "func TestDnsMessageUncompressedQueryConfidenceCheck(t *testing.T) {\n\tm := mustUnpack(uncompressedQueryBytes)\n\tpackedBytes := mustPack(m)\n\tif len(packedBytes) >= len(uncompressedQueryBytes) {\n\t\tt.Errorf(\"Compressed query is not smaller than uncompressed query\")\n\t}\n}", "func TestWhoisQuery(t *testing.T) {\n\t// Retry WhoisQuery up to 3 times for network timeout errors.\n\tfor i := 0; i < 3; i++ {\n\t\tres, err := WhoisQuery(\"koding.com\", \"whois.arin.net\", 5*time.Second)\n\t\tif e, ok := err.(net.Error); ok && e.Timeout() {\n\t\t\tcontinue\n\t\t}\n\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tif res == \"\" {\n\t\t\tt.Fatal(\"Whois response empty.\")\n\t\t}\n\n\t\t// Use a the street name to validate the response\n\t\tif regexp.MustCompile(`(?i)brannan`).MatchString(res) != true {\n\t\t\tt.Fatal(\"Response does not match as expected.\" +\n\t\t\t\t`Wanted the regexp \"brannan\" to match`)\n\t\t}\n\n\t\treturn\n\t}\n\n\tt.Fatal(\"exceeded max retry attempts for WhoisQuery\")\n}", "func (d *Dnsfilter) CheckHost(host string) (Result, error) {\n\t// sometimes DNS clients will try to resolve \".\", which is a request to get root servers\n\tif host == \"\" {\n\t\treturn Result{Reason: NotFilteredNotFound}, nil\n\t}\n\thost = strings.ToLower(host)\n\t// prevent recursion\n\tif host == d.parentalServer || host == d.safeBrowsingServer {\n\t\treturn Result{}, nil\n\t}\n\n\t// try filter lists first\n\tresult, err := d.matchHost(host)\n\tif err != nil {\n\t\treturn result, err\n\t}\n\tif result.Reason.Matched() {\n\t\treturn result, nil\n\t}\n\n\t// check safeSearch if no match\n\tif d.SafeSearchEnabled {\n\t\tresult, err = d.checkSafeSearch(host)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Failed to safesearch HTTP lookup, ignoring check: %v\", err)\n\t\t\treturn Result{}, nil\n\t\t}\n\n\t\tif result.Reason.Matched() {\n\t\t\treturn result, nil\n\t\t}\n\t}\n\n\t// check safebrowsing if no match\n\tif d.SafeBrowsingEnabled {\n\t\tresult, err = d.checkSafeBrowsing(host)\n\t\tif err != nil {\n\t\t\t// failed to do HTTP lookup -- treat it as if we got empty response, but don't save cache\n\t\t\tlog.Printf(\"Failed to do safebrowsing HTTP lookup, ignoring check: %v\", err)\n\t\t\treturn Result{}, nil\n\t\t}\n\t\tif result.Reason.Matched() {\n\t\t\treturn result, nil\n\t\t}\n\t}\n\n\t// check parental if no match\n\tif d.ParentalEnabled {\n\t\tresult, err = d.checkParental(host)\n\t\tif err != nil {\n\t\t\t// failed to do HTTP lookup -- treat it as if we got empty response, but don't save cache\n\t\t\tlog.Printf(\"Failed to do parental HTTP lookup, ignoring check: %v\", err)\n\t\t\treturn Result{}, nil\n\t\t}\n\t\tif result.Reason.Matched() {\n\t\t\treturn result, 
nil\n\t\t}\n\t}\n\n\t// nothing matched, return nothing\n\treturn Result{}, nil\n}", "func TestQueryIntegration(t *testing.T) {\n\tqueryData := []byte{\n\t\t111, 222, // [0-1] query ID\n\t\t1, 0, // [2-3] flags, RD=1\n\t\t0, 1, // [4-5] QDCOUNT (number of queries) = 1\n\t\t0, 0, // [6-7] ANCOUNT (number of answers) = 0\n\t\t0, 0, // [8-9] NSCOUNT (number of authoritative answers) = 0\n\t\t0, 0, // [10-11] ARCOUNT (number of additional records) = 0\n\t\t// Start of first query\n\t\t7, 'y', 'o', 'u', 't', 'u', 'b', 'e',\n\t\t3, 'c', 'o', 'm',\n\t\t0, // null terminator of FQDN (DNS root)\n\t\t0, 1, // QTYPE = A\n\t\t0, 1, // QCLASS = IN (Internet)\n\t}\n\n\ttestQuery := func(queryData []byte) {\n\n\t\tdoh, err := NewTransport(testURL, ips, nil, nil, nil)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tresp, err2 := doh.Query(queryData)\n\t\tif err2 != nil {\n\t\t\tt.Fatal(err2)\n\t\t}\n\t\tif resp[0] != queryData[0] || resp[1] != queryData[1] {\n\t\t\tt.Error(\"Query ID mismatch\")\n\t\t}\n\t\tif len(resp) <= len(queryData) {\n\t\t\tt.Error(\"Response is short\")\n\t\t}\n\t}\n\n\ttestQuery(queryData)\n\n\tpaddedQueryBytes, err := AddEdnsPadding(simpleQueryBytes)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttestQuery(paddedQueryBytes)\n}", "func checkDSNParams(dsn string) error {\n\tdsnURL, err := url.Parse(dsn)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tparams, err := url.ParseQuery(dsnURL.RawQuery)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif params.Get(\"parseTime\") != \"true\" {\n\t\treturn ErrDSNParam\n\t}\n\n\treturn nil\n}", "func domainFrontQuery(c *http.Client, q string) ([]string, error) {\n\t/* Request the domain */\n\tres, err := c.Get(DOHPREFIX + q)\n\tif nil != err {\n\t\treturn nil, err\n\t}\n\n\t/* Make sure we made an ok request */\n\tif http.StatusOK != res.StatusCode {\n\t\treturn nil, fmt.Errorf(\"received %s\", res.Status)\n\t}\n\n\t/* Get HTTP response body */\n\tvar b bytes.Buffer\n\tn, err := b.ReadFrom(res.Body)\n\tdefer res.Body.Close()\n\tif nil != err {\n\t\treturn nil, err\n\t}\n\tif 0 == n {\n\t\treturn nil, errors.New(\"empty body\")\n\t}\n\n\t/* Unroll answer */\n\tvar a googleResponse\n\tif err := json.Unmarshal(b.Bytes(), &a); nil != err {\n\t\treturn nil, err\n\t}\n\n\t/* Make sure it worked */\n\tif 0 != a.Status {\n\t\treturn nil, fmt.Errorf(\"unsuccessful, status %v\", a.Status)\n\t}\n\n\t/* Make sure we didn't get truncated */\n\tif a.TC {\n\t\treturn nil, errors.New(\"truncated answer\")\n\t}\n\n\tvar ss []string\n\tfor _, s := range a.Answers {\n\t\td, err := strconv.Unquote(s.Data)\n\t\tif nil != err {\n\t\t\tlog.Printf(\"Unable to unmarshal %q: %v\", s.Data, err)\n\t\t\tcontinue\n\t\t}\n\t\tss = append(ss, d)\n\t}\n\treturn ss, nil\n}", "func handleDNSRequest(w dns.ResponseWriter, r *dns.Msg) {\n\tdefer w.Close()\n\n\tm := new(dns.Msg)\n\tm.SetReply(r)\n\tm.Compress = false\n\n\tswitch r.Opcode {\n\tcase dns.OpcodeQuery:\n\t\tparseQuery(m)\n\t}\n\n\tw.WriteMsg(m)\n}", "func CheckHost(ip net.IP, domain string) (Result, error) {\n\tr := &resolution{ip, 0, nil}\n\treturn r.Check(domain)\n}", "func validHTTP(req *http.Request, resp *http.Response) error {\r\n\tswitch {\r\n\tcase resp.StatusCode >= 400:\r\n\t\treturn errNotFound\r\n\tcase resp.StatusCode >= 300 && resp.StatusCode != http.StatusNotModified:\r\n\t\tl := resp.Header.Get(\"Location\")\r\n\t\tu, err := url.Parse(l)\r\n\t\tif err != nil {\r\n\t\t\treturn err\r\n\t\t}\r\n\t\tif u.Host == \"alert.scansafe.net\" {\r\n\t\t\treturn errFiltered\r\n\t\t}\r\n\t}\r\n\treturn nil\r\n}", 
"func dnsQuery(domain, name, server string) ([]recon.DNSAnswer, error) {\n\tvar resolved bool\n\n\tanswers, name := recursiveCNAME(name, server)\n\t// Obtain the DNS answers for the A records related to the name\n\tans, err := recon.ResolveDNS(name, server, \"A\")\n\tif err == nil {\n\t\tanswers = append(answers, ans...)\n\t\tresolved = true\n\t}\n\t// Obtain the DNS answers for the AAAA records related to the name\n\tans, err = recon.ResolveDNS(name, server, \"AAAA\")\n\tif err == nil {\n\t\tanswers = append(answers, ans...)\n\t\tresolved = true\n\t}\n\n\tif !resolved {\n\t\treturn []recon.DNSAnswer{}, errors.New(\"No A or AAAA records resolved for the name\")\n\t}\n\treturn answers, nil\n}", "func ValidateQuery(query string) (bool, error) {\n\n\t// simple sql pattern\n\tpattern := \"(select|SELECT) ([a-zA-Z]+(,[a-zA-Z]+)*) (from|FROM) [a-zA-Z]+(\\\\.[a-zA-Z]+)* ((limit|LIMIT) [0-9]+)? ((orderby|ORDERBY) (asc|desc|ASC|DESC))?;\"\n\tmatched, err := regexp.MatchString(pattern, query)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn matched, nil\n}", "func (f HandlerQueryFunc) QueryDNS(w RequestWriter, r *Msg) {\n\tgo f(w, r)\n}", "func TestDnsMessageCompressedQueryConfidenceCheck(t *testing.T) {\n\tm := mustUnpack(compressedQueryBytes)\n\tpackedBytes := mustPack(m)\n\tif len(packedBytes) != len(compressedQueryBytes) {\n\t\tt.Errorf(\"Packed query has different size than original:\\n %v\\n %v\", packedBytes, compressedQueryBytes)\n\t}\n}", "func domainCheck(host string) bool {\n\tcheck, err := regexp.MatchString(\".[a-z]+$\", host)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\treturn check\n}", "func (o *PluginDnsClient) Query(queries []utils.DnsQueryParams, socket transport.SocketApi) error {\n\tif o.IsNameServer() {\n\t\treturn fmt.Errorf(\"Querying is not permitted for Dns Name Servers!\")\n\t}\n\tquestions, err := utils.BuildQuestions(queries)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(questions) > 0 {\n\t\tdata := o.dnsPktBuilder.BuildQueryPkt(questions, o.Tctx.Simulation)\n\t\tif socket == nil {\n\t\t\treturn fmt.Errorf(\"Invalid Socket in Query!\")\n\t\t}\n\t\ttransportErr, _ := o.socket.Write(data)\n\t\tif transportErr != transport.SeOK {\n\t\t\to.stats.socketWriteError++\n\t\t\treturn transportErr.Error()\n\t\t}\n\t\to.stats.pktTxDnsQuery++ // successfully sent query\n\t\to.stats.txBytes += uint64(len(data)) // number of bytes sent\n\t}\n\treturn nil\n}", "func (e ENS) Query(domain string, name string, qtype uint16, do bool) ([]dns.RR, error) {\n\tlog.Debugf(\"request type %d for name %s in domain %v\", qtype, name, domain)\n\n\tresults := make([]dns.RR, 0)\n\n\t// Short-circuit empty ENS domain\n\tif domain == \".\" {\n\t\treturn results, nil\n\t}\n\n\tif strings.HasSuffix(domain, e.EthLinkRoot) {\n\t\tvar ethLinkResults []dns.RR\n\t\tvar err error\n\t\t// This is a link request, using a secondary domain (e.g. 
eth.link) to redirect to .eth domains.\n\t\t// Map to a .eth domain and provide relevant (munged) information\n\t\tswitch qtype {\n\t\tcase dns.TypeSOA:\n\t\t\tethLinkResults, err = e.handleEthLinkSOA(name, domain)\n\t\tcase dns.TypeTXT:\n\t\t\tethLinkResults, err = e.handleEthLinkTXT(name, domain)\n\t\tcase dns.TypeA:\n\t\t\tethLinkResults, err = e.handleEthLinkA(name, domain)\n\t\tcase dns.TypeAAAA:\n\t\t\tethLinkResults, err = e.handleEthLinkAAAA(name, domain)\n\t\tdefault:\n\t\t\t// Unknown request; ignore\n\t\t\tethLinkResults = make([]dns.RR, 0)\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Warnf(\"failed to handle an EthLink %v request for %v: %v\", qtype, name, err)\n\t\t} else {\n\t\t\tresults = append(results, ethLinkResults...)\n\t\t}\n\t\treturn results, err\n\t}\n\n\t// Fetch whatever data we have on-chain for this RRset\n\tresolver, err := ens.NewDNSResolver(e.Client, strings.TrimSuffix(domain, \".\"))\n\tif err != nil {\n\t\tif err.Error() != \"no contract code at given address\" {\n\t\t\tlog.Warnf(\"error obtaining DNS resolver for %v: %v\", domain, err)\n\t\t}\n\t\treturn results, err\n\t}\n\n\tdata, err := resolver.Record(name, qtype)\n\tif err != nil {\n\t\tlog.Warnf(\"error obtaining DNS record: %v\", err)\n\t\treturn results, err\n\t}\n\n\toffset := 0\n\tfor offset < len(data) {\n\t\tvar result dns.RR\n\t\tresult, offset, err = dns.UnpackRR(data, offset)\n\t\tif err == nil {\n\t\t\tresults = append(results, result)\n\t\t}\n\t}\n\n\treturn results, err\n}", "func validateHTTP(url string) error {\n\treq := []byte(`{\"res\":\"PASS\"}`)\n\terr := sendHTTP(url, req)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get response: %v\", err)\n\t}\n\toutput, err := ioutil.ReadFile(outputFile)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"reading output file: %v\", err)\n\t}\n\tif string(output) != string(req) {\n\t\treturn fmt.Errorf(\"unexpected HTTP data: got %s, want %s\", output, req)\n\t}\n\treturn nil\n}", "func TestWsDialAddress(t *testing.T) {\n\ttables := []struct {\n\t\trawurl string\n\t\tresultHost string\n\t}{\n\t\t{\"https://dxchain.com/\", \"dxchain.com\"},\n\t\t{\"http://explorer.dxchain.com/\", \"explorer.dxchain.com\"},\n\t\t{\"wss://dxchain.com:8080/\", \"dxchain.com:8080\"},\n\t\t{\"ws://explorer.dxchain.com:1688/\", \"explorer.dxchain.com:1688\"},\n\t\t{\"wss://dxchain.com/\", \"dxchain.com:443\"},\n\t\t{\"ws://explorer.dxchain.com/\", \"explorer.dxchain.com:80\"},\n\t}\n\n\tfor _, table := range tables {\n\t\tformatUrl, err := url.Parse(table.rawurl)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error while paring the rawurl: %s\", err)\n\t\t} else {\n\t\t\thost := wsDialAddress(formatUrl)\n\t\t\tif host != table.resultHost {\n\t\t\t\tt.Errorf(\"Input rawurl %s, got host %s, expecte host %s\",\n\t\t\t\t\ttable.rawurl, host, table.resultHost)\n\t\t\t}\n\t\t}\n\t}\n}", "func validateNeighborsQuery(value string) (string, error) {\n\tif len(value) < 3 {\n\t\t// Maybe make configurable,\n\t\t// A length of 3 would be sufficient for \"DFN\" and\n\t\t// other shorthands.\n\t\treturn \"\", ErrQueryTooShort\n\t}\n\treturn value, nil\n}", "func IsExistedUriQuery(queryParams []string) bool {\n for _, s := range queryParams {\n if 0 < len(s) {\n if strings.Contains(s,string(TargetPrefix)) || strings.Contains(s,string(TargetPort)) || strings.Contains(s,string(TargetProtocol)) ||\n strings.Contains(s,string(TargetFqdn)) || strings.Contains(s,string(TargetUri)) || strings.Contains(s,string(AliasName)) ||\n strings.Contains(s,string(SourcePrefix)) || 
strings.Contains(s,string(SourcePort)) || strings.Contains(s,string(SourceIcmpType)) ||\n strings.Contains(s,string(Content)) {\n return true\n }\n }\n }\n return false\n}", "func validateQueryParameter(field *surface_v1.Field) {\n\t_, isScalar := protoBufScalarTypes[field.NativeType]\n\tif !(field.Kind == surface_v1.FieldKind_SCALAR ||\n\t\t(field.Kind == surface_v1.FieldKind_ARRAY && isScalar) ||\n\t\t(field.Kind == surface_v1.FieldKind_REFERENCE)) {\n\t\tlog.Println(\"The query parameter with the Name \" + field.Name + \" is invalid. \" +\n\t\t\t\"Note that fields which are mapped to URL query parameters must have a primitive type or\" +\n\t\t\t\" a repeated primitive type or a non-repeated message type. \" +\n\t\t\t\"See: https://github.com/googleapis/googleapis/blob/master/google/api/http.proto#L118 for more information.\")\n\t}\n\n}", "func (suite *DnsQuerySuite) TestShouldHandleQueryForManagedZone() {\n\tseedDBwithRecords(suite.DB, defaultSeedRecords)\n\n\tmockMetricsService := NewMockMetricsService()\n\n\tgo serveDNS(suite.DB, defaultDnsConfig, &mockMetricsService)\n\n\t// Avoid connection refused because the DNS server is not ready\n\t// FIXME: I tried to set the Timeout + Dialtimeout for the client\n\t// but that seem's to have no effect\n\ttime.Sleep(100 * time.Millisecond)\n\n\tclient := new(dns.Client)\n\tm := new(dns.Msg)\n\n\tm.Question = append(m.Question, dns.Question{\"a.rock.\", dns.TypeA, dns.ClassINET})\n\tm.RecursionDesired = true\n\n\tr, _, err := client.Exchange(m, \"127.0.0.1:8053\")\n\n\tif err != nil {\n\t\tfmt.Printf(\"%s\", err.Error())\n\t\tsuite.Fail(\"error on exchange\")\n\t}\n\n\tif r.Rcode != dns.RcodeSuccess {\n\t\tsuite.Fail(\" *** invalid answer name\")\n\t}\n\n\tsuite.Equal(len(defaultSeedRecords[0]), len(r.Answer))\n\n\tif answer, ok := r.Answer[0].(*dns.A); ok {\n\t\tsuite.True(answer.A.Equal(net.ParseIP(defaultSeedRecords[0][0].Content)))\n\t} else {\n\t\tsuite.Fail(\"Invalid dns answer type: requested a type A\")\n\t}\n}", "func isAnswerToQuery(sec section.WithSig, q section.Section) bool {\n\tswitch sec := sec.(type) {\n\tcase *section.Assertion:\n\t\tif q, ok := q.(*query.Name); ok {\n\t\t\tif q.Name == fmt.Sprintf(\"%s.%s\", sec.SubjectName, sec.SubjectZone) {\n\t\t\t\tfor _, oType := range q.Types {\n\t\t\t\t\tif _, ok := object.ContainsType(sec.Content, oType); ok {\n\t\t\t\t\t\treturn true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\tcase *section.Shard:\n\t\tif q, ok := q.(*query.Name); ok {\n\t\t\tif name, ok := getSubjectName(q.Name, sec.SubjectZone); ok {\n\t\t\t\treturn sec.InRange(name)\n\t\t\t}\n\t\t}\n\t\treturn false\n\tcase *section.Zone:\n\t\tif q, ok := q.(*query.Name); ok {\n\t\t\tif _, ok := getSubjectName(q.Name, sec.SubjectZone); ok {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\tcase *section.AddrAssertion:\n\t\t//TODO CFE implement the host address and network address case if delegation is a response\n\t\t//or not.\n\t\t_, ok := q.(*query.Address)\n\t\treturn ok\n\tdefault:\n\t\tlog.Error(\"Not supported message section with sig. 
This case must be prevented beforehand\")\n\t}\n\treturn true\n}", "func Query(host, domain string) (string, error) {\n\tvar (\n\t\td net.Dialer\n\t\tout string\n\t\terr error\n\t)\n\n\tctx, cancel := context.WithTimeout(context.Background(),\n\t\tTotalTimeout*time.Second)\n\tdefer cancel()\n\n\thostport := net.JoinHostPort(host, PortNumber)\n\tconn, err := d.DialContext(ctx, \"tcp\", hostport)\n\tif err != nil {\n\t\treturn out, err\n\t}\n\tdefer conn.Close()\n\n\terr = conn.SetWriteDeadline(time.Now().Add(WriteTimeout *\n\t\ttime.Second))\n\tif err != nil {\n\t\treturn out, err\n\t}\n\n\tif _, err := conn.Write([]byte(domain + \"\\r\\n\")); err != nil {\n\t\treturn out, err\n\t}\n\n\terr = conn.SetReadDeadline(time.Now().Add(ReadTimeout *\n\t\ttime.Second))\n\tif err != nil {\n\t\treturn out, err\n\t}\n\n\toutput, err := ioutil.ReadAll(conn)\n\tif err != nil {\n\t\treturn out, err\n\t}\n\n\tout = string(output)\n\treturn out, nil\n}", "func validChallengeAddr(a string) bool {\n\t// TODO: flesh this out. parse a, make configurable, support\n\t// IPv6. Good enough for now.\n\treturn strings.HasPrefix(a, \"10.\") || strings.HasPrefix(a, \"192.168.\")\n}", "func query(domain, server string) (string, error) {\n\tif server == \"whois.arin.net\" {\n\t\tdomain = \"n + \" + domain\n\t}\n\n\tconn, err := net.DialTimeout(\"tcp\", net.JoinHostPort(server, DEFAULT_WHOIS_PORT), time.Second*30)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"whois: connect to whois server failed: %v\", err)\n\t}\n\n\tdefer conn.Close()\n\t_ = conn.SetWriteDeadline(time.Now().Add(time.Second * 30))\n\t_, err = conn.Write([]byte(domain + \"\\r\\n\"))\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"whois: send to whois server failed: %v\", err)\n\t}\n\n\t_ = conn.SetReadDeadline(time.Now().Add(time.Second * 30))\n\tbuffer, err := ioutil.ReadAll(conn)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"whois: read from whois server failed: %v\", err)\n\t}\n\n\treturn string(buffer), nil\n}", "func TestQueryFunc(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.Header == nil {\n\t\t\tt.Errorf(\"Expected non-nil request Header\")\n\t\t}\n\t\tv := r.URL.Query()\n\t\tcheckQuery(t, v, \"query1\", \"test1\")\n\t\tcheckQuery(t, v, \"query2\", \"test2\")\n\t}))\n\tdefer ts.Close()\n\n\tNew().Post(ts.URL).\n\t\tQuery(\"query1=test1\").\n\t\tQuery(\"query2=test2\").\n\t\tEnd()\n\n\tqq := struct {\n\t\tQuery1 string `json:\"query1\"`\n\t\tQuery2 string `json:\"query2\"`\n\t}{\n\t\tQuery1: \"test1\",\n\t\tQuery2: \"test2\",\n\t}\n\tNew().Post(ts.URL).\n\t\tQuery(qq).\n\t\tEnd()\n}", "func ValidateResourceRecordUpdatesUsingCloudflareDNS(reqLogger logr.Logger, name string, value string) (bool, error) {\n\trequestUrl := cloudflareDNSOverHttpsEndpoint + \"?name=\" + name + \"&type=TXT\"\n\n\treqLogger.Info(fmt.Sprintf(\"cloudflare dns-over-https Request URL: %v\", requestUrl))\n\n\tvar request, err = http.NewRequest(\"GET\", requestUrl, nil)\n\tif err != nil {\n\t\treqLogger.Error(err, \"error occurred creating new cloudflare dns-over-https request\")\n\t\treturn false, err\n\t}\n\n\trequest.Header.Set(\"accept\", cloudflareRequestContentType)\n\n\tnetClient := &http.Client{\n\t\tTimeout: time.Second * cloudflareRequestTimeout,\n\t}\n\n\tresponse, err := netClient.Do(request)\n\tif err != nil {\n\t\treqLogger.Error(err, \"error occurred executing request\")\n\t\treturn false, err\n\t}\n\tdefer response.Body.Close()\n\n\tresponseBody, err := 
ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treqLogger.Error(err, \"\")\n\t\treturn false, err\n\t}\n\n\treqLogger.Info(\"response from Cloudflare: \" + string(responseBody))\n\n\tvar cloudflareResponse CloudflareResponse\n\n\terr = json.Unmarshal(responseBody, &cloudflareResponse)\n\tif err != nil {\n\t\treqLogger.Error(err, \"there was problem parsing the json response from cloudflare.\")\n\t\treturn false, err\n\t}\n\n\t// If there is no answer field, this is likely an expected NXDOMAIN response;\n\t// retry\n\tif len(cloudflareResponse.Answers) == 0 {\n\t\treqLogger.Info(\"no answers received from cloudflare; likely not propagated\")\n\t\treturn false, nil\n\t}\n\n\t// Name never has a trailing dot but he answer from Cloudflare sometimes does.\n\t// If the answer has a trailing dot we add one to the name we compare it to.\n\tif strings.HasSuffix(cloudflareResponse.Answers[0].Name, \".\") {\n\t\tname = name + \".\"\n\t}\n\n\tif len(cloudflareResponse.Answers) > 0 &&\n\t\tstrings.EqualFold(cloudflareResponse.Answers[0].Name, name) {\n\t\tcfData := cloudflareResponse.Answers[0].Data\n\t\t// trim quotes from value\n\t\tif len(cfData) >= 2 {\n\t\t\tif cfData[0] == '\"' && cfData[len(cfData)-1] == '\"' {\n\t\t\t\tcfData = cfData[1 : len(cfData)-1]\n\t\t\t}\n\t\t}\n\t\treturn cfData == value, nil\n\t}\n\n\treturn false, errors.New(\"could not validate DNS propogation for \" + name)\n}", "func CheckQueryPattern(b []byte) bool {\n\n\ttheQuery := string(b)\n\ttheQuery = strings.ToLower(theQuery)\n\ttheQuery = strings.TrimSpace(theQuery)\n\n\t// проверка на первый key_word\n\tif !strings.HasPrefix(theQuery, \"select\") {\n\t\treturn false\n\t}\n\n\tfor _, patt := range QueryPatterns {\n\t\tmatched, _ := regexp.Match(patt, []byte(theQuery))\n\t\tif matched {\n\t\t\treturn true // также надо запомнить, какой паттерн подошел\n\t\t}\n\t}\n\treturn false\n}", "func IsDNS(str string) bool {\n\tif str == \"\" || len(strings.Replace(str, \".\", \"\", -1)) > 255 {\n\t\t// constraints already violated\n\t\treturn false\n\t}\n\treturn !IsIP(str) && rxDNSName.MatchString(str)\n}", "func assertQuery(t *testing.T, expected map[string]string, req *http.Request) {\n\tqueryValues := req.URL.Query() // net/url Values is a map[string][]string\n\texpectedValues := url.Values{}\n\tfor key, value := range expected {\n\t\texpectedValues.Add(key, value)\n\t}\n\tif !reflect.DeepEqual(expectedValues, queryValues) {\n\t\tt.Errorf(\"expected parameters %v, got %v\", expected, req.URL.RawQuery)\n\t}\n}", "func analyzeDns(w io.Writer, server, hostname string, samples, waitMillis int) {\n\tm := new(dns.Msg)\n\tm.Id = dns.Id()\n\tm.RecursionDesired = true\n\tm.Question = make([]dns.Question, 1)\n\tm.Question[0] = dns.Question{Name: dns.Fqdn(hostname), Qtype: dns.TypeA, Qclass: dns.ClassINET}\n\twait := time.Duration(waitMillis) * time.Millisecond\n\n\tc := new(dns.Client)\n\n\tfmt.Printf(\"QUERY %v (@%v): %v data bytes\\n\", hostname, server, m.Len())\n\n\trtts := make(DurationSlice, samples, samples)\n\tfor i := 0; i < samples; i++ {\n\t\tin, rtt, err := c.Exchange(m, server+\":53\")\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tcontinue\n\t\t}\n\t\trtts[i] = rtt\n\t\tfmt.Fprintf(w, \"%v bytes from %v: ttl=%v time=%v\\n\", in.Len(), server, time.Second*6, rtt)\n\t\ttime.Sleep(wait)\n\t}\n\n\t// NOTE: Potentially Eating Performance for Pretties\n\tvar min, max, avg, stddev time.Duration\n\tmin = rtts.Min()\n\tmax = rtts.Max()\n\tavg = rtts.Avg()\n\tstddev = rtts.Std()\n\n\tfmt.Fprintf(w, \"round-trip 
min/avg/max/stddev = %v/%v/%v/%v\\n\", min, avg, max, stddev)\n}", "func parseQuery(m *dns.Msg) {\n\tfor _, q := range m.Question {\n\t\tswitch q.Qtype {\n\t\tcase dns.TypeTXT:\n\t\t\t// Debug log\n\t\t\tif mConfig.Debug {\n\t\t\t\tlog.Printf(\"TXT Query for %s\\n\", q.Name)\n\t\t\t}\n\n\t\t\t// Get IP\n\t\t\treplacer := strings.NewReplacer(\n\t\t\t\t\".\"+mConfig.Suffix+\".\", \"\",\n\t\t\t\t\"x\", \":\",\n\t\t\t\t\"z\", \".\")\n\t\t\tip := replacer.Replace(q.Name)\n\n\t\t\t// Send response\n\t\t\tfor _, response := range g.GeoHandle(ip) {\n\t\t\t\tr := new(dns.TXT)\n\t\t\t\tr.Hdr = dns.RR_Header{\n\t\t\t\t\tName: q.Name,\n\t\t\t\t\tRrtype: dns.TypeTXT,\n\t\t\t\t\tClass: dns.ClassINET,\n\t\t\t\t\tTtl: 1,\n\t\t\t\t}\n\t\t\t\tr.Txt = []string{response}\n\t\t\t\tm.Answer = append(m.Answer, r)\n\t\t\t}\n\t\t}\n\t}\n}", "func isValidHost(targetUrl string) bool {\n\tif !stringsutil.HasPrefixAny(targetUrl, \"http:\", \"https:\") {\n\t\treturn true\n\t}\n\tif networkPolicy == nil {\n\t\treturn true\n\t}\n\turlx, err := urlutil.Parse(targetUrl)\n\tif err != nil {\n\t\t// not a valid url\n\t\treturn false\n\t}\n\ttargetUrl = urlx.Hostname()\n\t_, ok := networkPolicy.ValidateHost(targetUrl)\n\treturn ok\n}", "func Validate(url string) (string, bool) {\n\tmatch := true\n\tresp, err := webtool.HttpResponse(url)\n\tif err != nil || resp.StatusCode >= 400 {\n\t\tmatch = false\n\t}\n\tif !match {\n\t\treturn \"domain does not exist\", match\n\t}\n\tdefer resp.Body.Close()\n\treturn \"\", match\n}", "func Check(collection *collection.Collection, dbg, suggest bool) Results {\n\n\tresults := Results{}\n\n\t// Start tests\n\n\t/* Check to make sure that the public DNS server NS records match\n\t Check to make sure the one of the public and the private NS record servers match\n\t Check to make sure there are at least 1 NS server\n\t*/\n\n\tcollection.PublicMatchNS = reflect.DeepEqual(collection.DNS1NS, collection.DNS2NS)\n\tcollection.LocalMatchNS = reflect.DeepEqual(collection.DNS1NS, collection.LocalNS)\n\tif collection.PublicMatchNS && collection.LocalMatchNS && len(collection.LocalNS) > 0 {\n\t\tresults.ResultNS = true\n\t} else {\n\t\tresults.ResultNS = false\n\t}\n\n\t/* Check to make sure the public DNS server Glue records match\n\t Check to make sure the one of the public and the private Glue record servers match\n\t Check to make sure there the Glue record length matches the ns record length\n\t*/\n\n\tcollection.PublicMatchGlue = reflect.DeepEqual(collection.DNS1Glue, collection.DNS2Glue)\n\tcollection.LocalMatchGlue = reflect.DeepEqual(collection.DNS1Glue, collection.LocalGlue)\n\n\tif collection.PublicMatchGlue && collection.LocalMatchGlue && (len(collection.LocalNS) == len(collection.LocalGlue)) && len(collection.LocalNS) > 0 {\n\t\tresults.ResultGlue = true\n\t} else {\n\t\tresults.ResultGlue = false\n\t}\n\n\t/* Check to make sure that we can access all of the name servers and the numbers match */\n\n\tresults.ResultAccess = true\n\tfor _, a := range collection.EndpointStatus {\n\t\tif a && results.ResultAccess {\n\t\t} else {\n\t\t\tresults.ResultAccess = false\n\t\t}\n\t}\n\tif len(collection.EndpointStatus) != len(collection.LocalNS) || len(collection.EndpointStatus) < 1 {\n\t\tresults.ResultAccess = false\n\t}\n\n\t/* Check to make sure both public DNS server results match\n\t Check that the LocalDNS and one of the remotes match\n\t Check that there is more than 1 A record\n\t*/\n\n\tcollection.PublicMatchA = reflect.DeepEqual(collection.DNS1A, collection.DNS2A)\n\tcollection.LocalMatchA = 
reflect.DeepEqual(collection.DNS1A, collection.LocalA)\n\n\tif collection.PublicMatchA && collection.LocalMatchA && len(collection.LocalA) > 0 && (len(collection.LocalA) == len(collection.DNS1A)) {\n\t\tresults.ResultA = true\n\t} else {\n\t\tresults.ResultA = false\n\t}\n\n\t// check to make sure the SOA records match the domain name we expect\n\tresults.ResultSOAMatch = collection.SOAMatch\n\n\t// Show test results if suggest or debug\n\tif dbg || suggest {\n\t\tfmt.Printf(\"--------------------------------\\n\")\n\t\tdebugPrint(\"NS Record Test\", results.ResultNS)\n\t\tdebugPrint(\"Glue Record Test\", results.ResultGlue)\n\t\tdebugPrint(\"NS Access Test\", results.ResultAccess)\n\t\tdebugPrint(\"SOA Match Test\", results.ResultSOAMatch)\n\t\tdebugPrint(\"A Record Test\", results.ResultA)\n\t\tfmt.Printf(\"--------------------------------\\n\")\n\t}\n\n\t// only print datastructure if debug is on\n\tif dbg {\n\t\tcolor.Cyan.Printf(\"Results Debug:\\n%+v\\n\", results)\n\t}\n\n\treturn (results)\n}", "func (s *Server) handleQuery(service *ServiceEntry, request *Request) error {\n\t// Ignore answer for now\n\tif len(request.query.Answer) > 0 {\n\t\treturn nil\n\t}\n\t// Ignore questions with Authorative section for now\n\tif len(request.query.Ns) > 0 {\n\t\treturn nil\n\t}\n\n\t// Handle each question\n\tvar (\n\t\tresp dns.Msg\n\t\terr error\n\t)\n\tif len(request.query.Question) > 0 {\n\t\tfor _, q := range request.query.Question {\n\t\t\tresp = dns.Msg{}\n\t\t\tresp.SetReply(&request.query)\n\t\t\tresp.Answer = []dns.RR{}\n\t\t\tresp.Extra = []dns.RR{}\n\t\t\tif err = s.handleQuestion(q, service, &resp); err != nil {\n\t\t\t\tlog.Printf(\"[ERR] bonjour: failed to handle question %v: %v\",\n\t\t\t\t\tq, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Check if there is an answer\n\t\t\tif len(resp.Answer) > 0 {\n\t\t\t\tif isUnicastQuestion(q) {\n\t\t\t\t\t// Send unicast\n\t\t\t\t\tif e := s.unicastResponse(&resp, request.from); e != nil {\n\t\t\t\t\t\terr = e\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// Send mulicast\n\t\t\t\t\tif e := s.multicastResponse(&resp); e != nil {\n\t\t\t\t\t\terr = e\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn err\n}", "func (o *PluginDnsClient) isValidAnswer(entry DnsEntry, qType layers.DNSType, qClass layers.DNSClass) bool {\n\t// Dns Types must equal. 
Dns Class should equal in case it isn't Any.\n\treturn (entry.DnsType == qType.String()) && (entry.DnsClass == qClass.String() || qClass == layers.DNSClassAny)\n}", "func (o *DnsEventAllOf) GetQueryOk() (*string, bool) {\n\tif o == nil || o.Query == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Query, true\n}", "func DstHostIs(host string) ReqConditionFunc {\n\treturn func(req *http.Request, ctx *ProxyCtx) bool {\n\t\treturn req.URL.Host == host\n\t}\n}", "func ValidateQueryRequest(request *QueryVisibilityRequest) error {\n\tif request.NamespaceID == \"\" {\n\t\treturn errEmptyNamespaceID\n\t}\n\tif request.PageSize == 0 {\n\t\treturn errInvalidPageSize\n\t}\n\treturn nil\n}", "func validURL(s string) bool {\n\tif s == \"\" {\n\t\treturn false\n\t}\n\tu, err := url.Parse(s)\n\tif err != nil {\n\t\treturn false\n\t}\n\tif u.Host == \"\" {\n\t\treturn false\n\t}\n\tswitch u.Scheme {\n\tcase \"http\", \"https\":\n\tdefault:\n\t\treturn false\n\t}\n\tfor _, r := range u.RawQuery {\n\t\t// https://tools.ietf.org/html/rfc3986#section-3.4 defines:\n\t\t//\n\t\t//\tquery = *( pchar / \"/\" / \"?\" )\n\t\t//\tpchar = unreserved / pct-encoded / sub-delims / \":\" / \"@\"\n\t\t//\tunreserved = ALPHA / DIGIT / \"-\" / \".\" / \"_\" / \"~\"\n\t\t//\tpct-encoded = \"%\" HEXDIG HEXDIG\n\t\t//\tsub-delims = \"!\" / \"$\" / \"&\" / \"'\" / \"(\" / \")\"\n\t\t//\t\t\t/ \"*\" / \"+\" / \",\" / \";\" / \"=\"\n\t\t//\n\t\t// check for these\n\t\tswitch {\n\t\tcase r >= '0' && r <= '9':\n\t\tcase r >= 'A' && r <= 'Z':\n\t\tcase r >= 'a' && r <= 'z':\n\t\tdefault:\n\t\t\tswitch r {\n\t\t\tcase '/', '?',\n\t\t\t\t':', '@',\n\t\t\t\t'-', '.', '_', '~',\n\t\t\t\t'%', '!', '$', '&', '\\'', '(', ')', '*', '+', ',', ';', '=':\n\t\t\tdefault:\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}", "func check_args(parsed_query []string, num_expected int) bool {\n\treturn (len(parsed_query) >= num_expected)\n}", "func validatePrefixQuery(value string) (string, error) {\n\t// We should at least provide 2 chars\n\tif len(value) < 2 {\n\t\treturn \"\", ErrQueryTooShort\n\t}\n\tif !strings.Contains(value, \":\") && !strings.Contains(value, \".\") {\n\t\treturn \"\", ErrQueryIncomplete\n\t}\n\treturn value, nil\n}", "func QueryWhoisServer(domain, server string) (response string, err error) {\n\tconn, err := net.Dial(\"tcp\", server+\":43\")\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer conn.Close()\n\n\tfmt.Fprintf(conn, \"%s\\r\\n\", domain)\n\tif buf, err := ioutil.ReadAll(conn); err == nil {\n\t\tresponse = string(buf)\n\t}\n\n\treturn\n}", "func TestBuildSimpleQuery(t *testing.T) {\n\tq := []QueryElement{\n\t\tQueryElement{Key: \"key0\", Op: \"op0\", Val: \"val0\"},\n\t\tQueryElement{Key: \"key1\", Op: \"op1\", Val: \"val1\"},\n\t}\n\ts := BuildQuery(\"http://localhost/v1/test\", q)\n\tu, err := url.Parse(s)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif u.Query().Get(\"k0\") != \"key0\" {\n\t\tt.Errorf(\"Issue with k0: %v\\n\", u.Query().Get(\"k0\"))\n\t}\n\tif u.Query().Get(\"o0\") != \"op0\" {\n\t\tt.Errorf(\"Issue with o0: %v\\n\", u.Query().Get(\"o0\"))\n\t}\n\tif u.Query().Get(\"v0\") != \"val0\" {\n\t\tt.Errorf(\"Issue with v0: %v\\n\", u.Query().Get(\"v0\"))\n\t}\n\tif u.Query().Get(\"k1\") != \"key1\" {\n\t\tt.Errorf(\"Issue with k1: %v\\n\", u.Query().Get(\"k1\"))\n\t}\n\tif u.Query().Get(\"o1\") != \"op1\" {\n\t\tt.Errorf(\"Issue with o1: %v\\n\", u.Query().Get(\"o1\"))\n\t}\n\tif u.Query().Get(\"v1\") != \"val1\" {\n\t\tt.Errorf(\"Issue with v1: %v\\n\", u.Query().Get(\"v1\"))\n\t}\n}", "func 
addressQuery(q *query.Address, sender connection.Info, oldToken token.Token) {\n\tlog.Warn(\"Address Queries not yet supported\")\n\t//FIXME CFE make it compatible with the new caches\n\t/*log.Debug(\"Start processing address query\", \"addressQuery\", q)\n\tassertion, ok := getAddressCache(q.SubjectAddr, q.Context).Get(q.SubjectAddr, q.Types)\n\t//TODO CFE add heuristic which assertion to return\n\tif ok {\n\t\tif assertion != nil {\n\t\t\tsendSection(assertion, oldToken, sender)\n\t\t\tlog.Debug(\"Finished handling query by sending address assertion from cache\", \"q\", q)\n\t\t\treturn\n\t\t}\n\t}\n\tlog.Debug(\"No entry found in address cache matching the query\")\n\n\tif q.ContainsOption(query.QOCachedAnswersOnly) {\n\t\tlog.Debug(\"Send a notification message back to the sender due to query option: 'Cached Answers only'\")\n\t\tsendNotificationMsg(oldToken, sender, section.NTNoAssertionAvail, \"\")\n\t\tlog.Debug(\"Finished handling query (unsuccessful) \", \"query\", q)\n\t\treturn\n\t}\n\n\tdelegate := getRootAddr()\n\tif delegate.Equal(serverConnInfo) {\n\t\tsendNotificationMsg(oldToken, sender, section.NTNoAssertionAvail, \"\")\n\t\tlog.Error(\"Stop processing query. I am authoritative and have no answer in cache\")\n\t\treturn\n\t}\n\t//we have a valid delegation\n\ttok := oldToken\n\tif !q.ContainsOption(query.QOTokenTracing) {\n\t\ttok = token.New()\n\t}\n\tnewQuery := *q\n\t//Upper bound for forwarded query expiration time\n\tif newQuery.Expiration > time.Now().Add(Config.AddressQueryValidity).Unix() {\n\t\tnewQuery.Expiration = time.Now().Add(Config.AddressQueryValidity).Unix()\n\t}\n\t//FIXME CFE allow multiple connection\n\t//FIXME CFE only send query if not already in cache.\n\tpendingQueries.Add(msgSectionSender{Section: q, Sender: sender, Token: oldToken})\n\tlog.Debug(\"Added query into to pending query cache\", \"query\", q)\n\tsendSection(&newQuery, tok, delegate)*/\n}", "func (domain Domain) IsHTTP() bool {\n\t// The default state of a domain is an HTTP domain; so if it is anything\n\t// other than TCP, it is HTTP.\n\treturn !domain.IsTCP()\n}", "func queryIPWhois(ipAddr string) (string, error) {\n\tresponse, err := queryWhois(ipAddr, \"whois.iana.org\")\n\n\tm, err := whois.ParseWhois(response)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\thost, found := m[WHOIS_KEY]\n\tif !found {\n\t\treturn \"\", errors.New(\"no whois server returned\")\n\t}\n\n\treturn queryWhois(ipAddr, host)\n}", "func (a AuthService) ValidateQueryPerms(q string) (string, error) {\n\tvar newQ CDBQuery\n\t// Unmarshal in to a CDBQuery\n\tif err := json.Unmarshal([]byte(q), &newQ); err != nil {\n\t\treturn \"\", errQueryMarshal(err)\n\t}\n\n\t// Pick out the doctype from the query\n\tresource := newQ.Selector[\"docType\"]\n\n\tif resource == nil || resource == \"\" {\n\t\treturn \"\", errQueryDocType()\n\t}\n\n\tfor _, role := range a.userRoles {\n\t\t// Lookup permissions\n\t\truleFunc, ok := a.rolePermissions[role].QueryPermissions[resource.(string)]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Construct rules from the ruleFunc callback\n\t\trules := ruleFunc(a.userID, a.userRoles)\n\t\tif !rules.Allow {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Enforce any selector appends\n\t\tfor k, v := range rules.SelectorAppend {\n\t\t\tnewQ.Selector[k] = v\n\t\t}\n\n\t\t// Enforce any filter queries (no need to check for nil first)\n\t\tnewQ.Fields = rules.FieldFilter\n\n\t\t// Marshal back to json bytes so it can be sent back as a string\n\t\tnewQBytes, err := json.Marshal(newQ)\n\t\tif err != nil 
{\n\t\t\treturn \"\", errMarshal(err)\n\t\t}\n\n\t\treturn string(newQBytes), nil\n\t}\n\n\treturn \"\", errQuery(resource.(string))\n}", "func TestResponse(t *testing.T) {\n\tdoh, _ := NewTransport(testURL, ips, nil, nil, nil)\n\ttransport := doh.(*transport)\n\trt := makeTestRoundTripper()\n\ttransport.client.Transport = rt\n\n\t// Fake server.\n\tgo func() {\n\t\t<-rt.req\n\t\tr, w := io.Pipe()\n\t\trt.resp <- &http.Response{\n\t\t\tStatusCode: 200,\n\t\t\tBody: r,\n\t\t\tRequest: &http.Request{URL: parsedURL},\n\t\t}\n\t\t// The DOH response should have a zero query ID.\n\t\tvar modifiedQuery dnsmessage.Message = simpleQuery\n\t\tmodifiedQuery.Header.ID = 0\n\t\tw.Write(mustPack(&modifiedQuery))\n\t\tw.Close()\n\t}()\n\n\tresp, err := doh.Query(simpleQueryBytes)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// Parse the response as a DNS message.\n\trespParsed := mustUnpack(resp)\n\n\t// Query() should reconstitute the query ID in the response.\n\tif respParsed.Header.ID != simpleQuery.Header.ID ||\n\t\t!queriesMostlyEqual(*respParsed, simpleQuery) {\n\t\tt.Errorf(\"Unexpected response %v\", resp)\n\t}\n}", "func mssqlCheckParameter(dsn string, option string) bool {\n\traw, err := neturl.QueryUnescape(dsn)\n\tif err != nil {\n\t\traw = dsn\n\t}\n\treturn strings.Contains(strings.ToLower(raw), option)\n}", "func verifyDnsResourceRecordUpdate(reqLogger logr.Logger, fqdn string, txtValue string) bool {\n\treqLogger.Info(fmt.Sprintf(\"will query DNS in %v seconds\", waitTimePeriodDnsPropogationCheck))\n\n\ttime.Sleep(time.Duration(waitTimePeriodDnsPropogationCheck) * time.Second)\n\n\tdnsChangesPropogated, err := ValidateResourceRecordUpdatesUsingCloudflareDNS(reqLogger, fqdn, txtValue)\n\tif err != nil {\n\t\treqLogger.Error(err, \"could not validate DNS propagation.\")\n\t\treturn false\n\t}\n\n\treturn dnsChangesPropogated\n}", "func ValidateHTTP(proxy *url.URL) (bool, error) {\n\tp := &url.URL{\n\t\tScheme: \"http\",\n\t\tHost: proxy.Host,\n\t\tUser: proxy.User,\n\t\tPath: proxy.Path,\n\t}\n\n\tclient := &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tProxy: http.ProxyURL(p),\n\t\t},\n\t\tTimeout: validateTimeout,\n\t}\n\n\tresp, err := client.Get(validateUrl)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\treturn false, errInvalidProxy\n\t}\n\n\treturn true, nil\n}", "func HTTPBuildQuery(queryData url.Values) string {\n\treturn queryData.Encode()\n}", "func ValidateHost(s string) (bool, error) {\n\thost := net.ParseIP(s)\n\tif host != nil {\n\t\treturn true, nil\n\t}\n\thostname, _ := net.LookupHost(s)\n\tif len(hostname) > 0 {\n\t\treturn true, nil\n\t}\n\treturn false, fmt.Errorf(\"'%s' does not seem to be a valid IP or Hostname\", s)\n}", "func TestLookupDNSPanicsOnInvalidType(t *testing.T) {\n\tdefer func() {\n\t\tif r := recover(); r == nil {\n\t\t\tt.Error(\"lookupDNS should panic if an invalid conntype is specified.\")\n\t\t}\n\t}()\n\tlookupDNS(context.Background(), nil, \"name\", \"wssorbashorsomething\")\n}", "func TestHTTPError(t *testing.T) {\n\tdoh, _ := NewTransport(testURL, ips, nil, nil, nil)\n\ttransport := doh.(*transport)\n\trt := makeTestRoundTripper()\n\ttransport.client.Transport = rt\n\n\tgo func() {\n\t\t<-rt.req\n\t\tr, w := io.Pipe()\n\t\trt.resp <- &http.Response{\n\t\t\tStatusCode: 500,\n\t\t\tBody: r,\n\t\t\tRequest: &http.Request{URL: parsedURL},\n\t\t}\n\t\tw.Write([]byte{0, 0, 8, 9, 10})\n\t\tw.Close()\n\t}()\n\n\t_, err := doh.Query(simpleQueryBytes)\n\tvar qerr *queryError\n\tif err == nil 
{\n\t\tt.Error(\"Empty body should cause an error\")\n\t} else if !errors.As(err, &qerr) {\n\t\tt.Errorf(\"Wrong error type: %v\", err)\n\t} else if qerr.status != HTTPError {\n\t\tt.Errorf(\"Wrong error status: %d\", qerr.status)\n\t}\n}", "func (r QueryRequest) Validate() error {\n\tif r.Query == \"\" && r.Spec == nil && r.AST == nil {\n\t\treturn errors.New(`request body requires either query, spec, or AST`)\n\t}\n\n\tif r.Type != \"flux\" {\n\t\treturn fmt.Errorf(`unknown query type: %s`, r.Type)\n\t}\n\n\tif len(r.Dialect.CommentPrefix) > 1 {\n\t\treturn fmt.Errorf(\"invalid dialect comment prefix: must be length 0 or 1\")\n\t}\n\n\tif len(r.Dialect.Delimiter) != 1 {\n\t\treturn fmt.Errorf(\"invalid dialect delimeter: must be length 1\")\n\t}\n\n\trune, size := utf8.DecodeRuneInString(r.Dialect.Delimiter)\n\tif rune == utf8.RuneError && size == 1 {\n\t\treturn fmt.Errorf(\"invalid dialect delimeter character\")\n\t}\n\n\tfor _, a := range r.Dialect.Annotations {\n\t\tswitch a {\n\t\tcase \"group\", \"datatype\", \"default\":\n\t\tdefault:\n\t\t\treturn fmt.Errorf(`unknown dialect annotation type: %s`, a)\n\t\t}\n\t}\n\n\tswitch r.Dialect.DateTimeFormat {\n\tcase \"RFC3339\", \"RFC3339Nano\":\n\tdefault:\n\t\treturn fmt.Errorf(`unknown dialect date time format: %s`, r.Dialect.DateTimeFormat)\n\t}\n\n\treturn nil\n}", "func isHttpConnError(err error) bool {\n\n\testr := err.Error()\n\treturn strings.Contains(estr, \"broken pipe\") ||\n\t\tstrings.Contains(estr, \"broken connection\") ||\n\t\tstrings.Contains(estr, \"connection reset\")\n}", "func DNSResolveCheck(host string, timeout time.Duration) Check {\n\tresolver := net.Resolver{}\n\treturn func() error {\n\t\tctx, cancel := context.WithTimeout(context.Background(), timeout)\n\t\tdefer cancel()\n\t\taddrs, err := resolver.LookupHost(ctx, host)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(addrs) < 1 {\n\t\t\treturn fmt.Errorf(\"could not resolve host\")\n\t\t}\n\t\treturn nil\n\t}\n}", "func DNSResolveCheck(host string, timeout time.Duration) Check {\n\tresolver := net.Resolver{}\n\treturn func() error {\n\t\tctx, cancel := context.WithTimeout(context.Background(), timeout)\n\t\tdefer cancel()\n\t\taddrs, err := resolver.LookupHost(ctx, host)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(addrs) < 1 {\n\t\t\treturn fmt.Errorf(\"could not resolve host\")\n\t\t}\n\t\treturn nil\n\t}\n}", "func Query(w http.ResponseWriter, r *http.Request) {\n\t// Return all responses as plain text\n\tw.Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\n\t// Parse the POST\n\tif err := r.ParseForm(); err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"Could not parse request: %s\", err), 400)\n\t\treturn\n\t}\n\n\tq := fromRequest(r)\n\tdefer logger(q, time.Now())\n\n\t// Parse the text as a url\n\tu, err := q.URL()\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 400)\n\t\treturn\n\t}\n\n\t// TODO query mutliple APIs\n\tcomments, err := redditAPI.Search(u)\n\tif err != nil {\n\t\t// TODO Distinguish between a 400 and a 500?\n\t\thttp.Error(w, err.Error(), 400)\n\t\treturn\n\t}\n\n\tif len(comments) < 1 {\n\t\thttp.Error(w, \"No groupthink was found for this link\", 404)\n\t\treturn\n\t}\n\n\ttimeAgo := time.Now().UTC().Sub(comments[0].Created.AsTime()).Hours()\n\tvar age string\n\tswitch {\n\tcase timeAgo < 1:\n\t\tage = \"less than an hour ago\"\n\tcase timeAgo < 24:\n\t\tage = \"less than a day ago\"\n\tcase timeAgo < (24 * 7):\n\t\tage = \"less than a week ago\"\n\tdefault:\n\t\tage = fmt.Sprintf(\"%d days ago\", 
int(timeAgo/24.0))\n\t}\n\n\tmessage := fmt.Sprintf(`>>> %s\n\n%s (%s)`,\n\t\tcomments[0].Body,\n\t\tcomments[0].Permalink,\n\t\tage,\n\t)\n\tw.Write([]byte(message))\n}", "func ValidateHost(host string) (err error) {\n\tverResp, err := http.Get(host + \"/api/server/version\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif verResp.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"❌ Invalid host, unable to obtain server version, status code %d\", verResp.StatusCode)\n\t}\n\treturn nil\n}", "func QueryResponder(t *testing.T, resp httpmock.Responder, q url.Values) httpmock.Responder {\n\treturn func(r *http.Request) (*http.Response, error) {\n\t\tt.Helper()\n\n\t\tfor k := range q {\n\t\t\tif q.Get(k) != \"\" {\n\t\t\t\tassert.Equal(t, q.Get(k), r.URL.Query().Get(k))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tassert.Zero(t, r.URL.Query().Get(k))\n\t\t}\n\n\t\treturn resp(r)\n\t}\n}", "func IsInvalidQuery(err error) bool {\n\treturn unwrapError(err) == ErrInvalidQuery\n}", "func HandleDNS(w dns.ResponseWriter, r *dns.Msg) {\n\n\t/* Response packet */\n\tm := new(dns.Msg)\n\n\tdefer func() {\n\t\tm.SetReply(r)\n\t\tm.MsgHdr.Authoritative = true\n\t\tw.WriteMsg(m)\n\t}()\n\n\t/* If there's not one question in the packet, it's not for us */\n\tif 1 != len(r.Question) {\n\t\tm = m.SetRcode(r, dns.RcodeNameError)\n\t\treturn\n\t}\n\tq := r.Question[0]\n\tq.Name = strings.ToLower(q.Name)\n\n\t/* If the question's for the A record of the bare domain, return it. */\n\tif DOMAIN == q.Name {\n\t\tif dns.TypeA == q.Qtype && nil != AREC {\n\t\t\tm.Answer = append(m.Answer, AREC)\n\t\t}\n\t\treturn\n\t}\n\n\t/* We can really only process one of these at once */\n\tdnsCacheLock.Lock()\n\tdefer dnsCacheLock.Unlock()\n\n\t/* If we already have this one, use it again */\n\tif v, ok := dnsCache.Get(q.Name); ok {\n\t\trr, ok := v.(*dns.TXT)\n\t\tif !ok {\n\t\t\tlog.Panicf(\"invalid RR type %T\", v)\n\t\t}\n\t\t/* nil means no tasking */\n\t\tif nil != rr {\n\t\t\tm.Answer = append(m.Answer, rr)\n\t\t}\n\t\treturn\n\t}\n\n\t/* Get interesting parts of request. 
There should be 4 */\n\tparts := strings.SplitN(dnsutil.TrimDomainName(q.Name, DOMAIN), \".\", 4)\n\tif 4 != len(parts) {\n\t\tm.SetRcode(r, dns.RcodeFormatError)\n\t\treturn\n\t}\n\tvar (\n\t\toutHex = parts[0] /* Output, in hex */\n\t\tcounter = parts[1] /* Cachebuster */\n\t\tmt = parts[2] /* Message Type */\n\t\tid = strings.ToLower(parts[3]) /* Implant ID */\n\t)\n\n\t/* Only TXT records are supported, and only message types t and o */\n\tif !((mt == TASKINGLABEL && dns.TypeTXT == q.Qtype) ||\n\t\tmt == OUTPUTLABEL) ||\n\t\t\"\" == id {\n\t\tm.SetRcode(r, dns.RcodeRefused)\n\t\treturn\n\t}\n\n\t/* Make sure we have an expected message type */\n\tswitch mt {\n\tcase OUTPUTLABEL: /* Output, no need to respond with anything */\n\t\tdnsCache.Add(q.Name, (*dns.TXT)(nil))\n\t\tupdateLastSeen(id)\n\t\tgo handleOutput(outHex, id)\n\t\treturn\n\tcase TASKINGLABEL: /* Tasking */\n\t\tbreak /* Handled below */\n\tdefault: /* Not something we expect */\n\t\tlog.Panicf(\"unpossible message type %q\", mt)\n\t}\n\n\t/* Update the last seen time for this implant */\n\tupdateLastSeen(id)\n\n\t/* Send beacon to interested clients */\n\tgo sendBeaconToClients(id, counter)\n\n\t/* Get the next tasking for this implant */\n\tt := GetTasking(id)\n\tif \"\" == t {\n\t\tdnsCache.Add(q.Name, (*dns.TXT)(nil))\n\t\treturn\n\t}\n\t/* Sanitize tasking */\n\ts := strings.Replace(t, \"`\", \"``\", -1)\n\ts = strings.Replace(s, `\\`, `\\\\`, -1)\n\tm.Answer = append(m.Answer, &dns.TXT{\n\t\tHdr: dns.RR_Header{\n\t\t\tName: q.Name,\n\t\t\tRrtype: dns.TypeTXT,\n\t\t\tClass: dns.ClassINET,\n\t\t\tTtl: TTL,\n\t\t},\n\t\tTxt: []string{s},\n\t})\n\tdnsCache.Add(q.Name, m.Answer[0])\n\tlog.Printf(\"[ID-%v] TASKING: %s (%s)\", id, t, s)\n}", "func (req *ServerHTTPRequest) CheckQueryValue(key string) bool {\n\tsuccess := req.parseQueryValues()\n\tif !success {\n\t\treturn false\n\t}\n\n\tvalues := req.queryValues[key]\n\tif len(values) == 0 {\n\t\treq.contextLogger.WarnZ(req.Context(), \"Got request with missing query string value\",\n\t\t\tzap.String(\"expectedKey\", key),\n\t\t)\n\t\tif !req.parseFailed {\n\t\t\treq.res.SendErrorString(\n\t\t\t\t400, \"Could not parse query string\",\n\t\t\t)\n\t\t\treq.parseFailed = true\n\t\t}\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (w *worker) resolveFromWith(ip net.IP, proto string) (*nameresolver.Entry, *errors.ErrorStack) {\n\tvar ipList []net.IP\n\n\t// We first query about the IPv4 addresses associated to the request topic.\n\tclnt := new(dns.Client)\n\tclnt.Net = proto\n\n\tma := new(dns.Msg)\n\tma.SetEdns0(4096, false)\n\tma.SetQuestion(w.req.Name(), dns.TypeA)\n\tma.RecursionDesired = false\n\tans, _, err := clnt.Exchange(ma, net.JoinHostPort(ip.String(), \"53\"))\n\n\tif err != nil {\n\t\terrStack := errors.NewErrorStack(err)\n\t\terrStack.Push(fmt.Errorf(\"resolveFromWith: error while exchanging with %s over %s for %s %s?\", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeA]))\n\t\treturn nil, errStack\n\t}\n\tif ans == nil {\n\t\treturn nil, errors.NewErrorStack(fmt.Errorf(\"resolveFromWith: got empty answer from %s over %s for %s %s?\", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeA]))\n\t}\n\tif ans.Rcode != dns.RcodeSuccess {\n\t\treturn nil, errors.NewErrorStack(fmt.Errorf(\"resolveFromWith: got DNS error %s from %s over %s for %s %s?\", dns.RcodeToString[ans.Rcode], ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), 
dns.TypeToString[dns.TypeA]))\n\t}\n\tif !ans.Authoritative {\n\t\t// We expect an non-empty answer from the server, with a positive answer (no NXDOMAIN (lame delegation),\n\t\t// no SERVFAIL (broken server)). We also expect the server to be authoritative; if it is not, it is not clear\n\t\t// why, because the name is delegated to this server according to the parent zone, so we assume that this server\n\t\t// is broken, but there might be other reasons for this that I can't think off from the top of my head.\n\t\treturn nil, errors.NewErrorStack(fmt.Errorf(\"resolveFromWith: got non-authoritative data from %s over %s for %s %s?\", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeA]))\n\t}\n\n\t// If the answer is truncated, we might want to retry over TCP... except of course if the truncated answer is\n\t// already provided over TCP (see Spotify blog post about when it happened to them :))\n\tif ans.Truncated {\n\t\tif proto == \"tcp\" {\n\t\t\treturn nil, errors.NewErrorStack(fmt.Errorf(\"resolveFromWith: got a truncated answer from %s over %s for %s %s?\", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeA]))\n\t\t}\n\t\treturn w.resolveFromWith(ip, \"tcp\")\n\t}\n\n\tfor _, grr := range ans.Answer {\n\t\t// We only consider records from the answer section that have a owner name equal to the qname.\n\t\tif dns.CompareDomainName(grr.Header().Name, w.req.Name()) == dns.CountLabel(w.req.Name()) && dns.CountLabel(grr.Header().Name) == dns.CountLabel(w.req.Name()){\n\t\t\t// We may receive either A or CNAME records with matching owner name. We dismiss all other cases\n\t\t\t// (which are probably constituted of NSEC and DNAME and similar stuff. NSEC is of no value here, and DNAME\n\t\t\t// are not supported by this tool.\n\t\t\tswitch rr := grr.(type) {\n\t\t\tcase *dns.A:\n\t\t\t\t// We stack IPv4 addresses because the RRSet might be composed of multiple A records\n\t\t\t\tipList = append(ipList, rr.A)\n\t\t\tcase *dns.CNAME:\n\t\t\t\t// A CNAME is supposed to be the only record at a given domain name. 
Thus, we return this alias marker\n\t\t\t\t// and forget about all other records that might resides here.\n\t\t\t\treturn nameresolver.NewAliasEntry(w.req.Name(), rr.Target), nil\n\t\t\t}\n\t\t}\n\t}\n\n\t// We now query for the AAAA records to also get the IPv6 addresses\n\tclnt = new(dns.Client)\n\tclnt.Net = proto\n\n\tmaaaa := new(dns.Msg)\n\tmaaaa.SetEdns0(4096, false)\n\tmaaaa.SetQuestion(w.req.Name(), dns.TypeAAAA)\n\tmaaaa.RecursionDesired = false\n\tans, _, err = clnt.Exchange(maaaa, net.JoinHostPort(ip.String(), \"53\"))\n\n\tif err != nil {\n\t\terrStack := errors.NewErrorStack(err)\n\t\terrStack.Push(fmt.Errorf(\"resolveFromWith: error while exchanging with %s over %s for %s %s?\", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeAAAA]))\n\t\treturn nil, errStack\n\t}\n\tif ans == nil {\n\t\treturn nil, errors.NewErrorStack(fmt.Errorf(\"resolveFromWith: got empty answer from %s over %s for %s %s?\", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeAAAA]))\n\t}\n\tif ans.Rcode != dns.RcodeSuccess {\n\t\treturn nil, errors.NewErrorStack(fmt.Errorf(\"resolveFromWith: got DNS error %s from %s over %s for %s %s?\", dns.RcodeToString[ans.Rcode], ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeAAAA]))\n\t}\n\tif !ans.Authoritative {\n\t\treturn nil, errors.NewErrorStack(fmt.Errorf(\"resolveFromWith: got non-authoritative data from %s over %s for %s %s?\", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeAAAA]))\n\t}\n\tif ans.Truncated {\n\t\tif proto == \"tcp\" {\n\t\t\treturn nil, errors.NewErrorStack(fmt.Errorf(\"resolveFromWith: got a truncated answer from %s over %s for %s %s?\", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeAAAA]))\n\t\t}\n\t\treturn w.resolveFromWith(ip, \"tcp\")\n\t}\n\n\tfor _, grr := range ans.Answer {\n\t\tif dns.CompareDomainName(grr.Header().Name, w.req.Name()) == dns.CountLabel(w.req.Name()) && dns.CountLabel(grr.Header().Name) == dns.CountLabel(w.req.Name()){\n\t\t\tswitch rr := grr.(type) {\n\t\t\tcase *dns.AAAA:\n\t\t\t\tipList = append(ipList, rr.AAAA)\n\t\t\tcase *dns.CNAME:\n\t\t\t\t// We should have a CNAME here because the CNAME was not returned when asked for A records, and if we\n\t\t\t\t// had received a CNAME, we would already have returned.\n\t\t\t\treturn nil, errors.NewErrorStack(fmt.Errorf(\"resolveFromWith: got a CNAME that was not provided for the A query from %s over %s for %s %s?\", ip.String(), errors.PROTO_TO_STR[errors.STR_TO_PROTO[proto]], w.req.Name(), dns.TypeToString[dns.TypeAAAA]))\n\t\t\t}\n\t\t}\n\t}\n\treturn nameresolver.NewIPEntry(w.req.Name(), ipList), nil\n}", "func (o *PluginDnsNs) SendQuery(mac *core.MACKey, domain string) bool {\n\n\tclient := o.Ns.GetClient(mac)\n\n\tif client == nil {\n\t\t// No such client ...\n\t\to.stats.autoPlayClientNotFound++\n\t\treturn true // Restart, next one can be ok\n\t}\n\n\tplug := client.PluginCtx.Get(DNS_PLUG)\n\tif plug == nil {\n\t\t// given client doesn't have Dns\n\t\to.stats.clientNoDns++\n\t\treturn false // Don't restart timer, stop!\n\t}\n\n\tdnsPlug := plug.Ext.(*PluginDnsClient)\n\n\tqueries := o.getClientQueries(mac, domain)\n\n\terr := dnsPlug.Query(queries, dnsPlug.socket)\n\tif err != nil {\n\t\t// Couldn't query properly\n\t\to.stats.autoPlayBadQuery++\n\t} else {\n\t\to.stats.autoPlayQueries++ // one more 
auto play query sent\n\t}\n\t// If the query amount is not reached, we can restart the timer.\n\tinfiniteQueries := (o.autoPlayParams.QueryAmount == 0)\n\treturn o.stats.autoPlayQueries < o.autoPlayParams.QueryAmount || infiniteQueries\n}", "func (d *DNS) Check(ipaddr net.IP) error {\n\t// NOTE: We are ignoring error. It says: \"nodename nor servname\n\t// provided, or not known\" if there is no DNS name for the IP address.\n\tnames, _ := net.LookupAddr(ipaddr.String())\n\td.Names = names\n\treturn nil\n}", "func validateQueryString(req *http.Request, key string) (string, error) {\n\tquery := req.URL.Query()\n\tvalues, ok := query[key]\n\tif !ok {\n\t\treturn \"\", NewErrMissingParam(key)\n\t}\n\n\tif len(values) != 1 {\n\t\treturn \"\", NewErrAmbigousParam(key)\n\t}\n\n\tvalue := values[0]\n\tif value == \"\" {\n\t\treturn \"\", NewErrEmptyParam(key)\n\t}\n\n\treturn value, nil\n}", "func isHttpURL(fl FieldLevel) bool {\n\tif !isURL(fl) {\n\t\treturn false\n\t}\n\n\tfield := fl.Field()\n\tswitch field.Kind() {\n\tcase reflect.String:\n\n\t\ts := strings.ToLower(field.String())\n\n\t\turl, err := url.Parse(s)\n\t\tif err != nil || url.Host == \"\" {\n\t\t\treturn false\n\t\t}\n\n\t\treturn url.Scheme == \"http\" || url.Scheme == \"https\"\n\t}\n\n\tpanic(fmt.Sprintf(\"Bad field type %T\", field.Interface()))\n}", "func verifySearchByKnownURLs(t *testing.T, wi *app.WorkItem2Single, host, searchQuery string) {\n\tresult := searchByURL(t, host, searchQuery)\n\tassert.NotEmpty(t, result.Data)\n\tassert.Equal(t, *wi.Data.ID, *result.Data[0].ID)\n\n\tknown := search.GetAllRegisteredURLs()\n\trequire.NotNil(t, known)\n\tassert.NotEmpty(t, known)\n\tassert.Contains(t, known[search.HostRegistrationKeyForListWI].URLRegex, host)\n\tassert.Contains(t, known[search.HostRegistrationKeyForBoardWI].URLRegex, host)\n}", "func query(object string, server string, tcpport string) (string, error) {\r\n\t// open connnection\r\n\tloggers.Info.Printf(\"whois.query() setup connection\")\r\n\tconn, err := net.DialTimeout(\"tcp\", net.JoinHostPort(server, tcpport), time.Second*30)\r\n\tif err != nil {\r\n\t\treturn \"\", fmt.Errorf(\"whois: connect to whois server failed: %v\", err)\r\n\t}\r\n\tdefer conn.Close()\r\n\t// set connection write timeout\r\n\t_ = conn.SetWriteDeadline(time.Now().Add(time.Second * 30))\r\n\t_, err = conn.Write([]byte(object + \"\\r\\n\"))\r\n\tif err != nil {\r\n\t\treturn \"\", fmt.Errorf(\"whois: send to whois server failed: %v\", err)\r\n\t}\r\n\t// set connection read timeout\r\n\t_ = conn.SetReadDeadline(time.Now().Add(time.Second * 30))\r\n\tbuffer, err := ioutil.ReadAll(conn)\r\n\tif err != nil {\r\n\t\treturn \"\", fmt.Errorf(\"whois: read from whois server failed: %v\", err)\r\n\t}\r\n\t// return result\r\n\treturn string(buffer), nil\r\n}", "func validateDNSContent(dnsContent string) (string, error) {\n\tdnsContent = sanitize(dnsContent)\n\tswitch dnsContent {\n\tcase \"\":\n\t\treturn \"container\", nil\n\tcase \"container\":\n\t\treturn \"container\", nil\n\tdefault:\n\t\tip := net.ParseIP(dnsContent)\n\t\tif ip == nil {\n\t\t\treturn \"\", fmt.Errorf(\"Invalid dns-content specified. `%s` must be a valid IPv4 address or one of `container`\", dnsContent)\n\t\t}\n\t\tip = ip.To4()\n\t\t// TODO: remove this check when we add IPv6 support. We might want to split this config variable in 2 when we do (MODE and actual IP)\n\t\tif ip == nil {\n\t\t\treturn \"\", fmt.Errorf(\"Invalid dns-content specified. 
`%s` must be a valid IPv4 address or one of `container`\", dnsContent)\n\t\t}\n\t\treturn ip.String(), nil\n\t}\n}", "func CheckDomain(sender string) (bool, error) {\n\trbool := false\n\thost := strings.Split(sender, \"@\")[1]\n\thostfinal := strings.Split(host, \">\")[0]\n\tfor _, domain := range OURDOMAIN {\n\t\t// log.Println(\"Checking domain: \" + domain)\n\t\tif hostfinal == domain {\n\t\t\t// log.Println(domain + \" == \" + hostfinal)\n\t\t\trbool = true\n\t\t\tbreak\n\t\t}\n\t}\n\treturn rbool, nil\n}", "func validateDNS(){\n dns, err := exec.Command(\"/bin/bash\", \"-c\", \"dig +short rackspace.com\").Output()\n if err != nil {\n log.Fatal(err)\n fmt.Println(\"Unable to validate DNS.\")\n fmt.Printf(\"%s\", err)\n os.Exit(1)\n }\n fmt.Printf(\"%s\", dns)\n}", "func makeDNSHandler(blacklist *Blacklist, upstream string, logging bool) func(dns.ResponseWriter, *dns.Msg) {\n\n\t// create the logger functions\n\tlogger := func(res *dns.Msg, duration time.Duration, how string) {}\n\terrorLogger := func(err error, description string) {\n\t\tlog.Print(description, err)\n\t}\n\tif logging {\n\t\tlogger = func(msg *dns.Msg, rtt time.Duration, how string) {\n\t\t\tlog.Printf(\"Using %s, response time %s:\\n%s\\n\", how, rtt.String(), msg.String())\n\t\t}\n\t\terrorLogger = func(err error, description string) {\n\n\t\t}\n\t}\n\n\t// cache for the DNS replies from the DNS server\n\tcache := NewCache()\n\n\t// we use a single client to resolve queries against the upstream DNS\n\tclient := new(dns.Client)\n\n\t// create the real handler\n\treturn func(w dns.ResponseWriter, req *dns.Msg) {\n\t\tstart := time.Now()\n\n\t\t// the standard allows multiple DNS questions in a single query... but nobody uses it, so we disallow it\n\t\t// https://stackoverflow.com/questions/4082081/requesting-a-and-aaaa-records-in-single-dns-query/4083071\n\t\tif len(req.Question) != 1 {\n\n\t\t\t// reply with a format error\n\t\t\tres := new(dns.Msg)\n\t\t\tres.SetRcode(req, dns.RcodeFormatError)\n\t\t\terr := w.WriteMsg(res)\n\t\t\tif err != nil {\n\t\t\t\terrorLogger(err, \"Error to write DNS response message to client\")\n\t\t\t}\n\n\t\t\t// collect metrics\n\t\t\tduration := time.Since(start).Seconds()\n\t\t\tqueriesHistogram.WithLabelValues(\"malformed_query\", \"-\").Observe(duration)\n\n\t\t\treturn\n\t\t}\n\n\t\t// extract the DNS question\n\t\tquery := req.Question[0]\n\t\tdomain := strings.TrimRight(query.Name, \".\")\n\t\tqueryType := dns.TypeToString[query.Qtype]\n\n\t\t// check the cache first: if a domain is in the cache, it cannot be blocked\n\t\t// this optimized response times for allowed domains over the blocked domains\n\t\tcached, found := cache.Get(&query)\n\t\tif found {\n\n\t\t\t// cache found, use the cached answer\n\t\t\tres := cached.SetReply(req)\n\t\t\tres.Answer = cached.Answer\n\t\t\terr := w.WriteMsg(res)\n\t\t\tif err != nil {\n\t\t\t\terrorLogger(err, \"Error to write DNS response message to client\")\n\t\t\t}\n\n\t\t\t// log the query\n\t\t\tduration := time.Since(start)\n\t\t\tlogger(res, duration, \"cache\")\n\n\t\t\t// collect metrics\n\t\t\tdurationSeconds := duration.Seconds()\n\t\t\tqueriesHistogram.WithLabelValues(\"cache\", queryType).Observe(durationSeconds)\n\n\t\t\treturn\n\t\t}\n\n\t\t// then, check if the domain is blocked\n\t\tblocked := blacklist.Contains(domain)\n\t\tif blocked {\n\n\t\t\t// reply with \"domain not found\"\n\t\t\tres := new(dns.Msg)\n\t\t\tres.SetRcode(req, dns.RcodeNameError)\n\t\t\terr := w.WriteMsg(res)\n\t\t\tif err != nil {\n\t\t\t\terrorLogger(err, 
\"Error to write DNS response message to client\")\n\t\t\t}\n\n\t\t\t// log the query\n\t\t\tduration := time.Since(start)\n\t\t\tlogger(res, duration, \"block\")\n\n\t\t\t// collect metrics\n\t\t\tdurationSeconds := duration.Seconds()\n\t\t\tqueriesHistogram.WithLabelValues(\"block\", queryType).Observe(durationSeconds)\n\n\t\t\treturn\n\t\t}\n\n\t\t// finally, query an upstream DNS\n\t\tres, rtt, err := client.Exchange(req, upstream)\n\t\tif err == nil {\n\n\t\t\t// reply to the query\n\t\t\terr := w.WriteMsg(res)\n\t\t\tif err != nil {\n\t\t\t\terrorLogger(err, \"Error to write DNS response message to client\")\n\t\t\t}\n\n\t\t\t// cache the result if any\n\t\t\tif len(res.Answer) > 0 {\n\t\t\t\texpiration := time.Duration(res.Answer[0].Header().Ttl) * time.Second\n\t\t\t\tcache.Set(&query, res, expiration)\n\t\t\t}\n\n\t\t\t// log the query\n\t\t\tlogger(res, rtt, \"upstream\")\n\n\t\t\t// collect metrics\n\t\t\tdurationSeconds := time.Since(start).Seconds()\n\t\t\tqueriesHistogram.WithLabelValues(\"upstream\", queryType).Observe(durationSeconds)\n\n\t\t} else {\n\n\t\t\t// log the error\n\t\t\terrorLogger(err, \"Error in resolve query against upstream DNS \"+upstream)\n\n\t\t\t// collect metrics\n\t\t\tdurationSeconds := time.Since(start).Seconds()\n\t\t\tqueriesHistogram.WithLabelValues(\"upstream_error\", queryType).Observe(durationSeconds)\n\t\t}\n\t}\n}", "func validateProxyVerbRequest(client *http.Client, urlString string, httpVerb string, msg string) func() (bool, error) {\n\treturn func() (bool, error) {\n\t\tvar err error\n\n\t\trequest, err := http.NewRequest(httpVerb, urlString, nil)\n\t\tif err != nil {\n\t\t\tframework.Logf(\"Failed to get a new request. %v\", err)\n\t\t\treturn false, nil\n\t\t}\n\n\t\tresp, err := client.Do(request)\n\t\tif err != nil {\n\t\t\tframework.Logf(\"Failed to get a response. %v\", err)\n\t\t\treturn false, nil\n\t\t}\n\t\tdefer resp.Body.Close()\n\n\t\tbuf := new(bytes.Buffer)\n\t\tbuf.ReadFrom(resp.Body)\n\t\tresponse := buf.String()\n\n\t\tswitch httpVerb {\n\t\tcase \"HEAD\":\n\t\t\tframework.Logf(\"http.Client request:%s | StatusCode:%d\", httpVerb, resp.StatusCode)\n\t\t\tif resp.StatusCode != 200 {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\treturn true, nil\n\t\tdefault:\n\t\t\tvar jr *jsonResponse\n\t\t\terr = json.Unmarshal([]byte(response), &jr)\n\t\t\tif err != nil {\n\t\t\t\tframework.Logf(\"Failed to process jsonResponse. 
%v\", err)\n\t\t\t\treturn false, nil\n\t\t\t}\n\n\t\t\tframework.Logf(\"http.Client request:%s | StatusCode:%d | Response:%s | Method:%s\", httpVerb, resp.StatusCode, jr.Body, jr.Method)\n\t\t\tif resp.StatusCode != 200 {\n\t\t\t\treturn false, nil\n\t\t\t}\n\n\t\t\tif msg != jr.Body {\n\t\t\t\treturn false, nil\n\t\t\t}\n\n\t\t\tif httpVerb != jr.Method {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\treturn true, nil\n\t\t}\n\t}\n}", "func IsHost(str string) bool {\n\treturn IsIP(str) || IsDNSName(str)\n}", "func isValidHostURL(hostURL string) bool {\n\tif strings.TrimSpace(hostURL) == \"\" {\n\t\treturn false\n\t}\n\turl := client.NewURL(hostURL)\n\tif url.Scheme != \"https\" && url.Scheme != \"http\" {\n\t\treturn false\n\t}\n\tif url.Path != \"\" && url.Path != \"/\" {\n\t\treturn false\n\t}\n\treturn true\n}", "func isHTTPTimeout(err error) bool {\n\tif netErr, ok := err.(interface {\n\t\tTimeout() bool\n\t}); ok && netErr.Timeout() {\n\t\treturn true\n\t} else if strings.Contains(err.Error(), \"use of closed network connection\") { //To deprecate when using GO > 1.5\n\t\treturn true\n\t}\n\treturn false\n}", "func hasValidSavedQuery(query interface{}) error {\n\tqueryConverted := query.(map[string]interface{})\n\tlistQueryTypes := []string{\"count/entity\", \"count/event\", \"count/entity/total\", \"aggregation\", \"top_values\"}\n\tif !stringInSlice(queryConverted[\"type\"].(string), listQueryTypes) {\n\t\treturn errors.New(\"Saved Query Validator: this dictionary don't have query type valid.\")\n\t}\n\treturn nil\n}", "func Validator(request events.APIGatewayProxyRequest) (bool, string, int, searchutil.SearchPeriod) {\n\tif request.HTTPMethod != \"GET\" {\n\t\treturn false, \"{\\\"msg\\\": \\\"only HTTP GET is allowed on this resource\\\"}\", -1, searchutil.Day\n\t}\n\tnumber, pres := request.QueryStringParameters[\"number\"]\n\tif !pres {\n\t\tnumber = \"10\"\n\t}\n\tnum, _ := strconv.Atoi(number)\n\tif num <= 0 {\n\t\treturn false, \"{\\\"msg\\\": \\\"number nmust be greater than 0\\\"}\", -1, searchutil.Day\n\t}\n\tduration, _ := request.QueryStringParameters[\"duration\"]\n\treturn true, \"\", num, ConvertDurationToSearchPeriod(duration)\n}", "func (handler *AllowAllHandler) CheckQuery(sqlQuery string, parsedQuery sqlparser.Statement) (bool, error) {\n\t// allow any query and stop further checks\n\thandler.logger.Infof(\"Query has been allowed by Allowall handler\")\n\treturn false, nil\n}", "func ConnVerifyHostname(c *tls.Conn, host string) error", "func (f TransportQueryRuleFunc) EvalQuery(ctx context.Context, q ent.Query) error {\n\tif q, ok := q.(*ent.TransportQuery); ok {\n\t\treturn f(ctx, q)\n\t}\n\treturn Denyf(\"ent/privacy: unexpected query type %T, expect *ent.TransportQuery\", q)\n}", "func answerQuery(q *query.Name, sender connection.Info, oldToken token.Token, s *Server) {\n\tlog.Debug(\"Start processing query\", \"query\", q)\n\ttrace(oldToken, fmt.Sprintf(\"Processing QueryForward for name: %v, connection: %v\", q.Name, q.Types))\n\n\tassertions := []section.Section{}\n\tassertionSet := make(map[string]bool)\n\tasKey := func(a *section.Assertion) string {\n\t\treturn fmt.Sprintf(\"%s_%s_%s\", a.SubjectName, a.SubjectZone, a.Context)\n\t}\n\n\tfor _, t := range q.Types {\n\t\tif asserts, ok := s.caches.AssertionsCache.Get(q.Name, q.Context, t, false); ok {\n\t\t\ttrace(oldToken, fmt.Sprintf(\"received from cache: %v\", asserts))\n\t\t\t//TODO implement a more elaborate policy to filter returned assertions instead\n\t\t\t//of sending all non expired once 
back.\n\t\t\tlog.Debug(fmt.Sprintf(\"before transitive closure: %v\", asserts))\n\t\t\tqueryTransitiveClosure(&asserts, q.Context, s.caches.AssertionsCache)\n\t\t\tlog.Debug(fmt.Sprintf(\"after transitive closure: %v\", asserts))\n\t\t\tfor _, a := range asserts {\n\t\t\t\tif _, ok := assertionSet[asKey(a)]; ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif a.ValidUntil() > time.Now().Unix() {\n\t\t\t\t\ttrace(oldToken, fmt.Sprintf(\"appending valid assertion %v to response\", a))\n\t\t\t\t\tlog.Debug(fmt.Sprintf(\"appending valid assertion: %v\", a))\n\t\t\t\t\tassertions = append(assertions, a)\n\t\t\t\t\tassertionSet[asKey(a)] = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif len(assertions) > 0 {\n\t\tsendSections(assertions, oldToken, sender, s)\n\t\ttrace(oldToken, fmt.Sprintf(\"successfully sent response assertions: %v\", assertions))\n\t\tlog.Info(\"Finished handling query by sending assertion from cache\", \"query\", q)\n\t\treturn\n\t}\n\ttrace(oldToken, \"no entry found in assertion cache\")\n\tlog.Debug(\"No entry found in assertion cache\", \"name\", q.Name,\n\t\t\"context\", q.Context, \"type\", q.Types)\n\n\t//negative answer lookup (note that it can occur a positive answer if assertion removed from cache)\n\tsubject, zone, err := toSubjectZone(q.Name)\n\tif err != nil {\n\t\tsendNotificationMsg(oldToken, sender, section.NTRcvInconsistentMsg,\n\t\t\t\"query name must end with root zone dot '.'\", s)\n\t\tlog.Warn(\"failed to concert query name to subject and zone: %v\", err)\n\t\treturn\n\t}\n\tnegAssertion, ok := s.caches.NegAssertionCache.Get(zone, q.Context, section.StringInterval{Name: subject})\n\tif ok {\n\t\t//TODO CFE For each type check if one of the zone or shards contain the queried\n\t\t//assertion. If there is at least one assertion answer with it. If no assertion is\n\t\t//contained in a zone or shard for any of the queried connection, answer with the shortest\n\t\t//element. shortest according to what? size in bytes? how to efficiently determine that.\n\t\t//e.g. using gob encoding. 
alternatively we could also count the number of contained\n\t\t//elements.\n\t\tsendSection(negAssertion[0], oldToken, sender, s)\n\t\ttrace(oldToken, fmt.Sprintf(\"found negative assertion matching query: %v\", negAssertion[0]))\n\t\tlog.Info(\"Finished handling query by sending shard or zone from cache\", \"query\", q)\n\t\treturn\n\t}\n\tlog.Debug(\"No entry found in negAssertion cache matching the query\")\n\ttrace(oldToken, \"no entry found in negative assertion cache\")\n\n\t// If cached answers only option is set then stop after looking in the local cache.\n\tif q.ContainsOption(query.QOCachedAnswersOnly) {\n\t\tlog.Debug(\"Send a notification message back due to query option: 'Cached Answers only'\",\n\t\t\t\"destination\", sender)\n\t\tsendNotificationMsg(oldToken, sender, section.NTNoAssertionAvail, \"\", s)\n\t\ttrace(oldToken, \"returned no assertion available message due to CachedAnswersOnly query option\")\n\t\tlog.Info(\"Finished handling query (unsuccessful, cached answers only) \", \"query\", q)\n\t\treturn\n\t}\n\n\ttrace(oldToken, \"forwarding query\")\n\t//forward query (no answer in cache)\n\tvar delegate connection.Info\n\tif iterativeLookupAllowed() {\n\t\tif conns := s.caches.RedirectCache.GetConnsInfo(q.Name); len(conns) > 0 {\n\t\t\t//TODO CFE design policy which server to choose (same as pending query callback?)\n\t\t\tdelegate = conns[0]\n\t\t} else {\n\t\t\tsendNotificationMsg(oldToken, sender, section.NTNoAssertionAvail, \"\", s)\n\t\t\tlog.Error(\"no delegate found to send query to\")\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tdelegate = getRootAddr()\n\t}\n\tif delegate.Equal(s.config.ServerAddress) {\n\t\tsendNotificationMsg(oldToken, sender, section.NTNoAssertionAvail, \"\", s)\n\t\tlog.Error(\"Stop processing query. I am authoritative and have no answer in cache\")\n\t\treturn\n\t}\n\t//we have a valid delegation\n\ttok := oldToken\n\tif !q.ContainsOption(query.QOTokenTracing) {\n\t\ttok = token.New()\n\t}\n\tvalidUntil := time.Now().Add(s.config.QueryValidity).Unix() //Upper bound for forwarded query expiration time\n\tif q.Expiration < validUntil {\n\t\tvalidUntil = q.Expiration\n\t}\n\tisNew := s.caches.PendingQueries.Add(msgSectionSender{Section: q, Sender: sender, Token: oldToken})\n\tlog.Info(\"Added query into to pending query cache\", \"query\", q)\n\tif isNew {\n\t\tif s.caches.PendingQueries.AddToken(tok, validUntil, delegate, q.Name, q.Context, q.Types) {\n\t\t\tnewQuery := &query.Name{\n\t\t\t\tName: q.Name,\n\t\t\t\tContext: q.Context,\n\t\t\t\tExpiration: validUntil,\n\t\t\t\tTypes: q.Types,\n\t\t\t}\n\t\t\tif err := sendSection(newQuery, tok, delegate, s); err == nil {\n\t\t\t\tlog.Info(\"Sent query.\", \"destination\", delegate, \"query\", newQuery)\n\t\t\t}\n\t\t} //else answer already arrived and callback function has already been invoked\n\t} else {\n\t\tlog.Info(\"Query already sent.\")\n\t}\n}", "func ValidateHost(host string, allowNonCompliant string, hostPath *field.Path) field.ErrorList {\n\tresult := field.ErrorList{}\n\n\tif allowNonCompliant == \"true\" {\n\t\terrs := kvalidation.IsDNS1123Subdomain(host)\n\t\tif len(errs) != 0 {\n\t\t\tresult = append(result, field.Invalid(hostPath, host, fmt.Sprintf(\"host must conform to DNS naming conventions: %v\", errs)))\n\t\t}\n\t} else {\n\t\terrs := kvalidation.IsFullyQualifiedDomainName(hostPath, host)\n\t\tif len(errs) != 0 {\n\t\t\tresult = append(result, field.Invalid(hostPath, host, fmt.Sprintf(\"host must conform to DNS 1123 naming conventions: %v\", errs)))\n\t\t}\n\t}\n\treturn 
result\n}", "func isSameDomain(url1, url2 string) bool {\n\tu1, _ := url.Parse(url1)\n\tu2, _ := url.Parse(url2)\n\n\treturn u1.Host == u2.Host\n}", "func DNSName(str string) bool {\n\tif str == \"\" || len(strings.Replace(str, \".\", \"\", -1)) > 255 {\n\t\t// constraints already violated\n\t\treturn false\n\t}\n\treturn rxDNSName.MatchString(str)\n}" ]
[ "0.61967796", "0.59444106", "0.5883444", "0.58111125", "0.5794155", "0.56501865", "0.5649279", "0.5606844", "0.55810744", "0.557853", "0.55547315", "0.54801077", "0.5479285", "0.5403357", "0.5397834", "0.5353204", "0.5351625", "0.53264076", "0.530033", "0.52961653", "0.5251227", "0.5245285", "0.5220293", "0.52109766", "0.52104217", "0.52035666", "0.52004975", "0.51843864", "0.51822", "0.5171105", "0.51620746", "0.5143289", "0.5134479", "0.5121511", "0.5115387", "0.507001", "0.50474375", "0.50338477", "0.5033564", "0.5031965", "0.49663812", "0.49617013", "0.49549553", "0.49512425", "0.49448556", "0.49434415", "0.4938097", "0.49285662", "0.4911356", "0.49084747", "0.48913562", "0.48863828", "0.48836437", "0.4880356", "0.4870432", "0.48590007", "0.48387066", "0.48268583", "0.4822341", "0.48194918", "0.47959846", "0.47923094", "0.47897708", "0.47819617", "0.47816333", "0.47792438", "0.47780907", "0.4772992", "0.47729897", "0.47729897", "0.47724417", "0.4762086", "0.47614053", "0.47576788", "0.47573823", "0.4733256", "0.4731058", "0.4729144", "0.47288576", "0.47279072", "0.47278357", "0.4726241", "0.4717128", "0.47100222", "0.47095257", "0.47027898", "0.47012433", "0.47000483", "0.46883786", "0.4685343", "0.4683348", "0.4678868", "0.4676442", "0.46737564", "0.46686363", "0.4666195", "0.4660296", "0.4660248", "0.46596074", "0.46541902" ]
0.51648927
30
Check that all fields of m1 match those of m2, except for Header.ID and Additionals.
func queriesMostlyEqual(m1 dnsmessage.Message, m2 dnsmessage.Message) bool {
	// Make fields we don't care about match, so that equality check is easy.
	m1.Header.ID = m2.Header.ID
	m1.Additionals = m2.Additionals
	return reflect.DeepEqual(m1, m2)
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *Record) Eq(other *Record) bool {\n\n\t// We disregard leader in equality tests, since LineMARC doesn't have one,\n\t// and it will be generated by decoders and encoder.\n\t/*\n\t\t// Leader equal?\n\t\tif r.Leader != other.Leader {\n\t\t\treturn false\n\t\t}\n\t*/\n\n\t// Control Fields equal?\n\tif len(r.CtrlFields) != len(other.CtrlFields) {\n\t\treturn false\n\t}\n\n\tsort.Sort(r.CtrlFields)\n\tsort.Sort(other.CtrlFields)\n\n\tfor i, f := range r.CtrlFields {\n\t\tif other.CtrlFields[i] != f {\n\t\t\treturn false\n\t\t}\n\t}\n\n\t// Data Fields equal?\n\tif len(r.DataFields) != len(other.DataFields) {\n\t\treturn false\n\t}\n\n\tsort.Sort(r.DataFields)\n\tsort.Sort(other.DataFields)\n\n\tfor i, f := range r.DataFields {\n\t\tif o := other.DataFields[i]; o.Tag != f.Tag || o.Ind1 != f.Ind1 || o.Ind2 != f.Ind2 {\n\t\t\treturn false\n\t\t}\n\t\t// SubFields equal?\n\t\tif len(f.SubFields) != len(other.DataFields[i].SubFields) {\n\t\t\treturn false\n\t\t}\n\n\t\tsort.Sort(f.SubFields)\n\t\tsort.Sort(other.DataFields[i].SubFields)\n\n\t\tfor j, s := range f.SubFields {\n\t\t\tif other.DataFields[i].SubFields[j] != s {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\n\t// All fields equal\n\treturn true\n}", "func compatFields(a, b *Type, seenA, seenB map[*Type]bool) bool {\n\t// All fields with the same name must be compatible, and at least one field\n\t// must match.\n\tif a.NumField() > b.NumField() {\n\t\ta, seenA, b, seenB = b, seenB, a, seenA\n\t}\n\tfieldMatch := false\n\tfor ax := 0; ax < a.NumField(); ax++ {\n\t\tafield := a.Field(ax)\n\t\tbfield, bindex := b.FieldByName(afield.Name)\n\t\tif bindex < 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif !compat(afield.Type, bfield.Type, seenA, seenB) {\n\t\t\treturn false\n\t\t}\n\t\tfieldMatch = true\n\t}\n\treturn fieldMatch\n}", "func equalMetadatas(md, md2 Metadata) error {\n\t// Check Aggregate Fields\n\tif md.AggregateHealth != md2.AggregateHealth {\n\t\treturn fmt.Errorf(\"AggregateHealth not equal, %v and %v\", md.AggregateHealth, md2.AggregateHealth)\n\t}\n\tif md.AggregateLastHealthCheckTime != md2.AggregateLastHealthCheckTime {\n\t\treturn fmt.Errorf(\"AggregateLastHealthCheckTimes not equal, %v and %v\", md.AggregateLastHealthCheckTime, md2.AggregateLastHealthCheckTime)\n\t}\n\tif md.AggregateMinRedundancy != md2.AggregateMinRedundancy {\n\t\treturn fmt.Errorf(\"AggregateMinRedundancy not equal, %v and %v\", md.AggregateMinRedundancy, md2.AggregateMinRedundancy)\n\t}\n\tif md.AggregateModTime != md2.AggregateModTime {\n\t\treturn fmt.Errorf(\"AggregateModTimes not equal, %v and %v\", md.AggregateModTime, md2.AggregateModTime)\n\t}\n\tif md.AggregateNumFiles != md2.AggregateNumFiles {\n\t\treturn fmt.Errorf(\"AggregateNumFiles not equal, %v and %v\", md.AggregateNumFiles, md2.AggregateNumFiles)\n\t}\n\tif md.AggregateNumStuckChunks != md2.AggregateNumStuckChunks {\n\t\treturn fmt.Errorf(\"AggregateNumStuckChunks not equal, %v and %v\", md.AggregateNumStuckChunks, md2.AggregateNumStuckChunks)\n\t}\n\tif md.AggregateNumSubDirs != md2.AggregateNumSubDirs {\n\t\treturn fmt.Errorf(\"AggregateNumSubDirs not equal, %v and %v\", md.AggregateNumSubDirs, md2.AggregateNumSubDirs)\n\t}\n\tif md.AggregateRemoteHealth != md2.AggregateRemoteHealth {\n\t\treturn fmt.Errorf(\"AggregateRemoteHealth not equal, %v and %v\", md.AggregateRemoteHealth, md2.AggregateRemoteHealth)\n\t}\n\tif md.AggregateRepairSize != md2.AggregateRepairSize {\n\t\treturn fmt.Errorf(\"AggregateRepairSize not equal, %v and %v\", md.AggregateRepairSize, 
md2.AggregateRepairSize)\n\t}\n\tif md.AggregateSize != md2.AggregateSize {\n\t\treturn fmt.Errorf(\"AggregateSize not equal, %v and %v\", md.AggregateSize, md2.AggregateSize)\n\t}\n\tif md.AggregateStuckHealth != md2.AggregateStuckHealth {\n\t\treturn fmt.Errorf(\"AggregateStuckHealth not equal, %v and %v\", md.AggregateStuckHealth, md2.AggregateStuckHealth)\n\t}\n\tif md.AggregateStuckSize != md2.AggregateStuckSize {\n\t\treturn fmt.Errorf(\"AggregateStuckSize not equal, %v and %v\", md.AggregateStuckSize, md2.AggregateStuckSize)\n\t}\n\n\t// Aggregate Skynet Fields\n\tif md.AggregateSkynetFiles != md2.AggregateSkynetFiles {\n\t\treturn fmt.Errorf(\"AggregateSkynetFiles not equal, %v and %v\", md.AggregateSkynetFiles, md2.AggregateSkynetFiles)\n\t}\n\tif md.AggregateSkynetSize != md2.AggregateSkynetSize {\n\t\treturn fmt.Errorf(\"AggregateSkynetSize not equal, %v and %v\", md.AggregateSkynetSize, md2.AggregateSkynetSize)\n\t}\n\n\t// Check SiaDir Fields\n\tif md.Health != md2.Health {\n\t\treturn fmt.Errorf(\"Healths not equal, %v and %v\", md.Health, md2.Health)\n\t}\n\tif md.LastHealthCheckTime != md2.LastHealthCheckTime {\n\t\treturn fmt.Errorf(\"LastHealthCheckTime not equal, %v and %v\", md.LastHealthCheckTime, md2.LastHealthCheckTime)\n\t}\n\tif md.MinRedundancy != md2.MinRedundancy {\n\t\treturn fmt.Errorf(\"MinRedundancy not equal, %v and %v\", md.MinRedundancy, md2.MinRedundancy)\n\t}\n\tif md.ModTime != md2.ModTime {\n\t\treturn fmt.Errorf(\"ModTime not equal, %v and %v\", md.ModTime, md2.ModTime)\n\t}\n\tif md.NumFiles != md2.NumFiles {\n\t\treturn fmt.Errorf(\"NumFiles not equal, %v and %v\", md.NumFiles, md2.NumFiles)\n\t}\n\tif md.NumStuckChunks != md2.NumStuckChunks {\n\t\treturn fmt.Errorf(\"NumStuckChunks not equal, %v and %v\", md.NumStuckChunks, md2.NumStuckChunks)\n\t}\n\tif md.NumSubDirs != md2.NumSubDirs {\n\t\treturn fmt.Errorf(\"NumSubDirs not equal, %v and %v\", md.NumSubDirs, md2.NumSubDirs)\n\t}\n\tif md.RemoteHealth != md2.RemoteHealth {\n\t\treturn fmt.Errorf(\"RemoteHealth not equal, %v and %v\", md.RemoteHealth, md2.RemoteHealth)\n\t}\n\tif md.RepairSize != md2.RepairSize {\n\t\treturn fmt.Errorf(\"RepairSize not equal, %v and %v\", md.RepairSize, md2.RepairSize)\n\t}\n\tif md.Size != md2.Size {\n\t\treturn fmt.Errorf(\"Sizes not equal, %v and %v\", md.Size, md2.Size)\n\t}\n\tif md.StuckHealth != md2.StuckHealth {\n\t\treturn fmt.Errorf(\"StuckHealth not equal, %v and %v\", md.StuckHealth, md2.StuckHealth)\n\t}\n\tif md.StuckSize != md2.StuckSize {\n\t\treturn fmt.Errorf(\"StuckSize not equal, %v and %v\", md.StuckSize, md2.StuckSize)\n\t}\n\n\t// Skynet Fields\n\tif md.SkynetFiles != md2.SkynetFiles {\n\t\treturn fmt.Errorf(\"SkynetFiles not equal, %v and %v\", md.SkynetFiles, md2.SkynetFiles)\n\t}\n\tif md.SkynetSize != md2.SkynetSize {\n\t\treturn fmt.Errorf(\"SkynetSize not equal, %v and %v\", md.SkynetSize, md2.SkynetSize)\n\t}\n\n\treturn nil\n}", "func MatchHeaders(fromReq, fromDB map[string]string) bool {\n\tvar matched = false\n\tfor k, dbVal := range fromDB {\n\t\tif headerVal, ok := fromReq[k]; ok && headerVal == dbVal {\n\t\t\tmatched = true\n\t\t} else {\n\t\t\t// return early if we get a different value\n\t\t\treturn false\n\t\t}\n\t}\n\treturn matched\n}", "func (m M) Equals(m1 M) bool {\n\tif m.Cols() != m1.Cols() || m.Rows() != m1.Rows() {\n\t\treturn false\n\t}\n\n\tfor r := 1; r <= m.Rows(); r++ {\n\t\tfor c := 1; c <= m.Cols(); c++ {\n\t\t\tif !m.Get(r, c).Equals(m1.Get(r, c)) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\n\treturn 
true\n}", "func (o1 StructTestObject) Diff(o2 StructTestObject) metago.Diff {\n\tchgs := make([]metago.Chg, 0)\n\n\t{\n\t\tva, vb := o1.B, o2.B\n\t\tif !va.Equals(vb) {\n\t\t\tchgs = append(chgs, metago.NewStructChg(&StructTestObjectBSREF, va.Diff(vb)))\n\t\t}\n\t}\n\n\t{\n\t\tva, vb := o1.MB, o2.MB\n\t\tfor key, va1 := range va {\n\t\t\tif vb1, ok := vb[key]; ok {\n\t\t\t\t// \"key\" exists in both \"va\" and \"vb\"\n\t\t\t\tchgs1 := make([]metago.Chg, 0)\n\t\t\t\tif !va1.Equals(vb1) {\n\t\t\t\t\tchgs1 = append(chgs1, metago.NewStructChg(&StructTestObjectMBSREF, va1.Diff(vb1)))\n\t\t\t\t}\n\t\t\t\tif len(chgs1) != 0 {\n\t\t\t\t\tchgs = append(chgs, metago.NewIntMapChg(&StructTestObjectMBSREF, key, metago.ChangeTypeModify, chgs1))\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// \"key\" exists in \"va\" but not in \"vb\"\n\t\t\t\tchgs1 := make([]metago.Chg, 0)\n\t\t\t\tt := BasicAttrTypesObject{}\n\t\t\t\tchgs1 = append(chgs1, metago.NewStructChg(&StructTestObjectMBSREF, va1.Diff(t)))\n\t\t\t\tif len(chgs1) != 0 {\n\t\t\t\t\tchgs = append(chgs, metago.NewIntMapChg(&StructTestObjectMBSREF, key, metago.ChangeTypeDelete, chgs1))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor key, vb1 := range vb {\n\t\t\tif _, ok := va[key]; ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// \"key\" exists in vb but not int va\"\n\t\t\tchgs1 := make([]metago.Chg, 0)\n\t\t\tt := BasicAttrTypesObject{}\n\t\t\tchgs1 = append(chgs1, metago.NewStructChg(&StructTestObjectMBSREF, t.Diff(vb1)))\n\t\t\tif len(chgs1) != 0 {\n\t\t\t\tchgs = append(chgs, metago.NewIntMapChg(&StructTestObjectMBSREF, key, metago.ChangeTypeInsert, chgs1))\n\t\t\t}\n\t\t}\n\t}\n\treturn metago.Diff{Chgs: chgs}\n}", "func (in *HeaderMatch) DeepEqual(other *HeaderMatch) bool {\n\tif other == nil {\n\t\treturn false\n\t}\n\n\tif in.Mismatch != other.Mismatch {\n\t\treturn false\n\t}\n\tif in.Name != other.Name {\n\t\treturn false\n\t}\n\tif (in.Secret == nil) != (other.Secret == nil) {\n\t\treturn false\n\t} else if in.Secret != nil {\n\t\tif !in.Secret.DeepEqual(other.Secret) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tif in.Value != other.Value {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func FieldsEqual(f1, f2 []*querypb.Field) bool {\n\tif len(f1) != len(f2) {\n\t\treturn false\n\t}\n\tfor i, f := range f1 {\n\t\tif !proto.Equal(f, f2[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func extIDsMatch(want, have map[string]string) bool {\n\tfor k, v := range want {\n\t\tactual, ok := have[k]\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\t\tif actual != v {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func equalBubbledMetadata(md1, md2 siadir.Metadata) error {\n\t// Check AggregateHealth\n\tif md1.AggregateHealth != md2.AggregateHealth {\n\t\treturn fmt.Errorf(\"AggregateHealth not equal, %v and %v\", md1.AggregateHealth, md2.AggregateHealth)\n\t}\n\t// Check AggregateNumFiles\n\tif md1.AggregateNumFiles != md2.AggregateNumFiles {\n\t\treturn fmt.Errorf(\"AggregateNumFiles not equal, %v and %v\", md1.AggregateNumFiles, md2.AggregateNumFiles)\n\t}\n\t// Check Size\n\tif md1.AggregateSize != md2.AggregateSize {\n\t\treturn fmt.Errorf(\"aggregate sizes not equal, %v and %v\", md1.AggregateSize, md2.AggregateSize)\n\t}\n\t// Check Health\n\tif md1.Health != md2.Health {\n\t\treturn fmt.Errorf(\"healths not equal, %v and %v\", md1.Health, md2.Health)\n\t}\n\t// Check LastHealthCheckTimes\n\tif md2.LastHealthCheckTime != md1.LastHealthCheckTime {\n\t\treturn fmt.Errorf(\"LastHealthCheckTimes not equal %v and %v\", md2.LastHealthCheckTime, 
md1.LastHealthCheckTime)\n\t}\n\t// Check MinRedundancy\n\tif md1.MinRedundancy != md2.MinRedundancy {\n\t\treturn fmt.Errorf(\"MinRedundancy not equal, %v and %v\", md1.MinRedundancy, md2.MinRedundancy)\n\t}\n\t// Check Mod Times\n\tif md2.ModTime != md1.ModTime {\n\t\treturn fmt.Errorf(\"ModTimes not equal %v and %v\", md2.ModTime, md1.ModTime)\n\t}\n\t// Check NumFiles\n\tif md1.NumFiles != md2.NumFiles {\n\t\treturn fmt.Errorf(\"NumFiles not equal, %v and %v\", md1.NumFiles, md2.NumFiles)\n\t}\n\t// Check NumStuckChunks\n\tif md1.NumStuckChunks != md2.NumStuckChunks {\n\t\treturn fmt.Errorf(\"NumStuckChunks not equal, %v and %v\", md1.NumStuckChunks, md2.NumStuckChunks)\n\t}\n\t// Check NumSubDirs\n\tif md1.NumSubDirs != md2.NumSubDirs {\n\t\treturn fmt.Errorf(\"NumSubDirs not equal, %v and %v\", md1.NumSubDirs, md2.NumSubDirs)\n\t}\n\t// Check StuckHealth\n\tif md1.StuckHealth != md2.StuckHealth {\n\t\treturn fmt.Errorf(\"stuck healths not equal, %v and %v\", md1.StuckHealth, md2.StuckHealth)\n\t}\n\treturn nil\n}", "func CheckIntersect(original, extras []string) bool {\n\tfor _, i := range original {\n\t\tfor _, x := range extras {\n\t\t\tif i == x {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}", "func testEqField(a, b []zapcore.Field) bool {\n\n\tif a == nil && b == nil {\n\t\treturn true\n\t}\n\n\tif a == nil || b == nil {\n\t\treturn false\n\t}\n\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\n\ttemp := false\n\tfor _, i := range a {\n\t\tfor _, j := range b {\n\t\t\tif i != j {\n\t\t\t\ttemp = false\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttemp = true\n\t\t\tbreak\n\t\t}\n\t\tif !temp {\n\t\t\treturn temp\n\t\t}\n\t}\n\n\treturn temp\n}", "func Match(errs1, errs2 error) bool {\n\tif errs1 == nil && errs2 == nil {\n\t\treturn true\n\t}\n\n\tif errs1 != nil {\n\t\terr1, ok := errs1.(*Errs)\n\t\tif ok {\n\t\t\terrs1 = err1.err\n\t\t}\n\t} else {\n\t\terrs1 = errors.New(\"nil\")\n\t}\n\n\tif errs2 != nil {\n\t\terr2, ok := errs2.(*Errs)\n\t\tif ok {\n\t\t\terrs2 = err2.err\n\t\t}\n\t} else {\n\t\terrs2 = errors.New(\"nil\")\n\t}\n\n\tif errs1.Error() != errs2.Error() {\n\t\treturn false\n\t}\n\treturn true\n}", "func protoKnownFieldsEqual(a, b proto.Message) bool {\n\treturn cmp.Equal(a, b, cmp.FilterPath(func(path cmp.Path) bool {\n\t\tif field, ok := path.Last().(cmp.StructField); ok {\n\t\t\treturn strings.HasPrefix(field.Name(), \"XXX_\")\n\t\t}\n\t\treturn false\n\t}, cmp.Ignore()))\n}", "func compare(a, b *Record) bool {\n\tif a == nil || b == nil {\n\t\treturn false\n\t}\n\tif a.Name != b.Name {\n\t\treturn false\n\t}\n\tif a.Type != b.Type {\n\t\treturn false\n\t}\n\tif a.TTL != b.TTL {\n\t\treturn false\n\t}\n\tif len(a.Data) != len(b.Data) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(a.Data); i++ {\n\t\tif a.Data[i] != b.Data[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func isXLMetaSimilar(m, n xlMetaV1) bool {\n\tif m.Version != n.Version {\n\t\treturn false\n\t}\n\tif m.Format != n.Format {\n\t\treturn false\n\t}\n\tif len(m.Parts) != len(n.Parts) {\n\t\treturn false\n\t}\n\treturn true\n}", "func isXLMetaSimilar(m, n xlMetaV1) bool {\n\tif m.Version != n.Version {\n\t\treturn false\n\t}\n\tif m.Format != n.Format {\n\t\treturn false\n\t}\n\tif len(m.Parts) != len(n.Parts) {\n\t\treturn false\n\t}\n\treturn true\n}", "func assertDescriptionEqual(d1, d2 Description) error {\n\tif d1.Name != d2.Name {\n\t\treturn fmt.Errorf(\"name not equal: [%v] / [%v]\", d1.Name, d2.Name)\n\t}\n\tif d1.Identity != d2.Identity {\n\t\treturn 
fmt.Errorf(\"identity not equal: [%v] / [%v]\", d1.Identity, d2.Identity)\n\t}\n\tif d1.Website != d2.Website {\n\t\treturn fmt.Errorf(\"website not equal: [%v] / [%v]\", d1.Website, d2.Website)\n\t}\n\tif d1.SecurityContact != d2.SecurityContact {\n\t\treturn fmt.Errorf(\"security contact not equal: [%v] / [%v]\", d1.SecurityContact, d2.SecurityContact)\n\t}\n\tif d1.Details != d2.Details {\n\t\treturn fmt.Errorf(\"details not equal: [%v] / [%v]\", d1.Details, d2.Details)\n\t}\n\treturn nil\n}", "func (a *Mtx) Equals(b *Mtx) bool {\n\tfor i := 0; i < 4; i++ {\n\t\tfor j := 0; j < 4; j++ {\n\t\t\tif a.el[i][j] != b.el[i][j] {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}", "func (params *headerParams) Equals(other interface{}) bool {\n\tq, ok := other.(*headerParams)\n\tif !ok {\n\t\treturn false\n\t}\n\n\tif params.Length() == 0 && q.Length() == 0 {\n\t\treturn true\n\t}\n\n\tif params.Length() != q.Length() {\n\t\treturn false\n\t}\n\n\tfor key, pVal := range params.Items() {\n\t\tqVal, ok := q.Get(key)\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\t\tif pVal != qVal {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func diffRemediations(old, new []*compv1alpha1.ComplianceRemediation) bool {\n\tif old == nil {\n\t\treturn new == nil\n\t}\n\n\tif len(old) != len(new) {\n\t\treturn false\n\t}\n\n\tfor idx := range old {\n\t\toldRem, newRem := old[idx], new[idx]\n\t\tif oldRem.Spec.Current.Object.GetKind() != newRem.Spec.Current.Object.GetKind() {\n\t\t\treturn false\n\t\t}\n\n\t\t// should we be more picky and just compare what can be set with the remediations? e.g. OSImageURL can't\n\t\t// be set with a remediation..\n\t\tif !cmp.Equal(oldRem.Spec.Current.Object, newRem.Spec.Current.Object) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func CheckDisjoint(src, trg *Item) bool {\n\ttype obj struct {\n\t\tsrc Attribute\n\t\ttrg Attribute\n\t}\n\tfor _, v := range []obj{\n\t\t{src.part, trg.part},\n\t\t{src.vendor, trg.vendor},\n\t\t{src.product, trg.product},\n\t\t{src.version, trg.version},\n\t\t{src.update, trg.update},\n\t\t{src.edition, trg.edition},\n\t\t{src.language, trg.language},\n\t\t{src.sw_edition, trg.sw_edition},\n\t\t{src.target_sw, trg.target_sw},\n\t\t{src.target_hw, trg.target_hw},\n\t\t{src.other, trg.other},\n\t} {\n\t\tswitch v.src.Comparison(v.trg) {\n\t\tcase Disjoint:\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func isSameUnstructured(obj1, obj2 *unstructured.Unstructured) bool {\n\tobj1Copy := obj1.DeepCopy()\n\tobj2Copy := obj2.DeepCopy()\n\n\t// Compare gvk, name, namespace at first\n\tif obj1Copy.GroupVersionKind() != obj2Copy.GroupVersionKind() {\n\t\treturn false\n\t}\n\tif obj1Copy.GetName() != obj2Copy.GetName() {\n\t\treturn false\n\t}\n\tif obj1Copy.GetNamespace() != obj2Copy.GetNamespace() {\n\t\treturn false\n\t}\n\n\t// Compare label and annotations\n\tif !equality.Semantic.DeepEqual(obj1Copy.GetLabels(), obj2Copy.GetLabels()) {\n\t\treturn false\n\t}\n\tif !equality.Semantic.DeepEqual(obj1Copy.GetAnnotations(), obj2Copy.GetAnnotations()) {\n\t\treturn false\n\t}\n\tif !equality.Semantic.DeepEqual(obj1Copy.GetOwnerReferences(), obj2Copy.GetOwnerReferences()) {\n\t\treturn false\n\t}\n\n\t// Compare semantically after removing metadata and status field\n\tdelete(obj1Copy.Object, \"metadata\")\n\tdelete(obj2Copy.Object, \"metadata\")\n\tdelete(obj1Copy.Object, \"status\")\n\tdelete(obj2Copy.Object, \"status\")\n\n\treturn equality.Semantic.DeepEqual(obj1Copy.Object, obj2Copy.Object)\n}", "func (s *OpenAPISchema) 
Equal(other *OpenAPISchema) bool {\n\tif s.ModelType() != other.ModelType() {\n\t\treturn false\n\t}\n\t// perform deep equality here.\n\tswitch s.ModelType() {\n\tcase \"any\":\n\t\treturn false\n\tcase MapType:\n\t\tif len(s.Properties) != len(other.Properties) {\n\t\t\treturn false\n\t\t}\n\t\tfor prop, nested := range s.Properties {\n\t\t\totherNested, found := other.Properties[prop]\n\t\t\tif !found || !nested.Equal(otherNested) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\tif s.AdditionalProperties != nil && other.AdditionalProperties != nil &&\n\t\t\t!s.AdditionalProperties.Equal(other.AdditionalProperties) {\n\t\t\treturn false\n\t\t}\n\t\tif s.AdditionalProperties != nil && other.AdditionalProperties == nil ||\n\t\t\ts.AdditionalProperties == nil && other.AdditionalProperties != nil {\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\tcase ListType:\n\t\treturn s.Items.Equal(other.Items)\n\tdefault:\n\t\treturn true\n\t}\n}", "func sameAPI(apiA, apiB map[string]interface{}) bool {\n\tif len(apiA) != len(apiB) {\n\t\treturn false\n\t}\n\tfor field, valueA := range apiA {\n\t\tvalueB, inB := apiB[field]\n\t\tif !(inB && reflect.DeepEqual(valueA, valueB) || field == \"etag\" || field == \"revision\") {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func canMerge(a, b *jointRequest) bool {\n\tif !reflect.DeepEqual(a.tileConfig, b.tileConfig) {\n\t\treturn false\n\t}\n\tif !reflect.DeepEqual(a.query, b.query) {\n\t\treturn false\n\t}\n\treturn a.dataset == b.dataset\n}", "func (m *fetchMetadataReqMatcher) matchesV1(x interface{}) bool {\n\treq, ok := x.(*rpc.FetchBlocksMetadataRawRequest)\n\tif !ok {\n\t\treturn false\n\t}\n\n\tif m.shard != req.Shard {\n\t\treturn false\n\t}\n\n\tif m.limit != req.Limit {\n\t\treturn false\n\t}\n\n\tif m.pageToken == nil {\n\t\tif req.PageToken != nil {\n\t\t\treturn false\n\t\t}\n\t} else {\n\t\tif req.PageToken == nil {\n\t\t\treturn false\n\t\t}\n\t\tif *req.PageToken != *m.pageToken {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tif m.includeSizes == nil {\n\t\tif req.IncludeSizes != nil {\n\t\t\treturn false\n\t\t}\n\t} else {\n\t\tif req.IncludeSizes == nil {\n\t\t\treturn false\n\t\t}\n\t\tif *req.IncludeSizes != *m.includeSizes {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func Compatible(m1, m2 *Matrix) bool {\n\treturn false\n}", "func checkFields(data types.TableContent, fields map[string]string) error {\n\tvar seen = make(map[string]struct{}, len(fields))\n\n\tfor i := range data {\n\t\tfor k := range data[i] {\n\t\t\tseen[k] = struct{}{}\n\t\t}\n\t}\n\n\tfor _, f := range fields {\n\t\tif _, ok := seen[f]; !ok {\n\t\t\treturn fmt.Errorf(\"%w: %q missing\", ErrMissingField, f)\n\t\t}\n\t}\n\n\treturn nil\n}", "func mergeFields(segments []*SegmentBase) (bool, []string) {\n\tfieldsSame := true\n\n\tvar segment0Fields []string\n\tif len(segments) > 0 {\n\t\tsegment0Fields = segments[0].Fields()\n\t}\n\n\tfieldsExist := map[string]struct{}{}\n\tfor _, segment := range segments {\n\t\tfields := segment.Fields()\n\t\tfor fieldi, field := range fields {\n\t\t\tfieldsExist[field] = struct{}{}\n\t\t\tif len(segment0Fields) != len(fields) || segment0Fields[fieldi] != field {\n\t\t\t\tfieldsSame = false\n\t\t\t}\n\t\t}\n\t}\n\n\trv := make([]string, 0, len(fieldsExist))\n\t// ensure _id stays first\n\trv = append(rv, \"_id\")\n\tfor k := range fieldsExist {\n\t\tif k != \"_id\" {\n\t\t\trv = append(rv, k)\n\t\t}\n\t}\n\n\tsort.Strings(rv[1:]) // leave _id as first\n\n\treturn fieldsSame, rv\n}", "func compareManifests(r1, r2 *runtime.RawExtension) 
bool {\n\tu1, err := convertRawExtensiontoUnstructured(r1)\n\tif err != nil {\n\t\treturn false\n\t}\n\tu2, err := convertRawExtensiontoUnstructured(r2)\n\tif err != nil {\n\t\treturn false\n\t}\n\tif u1 == nil || u2 == nil {\n\t\treturn u2 == nil && u1 == nil\n\t}\n\tif u1.GetName() != u2.GetName() ||\n\t\tu1.GetNamespace() != u2.GetNamespace() ||\n\t\tu1.GetKind() != u2.GetKind() ||\n\t\tu1.GetAPIVersion() != u2.GetAPIVersion() {\n\t\treturn false\n\t}\n\thasDiff := false\n\tfor _, r := range rootAttributes {\n\t\tif newValue, ok := u2.Object[r]; ok {\n\t\t\tif !reflect.DeepEqual(newValue, u1.Object[r]) {\n\t\t\t\thasDiff = true\n\t\t\t}\n\t\t} else {\n\t\t\tif _, ok := u1.Object[r]; ok {\n\t\t\t\thasDiff = true\n\t\t\t}\n\t\t}\n\t}\n\treturn !hasDiff\n}", "func notMatchField(id int, fields []int) bool {\r\n\tfor _, i := range fields {\r\n\t\tif id == i {\r\n\t\t\treturn false\r\n\t\t}\r\n\t}\r\n\treturn true\r\n}", "func compareAuthSet(expected *model.AuthSet, actual *model.AuthSet, t *testing.T) {\n\tassert.Equal(t, expected.IdData, actual.IdData)\n\tassert.Equal(t, expected.PubKey, actual.PubKey)\n\tassert.Equal(t, expected.DeviceId, actual.DeviceId)\n\tassert.Equal(t, expected.IdDataStruct, actual.IdDataStruct)\n\tassert.Equal(t, expected.IdDataSha256, actual.IdDataSha256)\n\tassert.Equal(t, expected.Status, actual.Status)\n\tcompareTime(uto.Time(expected.Timestamp), uto.Time(actual.Timestamp), t)\n}", "func StrictMatches(n1, n2 xml.Name) bool {\n\treturn n1.Local == n2.Local && n1.Space == n2.Space\n}", "func (vb *Builder) Match(fieldName string, str1, str2 interface{}) {\n\tif str1 != str2 {\n\t\tvb.Append(fieldName, doesNotMatch)\n\t}\n}", "func (m1 Map) Equal(m2 Map) bool {\n\tif len(m1) != len(m2) {\n\t\treturn false\n\t}\n\n\tfor k, v := range m1 {\n\t\tif w, ok := m2[k]; !ok || v != w {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (m fieldMatcher) Matches(x interface{}) bool {\n\tval := reflect.ValueOf(x)\n\tfor i := 0; i < val.NumField(); i++ {\n\t\tfield := val.Type().Field(i)\n\t\tif field.Name == m.Key {\n\t\t\tif reflect.DeepEqual(getValue(val.Field(i)), m.Value) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func MsgCidsEqual(m1, m2 *Message) bool {\n\tm1Cid, err := m1.Cid()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tm2Cid, err := m2.Cid()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn m1Cid.Equals(m2Cid)\n}", "func FullMatch(a cloudflare.DNSRecord, b cloudflare.DNSRecord) bool {\n\tif a.Type != b.Type {\n\t\treturn false\n\t}\n\n\tif a.Name != b.Name {\n\t\treturn false\n\t}\n\n\tif a.Proxied != b.Proxied {\n\t\treturn false\n\t}\n\n\tif a.TTL != b.TTL {\n\t\treturn false\n\t}\n\n\tswitch a.Type {\n\tcase \"A\", \"AAAA\", \"CNAME\", \"TXT\":\n\t\tif a.Content == b.Content {\n\t\t\treturn true\n\t\t}\n\n\tcase \"MX\":\n\t\tif a.Content == b.Content && a.Priority == b.Priority {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (m *TwoValidOneofs) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\tswitch m.Message1.(type) {\n\n\tcase *TwoValidOneofs_M11:\n\t\t// no validation rules for M11\n\n\tcase *TwoValidOneofs_M12:\n\t\t// no validation rules for M12\n\n\t}\n\n\tswitch m.Message2.(type) {\n\n\tcase *TwoValidOneofs_M21:\n\t\t// no validation rules for M21\n\n\tcase *TwoValidOneofs_M22:\n\t\t// no validation rules for M22\n\n\t}\n\n\treturn nil\n}", "func CompareTX(tx1 *Transaction, tx2 *Transaction) bool {\n\tif !bytes.Equal(tx1.ID, tx2.ID) {\n\t\treturn false\n\t}\n\n\tif tx1.Type != tx2.Type {\n\t\treturn 
false\n\t}\n\n\tif tx1.Reason != tx2.Reason {\n\t\treturn false\n\t}\n\n\tif tx1.Value != tx2.Value {\n\t\treturn false\n\t}\n\n\ttx1RelevantTransactionIds := tx1.RelevantTransactionIds\n\ttx2RelevantTransactionIds := tx2.RelevantTransactionIds\n\n\tif len(tx1RelevantTransactionIds) != len(tx2RelevantTransactionIds) {\n\t\treturn false\n\t}\n\n\tfor i, tx1reltxid := range tx1RelevantTransactionIds {\n\t\ttx2reltxid := tx2RelevantTransactionIds[i]\n\n\t\tif !bytes.Equal(tx1reltxid, tx2reltxid) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func TestEquals(t *testing.T) {\n\tt.Parallel()\n\tfor ti, tt := range []struct {\n\t\tm1, m2 MatrixExp\n\t\teq bool\n\t}{\n\t\t{\n\t\t\tm1: GeneralZeros(1, 1),\n\t\t\tm2: GeneralZeros(1, 1),\n\t\t\teq: true,\n\t\t},\n\t\t{\n\t\t\tm1: GeneralZeros(1, 1),\n\t\t\tm2: GeneralZeros(1, 10),\n\t\t\teq: false,\n\t\t},\n\t\t{\n\t\t\tm1: GeneralZeros(10, 1),\n\t\t\tm2: GeneralZeros(1, 1),\n\t\t\teq: false,\n\t\t},\n\t\t{\n\t\t\tm1: GeneralZeros(1, 1),\n\t\t\tm2: GeneralOnes(1, 1),\n\t\t\teq: false,\n\t\t},\n\t} {\n\t\tif v := Equals(tt.m1, tt.m2); v != tt.eq {\n\t\t\tt.Errorf(\"%d: Equals(%v,%v) equals %v, want %v\", ti, tt.m1, tt.m2, v, tt.eq)\n\t\t}\n\t}\n}", "func RequireFilterEqual(t *testing.T, v1, v2 interface{}, ignoreTypes []interface{}) {\n\tt.Helper()\n\n\tdiff := cmp.Diff(v1, v2, cmpopts.IgnoreTypes(ignoreTypes...),\n\t\tcmp.Exporter(func(reflect.Type) bool { return true }))\n\tif diff != \"\" {\n\t\tt.Errorf(\"Not equal:\\n%s\", diff)\n\t\tt.FailNow()\n\t}\n}", "func (h *HeaderMatch) Equal(o *HeaderMatch) bool {\n\tif h.Mismatch != o.Mismatch ||\n\t\th.Name != o.Name ||\n\t\th.Value != o.Value ||\n\t\t!h.Secret.Equal(o.Secret) {\n\t\treturn false\n\t}\n\treturn true\n}", "func ErrIfMismatchedColumns(t1, t2 sql.Type) error {\n\tif NumColumns(t1) != NumColumns(t2) {\n\t\treturn sql.ErrInvalidOperandColumns.New(NumColumns(t1), NumColumns(t2))\n\t}\n\tv1, ok1 := t1.(TupleType)\n\tv2, ok2 := t2.(TupleType)\n\tif ok1 && ok2 {\n\t\tfor i := range v1 {\n\t\t\tif err := ErrIfMismatchedColumns(v1[i], v2[i]); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func HeaderMatcherLess(m1, m2 *envoy_config_route.HeaderMatcher) bool {\n\tswitch {\n\tcase m1.Name < m2.Name:\n\t\treturn true\n\tcase m1.Name > m2.Name:\n\t\treturn false\n\t}\n\n\t// Compare the header_match_specifier oneof field, by comparing each\n\t// possible field in the oneof individually:\n\t// - exactMatch\n\t// - regexMatch\n\t// - rangeMatch\n\t// - presentMatch\n\t// - prefixMatch\n\t// - suffixMatch\n\t// Use the getters to access the fields and return zero values when they\n\t// are not set.\n\n\ts1 := m1.GetExactMatch()\n\ts2 := m2.GetExactMatch()\n\tswitch {\n\tcase s1 < s2:\n\t\treturn true\n\tcase s1 > s2:\n\t\treturn false\n\t}\n\n\tsrm1 := m1.GetSafeRegexMatch()\n\tsrm2 := m2.GetSafeRegexMatch()\n\tswitch {\n\tcase srm1 == nil && srm2 != nil:\n\t\treturn true\n\tcase srm1 != nil && srm2 == nil:\n\t\treturn false\n\tcase srm1 != nil && srm2 != nil:\n\t\tswitch {\n\t\tcase srm1.Regex < srm2.Regex:\n\t\t\treturn true\n\t\tcase srm1.Regex > srm2.Regex:\n\t\t\treturn false\n\t\t}\n\t}\n\n\trm1 := m1.GetRangeMatch()\n\trm2 := m2.GetRangeMatch()\n\tswitch {\n\tcase rm1 == nil && rm2 != nil:\n\t\treturn true\n\tcase rm1 != nil && rm2 == nil:\n\t\treturn false\n\tcase rm1 != nil && rm2 != nil:\n\t\tswitch {\n\t\tcase rm1.Start < rm2.Start:\n\t\t\treturn true\n\t\tcase rm1.Start > rm2.Start:\n\t\t\treturn false\n\t\t}\n\t\tswitch {\n\t\tcase rm1.End < 
rm2.End:\n\t\t\treturn true\n\t\tcase rm1.End > rm2.End:\n\t\t\treturn false\n\t\t}\n\t}\n\n\tswitch {\n\tcase !m1.GetPresentMatch() && m2.GetPresentMatch():\n\t\treturn true\n\tcase m1.GetPresentMatch() && !m2.GetPresentMatch():\n\t\treturn false\n\t}\n\n\ts1 = m1.GetPrefixMatch()\n\ts2 = m2.GetPrefixMatch()\n\tswitch {\n\tcase s1 < s2:\n\t\treturn true\n\tcase s1 > s2:\n\t\treturn false\n\t}\n\n\ts1 = m1.GetSuffixMatch()\n\ts2 = m2.GetSuffixMatch()\n\tswitch {\n\tcase s1 < s2:\n\t\treturn true\n\tcase s1 > s2:\n\t\treturn false\n\t}\n\n\tswitch {\n\tcase !m1.InvertMatch && m2.InvertMatch:\n\t\treturn true\n\tcase m1.InvertMatch && !m2.InvertMatch:\n\t\treturn false\n\t}\n\n\t// Elements are equal.\n\treturn false\n}", "func (t *ContentType) Matches(other *ContentType) bool {\n\tif other == nil {\n\t\treturn false\n\t}\n\n\tif t.MediaType != other.MediaType {\n\t\treturn false\n\t}\n\n\tfor param, expect := range t.Parameters {\n\t\t// disregard quality\n\t\tif param == \"q\" {\n\t\t\tcontinue\n\t\t}\n\t\tif val, ok := other.Parameters[param]; !ok || val != expect {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n\n}", "func MapKeysEq(map1, map2 interface{}) bool {\n\trv1 := reflect.ValueOf(map1)\n\trv2 := reflect.ValueOf(map2)\n\tif rv1.Len() != rv2.Len() {\n\t\treturn false\n\t}\n\tr1keys := rv1.MapKeys()\n\tfor _, val := range r1keys {\n\t\tif !rv2.MapIndex(val).IsValid() {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func AssertMembersMatch(t T, actual []toolchainv1alpha1.Member, expected ...toolchainv1alpha1.Member) {\n\trequire.Equal(t, len(expected), len(actual))\n\tfor _, c := range expected {\n\t\tAssertContainsMember(t, actual, c)\n\t}\n}", "func (a BlogAuthor) shouldMatchAuthorBody() {\n\thasNonNilBodyFields := a.Avatar != \"\" || a.Bio != \"\" || a.Facebook != \"\" || a.Google_plus != \"\" || a.Linkedin != \"\" || a.Twitter != \"\" || a.Website != \"\"\n\tblankAuthorBody := authorBody{}\n\tif a.Body == blankAuthorBody && hasNonNilBodyFields {\n\t\tpanic(\"Some authors have empty bodies, but non-empty counterpart fields.\")\n\t}\n\tif a.Avatar != a.Body.Avatar {\n\t\tpanic(\"Not all authors avatars match their body avatars.\")\n\t}\n\tif a.Bio != a.Body.Bio {\n\t\tpanic(\"Not all author bios match their body bios.\")\n\t}\n\tif a.Facebook != a.Body.Facebook {\n\t\tpanic(\"Not all author facebooks math their body facebooks.\")\n\t}\n\tif a.Google_plus != a.Body.Google_plus {\n\t\tpanic(\"Not all author google_pluses math their body google_plus.\")\n\t}\n\tif a.Linkedin != a.Body.Linkedin {\n\t\tpanic(\"Not all author linkedin match their body linkedin.\")\n\t}\n\tif a.Twitter != a.Body.Twitter {\n\t\tpanic(\"Not all author twitter match their body twitter.\")\n\t}\n\tif a.Website != a.Body.Website {\n\t\tpanic(\"Not all author websites match their body websites.\")\n\t}\n}", "func parseSpecsIdentical(x, y parseSpec) bool {\n\tif len(x) != len(y) {\n\t\treturn false\n\t}\n\n\tfor i, v := range x {\n\t\tif v != y[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n\n}", "func AssertFilterEqual(t *testing.T, v1, v2 interface{}, ignoreTypes []interface{}) {\n\tt.Helper()\n\n\tdiff := cmp.Diff(v1, v2, cmpopts.IgnoreTypes(ignoreTypes...),\n\t\tcmp.Exporter(func(reflect.Type) bool { return true }))\n\tif diff != \"\" {\n\t\tt.Errorf(\"Not equal:\\n%s\", diff)\n\t}\n}", "func (m *TwoOneofs) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\tswitch m.Message1.(type) {\n\n\tcase *TwoOneofs_M11:\n\t\t// no validation rules for M11\n\n\tcase *TwoOneofs_M12:\n\t\t// 
no validation rules for M12\n\n\t}\n\n\tswitch m.Message2.(type) {\n\n\tcase *TwoOneofs_M21:\n\t\t// no validation rules for M21\n\n\tcase *TwoOneofs_M22:\n\t\t// no validation rules for M22\n\n\t}\n\n\treturn nil\n}", "func (a BlogAuthor) CheckThatAuthorMatchesDuplicates(as []BlogAuthor) {\n\tfor _, da := range as {\n\t\tif a.Id == da.Id && a != da {\n\t\t\tpanic(\"Author does not match another field-by-field with same id\")\n\t\t}\n\t}\n}", "func (a joinedTable) equal(b joinedTable) bool {\n\treturn a.secondaryTable == b.secondaryTable && a.primaryColumn == b.primaryColumn && a.secondaryColumn == b.secondaryColumn\n}", "func ColumnsEqual(col1, col2 *array.Column) (bool, string) {\n\n\tif col1.DataType().ID() != col2.DataType().ID() {\n\t\treturn false, \"Inconsistent types\"\n\t}\n\n\tchunks1 := col1.Data().Chunks()\n\tchunks2 := col2.Data().Chunks()\n\n\tif len(chunks1) != len(chunks2) {\n\t\treturn false, fmt.Sprintf(\"Unequal chunk counts, %d != %d\", len(chunks1), len(chunks2))\n\t}\n\n\tfor k := range chunks1 {\n\n\t\tchunk1 := chunks1[k]\n\t\tchunk2 := chunks2[k]\n\n\t\tswitch col1.DataType() {\n\t\tcase arrow.PrimitiveTypes.Uint8:\n\t\t\ty1 := array.NewUint8Data(chunk1.Data())\n\t\t\ty2 := array.NewUint8Data(chunk2.Data())\n\t\t\tif !assert.ObjectsAreEqualValues(y1.Uint8Values(), y2.Uint8Values()) {\n\t\t\t\treturn false, fmt.Sprintf(\"Unequal uint8 values in chunk %d.\\n\", k)\n\t\t\t}\n\t\t\tif !assert.ObjectsAreEqualValues(y1.NullBitmapBytes(), y2.NullBitmapBytes()) {\n\t\t\t\treturn false, fmt.Sprintf(\"Unequal valid mask in chunk %d.\\n\", k)\n\t\t\t}\n\t\tcase arrow.PrimitiveTypes.Uint16:\n\t\t\ty1 := array.NewUint16Data(chunk1.Data())\n\t\t\ty2 := array.NewUint16Data(chunk2.Data())\n\t\t\tif !assert.ObjectsAreEqualValues(y1.Uint16Values(), y2.Uint16Values()) {\n\t\t\t\treturn false, fmt.Sprintf(\"Unequal uint16 values in chunk %d.\\n\", k)\n\t\t\t}\n\t\t\tif !assert.ObjectsAreEqualValues(y1.NullBitmapBytes(), y2.NullBitmapBytes()) {\n\t\t\t\treturn false, fmt.Sprintf(\"Unequal valid mask in chunk %d.\\n\", k)\n\t\t\t}\n\t\tcase arrow.PrimitiveTypes.Uint32:\n\t\t\ty1 := array.NewUint32Data(chunk1.Data())\n\t\t\ty2 := array.NewUint32Data(chunk2.Data())\n\t\t\tif !assert.ObjectsAreEqualValues(y1.Uint32Values(), y2.Uint32Values()) {\n\t\t\t\treturn false, fmt.Sprintf(\"Unequal uint32 values in chunk %d.\\n\", k)\n\t\t\t}\n\t\t\tif !assert.ObjectsAreEqualValues(y1.NullBitmapBytes(), y2.NullBitmapBytes()) {\n\t\t\t\treturn false, fmt.Sprintf(\"Unequal valid mask in chunk %d.\\n\", k)\n\t\t\t}\n\t\tcase arrow.PrimitiveTypes.Uint64:\n\t\t\ty1 := array.NewUint64Data(chunk1.Data())\n\t\t\ty2 := array.NewUint64Data(chunk2.Data())\n\t\t\tif !assert.ObjectsAreEqualValues(y1.Uint64Values(), y2.Uint64Values()) {\n\t\t\t\treturn false, fmt.Sprintf(\"Unequal uint64 values in chunk %d.\\n\", k)\n\t\t\t}\n\t\t\tif !assert.ObjectsAreEqualValues(y1.NullBitmapBytes(), y2.NullBitmapBytes()) {\n\t\t\t\treturn false, fmt.Sprintf(\"Unequal valid mask in chunk %d.\\n\", k)\n\t\t\t}\n\t\tcase arrow.PrimitiveTypes.Int8:\n\t\t\ty1 := array.NewInt8Data(chunk1.Data())\n\t\t\ty2 := array.NewInt8Data(chunk2.Data())\n\t\t\tif !assert.ObjectsAreEqualValues(y1.Int8Values(), y2.Int8Values()) {\n\t\t\t\treturn false, fmt.Sprintf(\"Unequal int8 values in chunk %d.\\n\", k)\n\t\t\t}\n\t\t\tif !assert.ObjectsAreEqualValues(y1.NullBitmapBytes(), y2.NullBitmapBytes()) {\n\t\t\t\treturn false, fmt.Sprintf(\"Unequal valid mask in chunk %d.\\n\", k)\n\t\t\t}\n\t\tcase arrow.PrimitiveTypes.Int16:\n\t\t\ty1 := 
array.NewInt16Data(chunk1.Data())\n\t\t\ty2 := array.NewInt16Data(chunk2.Data())\n\t\t\tif !assert.ObjectsAreEqualValues(y1.Int16Values(), y2.Int16Values()) {\n\t\t\t\treturn false, fmt.Sprintf(\"Unequal int16 values in chunk %d.\\n\", k)\n\t\t\t}\n\t\t\tif !assert.ObjectsAreEqualValues(y1.NullBitmapBytes(), y2.NullBitmapBytes()) {\n\t\t\t\treturn false, fmt.Sprintf(\"Unequal valid mask in chunk %d.\\n\", k)\n\t\t\t}\n\t\tcase arrow.PrimitiveTypes.Int32:\n\t\t\ty1 := array.NewInt32Data(chunk1.Data())\n\t\t\ty2 := array.NewInt32Data(chunk2.Data())\n\t\t\tif !assert.ObjectsAreEqualValues(y1.Int32Values(), y2.Int32Values()) {\n\t\t\t\treturn false, fmt.Sprintf(\"Unequal int32 values in chunk %d.\\n\", k)\n\t\t\t}\n\t\t\tif !assert.ObjectsAreEqualValues(y1.NullBitmapBytes(), y2.NullBitmapBytes()) {\n\t\t\t\treturn false, fmt.Sprintf(\"Unequal valid mask in chunk %d.\\n\", k)\n\t\t\t}\n\t\tcase arrow.PrimitiveTypes.Int64:\n\t\t\ty1 := array.NewInt64Data(chunk1.Data())\n\t\t\ty2 := array.NewInt64Data(chunk2.Data())\n\t\t\tif !assert.ObjectsAreEqualValues(y1.Int64Values(), y2.Int64Values()) {\n\t\t\t\treturn false, fmt.Sprintf(\"Unequal int64 values in chunk %d.\\n\", k)\n\t\t\t}\n\t\t\tif !assert.ObjectsAreEqualValues(y1.NullBitmapBytes(), y2.NullBitmapBytes()) {\n\t\t\t\treturn false, fmt.Sprintf(\"Unequal valid mask in chunk %d.\\n\", k)\n\t\t\t}\n\t\tcase arrow.PrimitiveTypes.Float32:\n\t\t\ty1 := array.NewFloat32Data(chunk1.Data())\n\t\t\ty2 := array.NewFloat32Data(chunk2.Data())\n\t\t\tif !assert.ObjectsAreEqualValues(y1.Float32Values(), y2.Float32Values()) {\n\t\t\t\treturn false, fmt.Sprintf(\"Unequal float32 values in chunk %d.\\n\", k)\n\t\t\t}\n\t\t\tif !assert.ObjectsAreEqualValues(y1.NullBitmapBytes(), y2.NullBitmapBytes()) {\n\t\t\t\treturn false, fmt.Sprintf(\"Unequal valid mask in chunk %d.\\n\", k)\n\t\t\t}\n\t\tcase arrow.PrimitiveTypes.Float64:\n\t\t\ty1 := array.NewFloat64Data(chunk1.Data())\n\t\t\ty2 := array.NewFloat64Data(chunk2.Data())\n\t\t\tif !assert.ObjectsAreEqualValues(y1.Float64Values(), y2.Float64Values()) {\n\t\t\t\treturn false, fmt.Sprintf(\"Unequal float64 values in chunk %d.\\n\", k)\n\t\t\t}\n\t\t\tif !assert.ObjectsAreEqualValues(y1.NullBitmapBytes(), y2.NullBitmapBytes()) {\n\t\t\t\treturn false, fmt.Sprintf(\"Unequal valid mask in chunk %d.\\n\", k)\n\t\t\t}\n\t\tcase arrow.BinaryTypes.String:\n\t\t\ty1 := array.NewStringData(chunk1.Data())\n\t\t\ty2 := array.NewStringData(chunk2.Data())\n\t\t\tif y1.Len() != y2.Len() {\n\t\t\t\treturn false, fmt.Sprintf(\"Unequal lengths of string values in chunk %d\\n\", k)\n\t\t\t}\n\t\t\tif !assert.ObjectsAreEqualValues(y1.NullBitmapBytes(), y2.NullBitmapBytes()) {\n\t\t\t\treturn false, fmt.Sprintf(\"Unequal valid mask in chunk %d.\\n\", k)\n\t\t\t}\n\t\t\tfor i := 0; i < y1.Len(); i++ {\n\t\t\t\tif y1.Value(i) != y2.Value(i) {\n\t\t\t\t\treturn false, fmt.Sprintf(\"Unequal string values in chunk %d\\n\", k)\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\tpanic(\"unknown type\")\n\t\t}\n\t}\n\n\treturn true, \"\"\n}", "func missingRequiredFields(m meminfomap, fields []string) bool {\n\tfor _, f := range fields {\n\t\tif _, ok := m[f]; !ok {\n\t\t\tlog.Printf(\"Missing field '%v'\", f)\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (m *Match) Equals(other *Match) bool {\n\tif m == nil && other == nil {\n\t\treturn true\n\t} else if m == nil {\n\t\treturn false\n\t} else if other == nil {\n\t\treturn false\n\t}\n\treturn m.PC == other.PC &&\n\t\tm.StartLine == other.StartLine &&\n\t\tm.StartColumn == 
other.StartColumn &&\n\t\tm.EndLine == other.EndLine &&\n\t\tm.EndColumn == other.EndColumn &&\n\t\tbytes.Equal(m.Bytes, other.Bytes)\n}", "func (d Descriptor) Same(d2 Descriptor) bool {\n\tif d.Digest != d2.Digest || d.Size != d2.Size {\n\t\treturn false\n\t}\n\t// loosen the check on media type since this can be converted from a build\n\tif d.MediaType != d2.MediaType {\n\t\tif _, ok := mtToOCI[d.MediaType]; !ok {\n\t\t\treturn false\n\t\t} else if mtToOCI[d.MediaType] != mtToOCI[d2.MediaType] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (p Perms) Include(other Perms) bool {\n\treturn p&other == other\n}", "func (pid *ParticipantID) Matches(other *ParticipantID) bool {\n\tif pid.GaiaID != \"\" && pid.GaiaID == other.GaiaID {\n\t\treturn true\n\t}\n\tif pid.ChatID != \"\" && pid.ChatID == other.ChatID {\n\t\treturn true\n\t}\n\treturn false\n}", "func headerMatch(tmplHeaders, reqHeaders map[string][]string) bool {\n\n\tfor headerName, headerVal := range tmplHeaders {\n\t\t// TODO: case insensitive lookup\n\t\t// TODO: is order of values in slice really important?\n\n\t\treqHeaderVal, ok := reqHeaders[headerName]\n\t\tif ok && reflect.DeepEqual(headerVal, reqHeaderVal) {\n\t\t\tcontinue\n\t\t} else {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (ft *FieldType) PartialEqual(other *FieldType, unsafe bool) bool {\n\tif !unsafe || ft.EvalType() != ETString || other.EvalType() != ETString {\n\t\treturn ft.Equal(other)\n\t}\n\n\tpartialEqual := ft.charset == other.charset && ft.collate == other.collate && mysql.HasUnsignedFlag(ft.flag) == mysql.HasUnsignedFlag(other.flag)\n\tif !partialEqual || len(ft.elems) != len(other.elems) {\n\t\treturn false\n\t}\n\tfor i := range ft.elems {\n\t\tif ft.elems[i] != other.elems[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func FieldsEqual(a Field, b Field) FormValidator {\n\treturn func(form *Form) {\n\t\tif a.GetValue() != b.GetValue() {\n\t\t\terr := \"The value of this field is different.\"\n\t\t\tb.AddError(err)\n\t\t}\n\t}\n}", "func (in *MismatchedField) DeepCopy() *MismatchedField {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(MismatchedField)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (t *Table) Same(t2 *Table) bool {\n\treturn false\n}", "func (m *matcher) Matches(x interface{}) bool {\n\td1 := m.StoredData\n\td2, ok := x.(*repo.StoredData)\n\tif !ok {\n\t\treturn false\n\t}\n\n\treturn d1.Data == d2.Data && d1.ID == d2.ID\n}", "func (a *List) M__eq__(other Object) (Object, error) {\n\tb, ok := other.(*List)\n\tif !ok {\n\t\treturn NotImplemented, nil\n\t}\n\tif len(a.Items) != len(b.Items) {\n\t\treturn False, nil\n\t}\n\tfor i := range a.Items {\n\t\teq, err := Eq(a.Items[i], b.Items[i])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif eq == False {\n\t\t\treturn False, nil\n\t\t}\n\t}\n\treturn True, nil\n}", "func AlmostEqual(m1, m2 *Matrix) bool {\n\treturn false\n}", "func (fm *FinalModelStructBytes) VerifyFields() int {\n fbeCurrentOffset := 0\n fbeFieldSize := 0\n\n\n fm.F1.SetFBEOffset(fbeCurrentOffset)\n if fbeFieldSize = fm.F1.Verify(); fbeFieldSize == fbe.MaxInt {\n return fbe.MaxInt\n }\n fbeCurrentOffset += fbeFieldSize\n\n fm.F2.SetFBEOffset(fbeCurrentOffset)\n if fbeFieldSize = fm.F2.Verify(); fbeFieldSize == fbe.MaxInt {\n return fbe.MaxInt\n }\n fbeCurrentOffset += fbeFieldSize\n\n fm.F3.SetFBEOffset(fbeCurrentOffset)\n if fbeFieldSize = fm.F3.Verify(); fbeFieldSize == fbe.MaxInt {\n return fbe.MaxInt\n }\n fbeCurrentOffset += fbeFieldSize\n\n return 
fbeCurrentOffset\n}", "func (jo *Object) Equals(other *Object) bool {\n\tif jo == nil || other == nil || len(jo.Properties) != len(other.Properties) {\n\t\treturn false\n\t}\n\n\tvalues := make(map[string][]Value)\n\n\tfor k, l := range jo.fields {\n\t\tr, ok := other.fields[k]\n\t\tif ok {\n\t\t\tvalues[k] = []Value{\n\t\t\t\tjo.Properties[l].Value,\n\t\t\t\tother.Properties[r].Value,\n\t\t\t}\n\t\t} else {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tfor k := range other.fields {\n\t\tif _, ok := jo.fields[k]; !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tfor _, v := range values {\n\t\tl := v[0]\n\t\tr := v[1]\n\t\tif !compareValues(l, r) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func checkTargetMapContainsSourceMap(source, target map[string]interface{}) bool {\n\tfor key, sourceValue := range source {\n\t\ttargetValue, exist := target[key]\n\t\tif !exist && !reflect.DeepEqual(sourceValue, targetValue) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func checkInclusion1_1(s2 string, s1 string) bool {\n\tif len(s1) < len(s2) {\n\t\treturn false\n\t}\n\n\tl, r, matches, count1, count2 := 0, 0, 0, make(map[byte]int), make(map[byte]int)\n\n\tfor i := 0; i < len(s2); i++ {\n\t\tcount2[s2[i]]++\n\t}\n\n\tfor r < len(s1) {\n\t\tif _, find := count2[s1[r]]; !find {\n\t\t\tr++\n\t\t\tl, count1, matches = r, make(map[byte]int), 0\n\t\t} else {\n\t\t\tcount1[s1[r]]++\n\t\t\tif count1[s1[r]] == count2[s1[r]] {\n\t\t\t\tmatches++\n\t\t\t}\n\n\t\t\tr++\n\t\t\tif r-l == len(s2) {\n\t\t\t\tif len(count2) == matches {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\n\t\t\t\tif count1[s1[l]] == count2[s1[l]] {\n\t\t\t\t\tmatches--\n\t\t\t\t}\n\n\t\t\t\tcount1[s1[l]]--\n\t\t\t\tl++\n\t\t\t}\n\n\t\t}\n\t}\n\n\treturn false\n}", "func checkEqual(t *testing.T, a interface{}, b interface{}, messagePrefix string) {\n\tif a == b {\n\t\treturn\n\t}\n\tmessage := fmt.Sprintf(\"%v != %v\", a, b)\n\tif len(messagePrefix) != 0 {\n\t\tmessage = messagePrefix + \": \" + message\n\t}\n\tt.Error(message)\n}", "func colMatch(a *ast.ColumnName, b *ast.ColumnName) bool {\n\tif a.Schema.L == \"\" || a.Schema.L == b.Schema.L {\n\t\tif a.Table.L == \"\" || a.Table.L == b.Table.L {\n\t\t\treturn a.Name.L == b.Name.L\n\t\t}\n\t}\n\treturn false\n}", "func (m Modifiers) Contain(m2 Modifiers) bool {\n\treturn m&m2 == m2\n}", "func compareAndFindChangesOnImmutableFields(obj map[string]interface{}, oldObj map[string]interface{}, schemaMap map[string]*schema.Schema, prefix string, resourceConfig *corekccv1alpha1.ResourceConfig, ignoredFields map[string]bool, fields *list.List) {\n\tfor k, s := range schemaMap {\n\t\tqualifiedName := getQualifiedFieldName(prefix, k)\n\t\tif ignoredFields[qualifiedName] {\n\t\t\tcontinue\n\t\t}\n\n\t\tif ok, refConfig := krmtotf.IsReferenceField(qualifiedName, resourceConfig); ok {\n\t\t\tif !s.ForceNew {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tmodified, refKey := isReferenceValRawModified(obj, oldObj, refConfig)\n\t\t\tif modified {\n\t\t\t\trefKey = getQualifiedFieldName(prefix, refKey)\n\t\t\t\tfields.PushBack(refKey)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tcamelCaseKey := text.SnakeCaseToLowerCamelCase(k)\n\t\tv1 := obj[camelCaseKey]\n\t\tv2 := oldObj[camelCaseKey]\n\t\tif v1 == nil && v2 == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif (v1 == nil || v2 == nil) && s.ForceNew {\n\t\t\tfields.PushBack(qualifiedName)\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch s.Type {\n\t\t// TODO: terraform schema doc says that TypeMap only support Elem to be a *Schema with a Type that is one of the primitives\n\t\t// Is there any edge 
cases to handle?\n\t\tcase schema.TypeBool, schema.TypeFloat, schema.TypeString, schema.TypeInt, schema.TypeMap:\n\t\t\tif s.ForceNew && !reflect.DeepEqual(v1, v2) {\n\t\t\t\tfields.PushBack(qualifiedName)\n\t\t\t}\n\t\tcase schema.TypeList, schema.TypeSet:\n\t\t\tswitch s.Elem.(type) {\n\t\t\tcase *schema.Schema:\n\t\t\t\t// it's a list of primitives\n\t\t\t\tif s.ForceNew && !reflect.DeepEqual(v1, v2) {\n\t\t\t\t\tfields.PushBack(qualifiedName)\n\t\t\t\t}\n\t\t\tcase *schema.Resource:\n\t\t\t\tif s.MaxItems == 1 {\n\t\t\t\t\t// A list with MaxItems == 1 is actually a nested object due to limitations with TF schemas.\n\t\t\t\t\ttfObjSchemaMap := s.Elem.(*schema.Resource).Schema\n\t\t\t\t\tvar o1 map[string]interface{}\n\t\t\t\t\tvar o2 map[string]interface{}\n\t\t\t\t\tif v1 != nil {\n\t\t\t\t\t\to1 = v1.(map[string]interface{})\n\t\t\t\t\t}\n\t\t\t\t\tif v2 != nil {\n\t\t\t\t\t\to2 = v2.(map[string]interface{})\n\t\t\t\t\t}\n\t\t\t\t\tcompareAndFindChangesOnImmutableFields(o1, o2, tfObjSchemaMap, qualifiedName, resourceConfig, ignoredFields, fields)\n\t\t\t\t} else {\n\t\t\t\t\t// TODO(kcc-eng): Kubernetes considers all lists of objects to be atomic, and so all subsequent\n\t\t\t\t\t// applies will currently wipe out defaulted immutable fields. Temporarily delegate validation\n\t\t\t\t\t// to the controller, which will determine via comparing the config with calculated fields in\n\t\t\t\t\t// the live state to detect if a diff in immutable fields is present.\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (a Attributes) Equal(b Attributes) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor _, attr := range a {\n\t\tv := b.Value(attr.Key)\n\t\tif !bytes.Equal(v, attr.Value) {\n\t\t\treturn false\n\t\t}\n\t}\n\tfor _, attr := range b {\n\t\tv := a.Value(attr.Key)\n\t\tif !bytes.Equal(v, attr.Value) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func Verify(a, b *Release, minTitleLength int) MatchResult {\n\tif a.ExtIDs.DOI != \"\" && a.ExtIDs.DOI == b.ExtIDs.DOI {\n\t\treturn MatchResult{StatusExact, ReasonDOI}\n\t}\n\tif a.WorkID != \"\" && a.WorkID == b.WorkID {\n\t\treturn MatchResult{StatusExact, ReasonWorkID}\n\t}\n\taTitleLower := strings.ToLower(a.Title)\n\tbTitleLower := strings.ToLower(b.Title)\n\tif utf8.RuneCountInString(a.Title) < minTitleLength {\n\t\treturn MatchResult{StatusAmbiguous, ReasonShortTitle}\n\t}\n\tif BlacklistTitle.Contains(aTitleLower) {\n\t\treturn MatchResult{StatusAmbiguous, ReasonBlacklisted}\n\t}\n\tif BlacklistTitle.Contains(bTitleLower) {\n\t\treturn MatchResult{StatusAmbiguous, ReasonBlacklisted}\n\t}\n\tfor _, fragment := range BlacklistTitleFragments.Slice() {\n\t\tif strings.Contains(aTitleLower, fragment) {\n\t\t\treturn MatchResult{StatusAmbiguous, ReasonBlacklistedFragment}\n\t\t}\n\t}\n\tif strings.Contains(aTitleLower, \"subject index\") && strings.Contains(bTitleLower, \"subject index\") {\n\t\tif a.ContainerID != \"\" && a.ContainerID != b.ContainerID {\n\t\t\treturn MatchResult{StatusDifferent, ReasonContainer}\n\t\t}\n\t}\n\tif a.Title != \"\" && a.Title == b.Title &&\n\t\ta.Extra.DataCite.MetadataVersion > 0 && b.Extra.DataCite.MetadataVersion > 0 &&\n\t\ta.Extra.DataCite.MetadataVersion != b.Extra.DataCite.MetadataVersion {\n\t\treturn MatchResult{StatusExact, ReasonDataciteVersion}\n\t}\n\tif strings.HasPrefix(a.ExtIDs.DOI, \"10.14288/\") && strings.HasPrefix(b.ExtIDs.DOI, \"10.14288/\") &&\n\t\ta.ExtIDs.DOI != b.ExtIDs.DOI {\n\t\treturn MatchResult{StatusDifferent, ReasonCustomPrefix1014288}\n\t}\n\tif 
strings.HasPrefix(a.ExtIDs.DOI, \"10.3403\") && strings.HasPrefix(b.ExtIDs.DOI, \"10.3403\") {\n\t\tif a.ExtIDs.DOI+\"u\" == b.ExtIDs.DOI || b.ExtIDs.DOI+\"u\" == a.ExtIDs.DOI {\n\t\t\treturn MatchResult{StatusStrong, ReasonCustomBSIUndated}\n\t\t}\n\t\taSubtitle := a.Subtitle()\n\t\tbSubtitle := b.Subtitle()\n\t\tif a.Title != \"\" && a.Title == b.Title &&\n\t\t\t((len(aSubtitle) > 0 && aSubtitle[0] != \"\" && len(bSubtitle) == 0) ||\n\t\t\t\t(len(aSubtitle) == 0 && len(bSubtitle) > 0 && bSubtitle[0] != \"\")) {\n\t\t\treturn MatchResult{StatusStrong, ReasonCustomBSISubdoc}\n\t\t}\n\t}\n\tif strings.HasPrefix(a.ExtIDs.DOI, \"10.1149\") && strings.HasPrefix(b.ExtIDs.DOI, \"10.1149\") {\n\t\tv := \"10.1149/ma\"\n\t\tif (strings.HasPrefix(a.ExtIDs.DOI, v) && !strings.HasPrefix(b.ExtIDs.DOI, v)) ||\n\t\t\t(!strings.HasPrefix(a.ExtIDs.DOI, v) && strings.HasPrefix(b.ExtIDs.DOI, v)) {\n\t\t\treturn MatchResult{StatusDifferent, ReasonCustomIOPMAPattern}\n\t\t}\n\t}\n\tif strings.Contains(a.Title, \"Zweckverband Volkshochschule\") && a.Title != b.Title {\n\t\treturn MatchResult{StatusDifferent, ReasonCustomVHS}\n\t}\n\tif PatAppendix.MatchString(a.Title) {\n\t\treturn MatchResult{StatusAmbiguous, ReasonAppendix}\n\t}\n\tif strings.HasPrefix(a.ExtIDs.DOI, \"10.6084/\") && strings.HasPrefix(b.ExtIDs.DOI, \"10.6084/\") {\n\t\tav := PatFigshareVersion.ReplaceAllString(a.ExtIDs.DOI, \"\")\n\t\tbv := PatFigshareVersion.ReplaceAllString(b.ExtIDs.DOI, \"\")\n\t\tif av == bv {\n\t\t\treturn MatchResult{StatusStrong, ReasonFigshareVersion}\n\t\t}\n\t}\n\tif PatVersionedDOI.MatchString(a.ExtIDs.DOI) && PatVersionedDOI.MatchString(b.ExtIDs.DOI) {\n\t\treturn MatchResult{StatusStrong, ReasonVersionedDOI}\n\t}\n\tif looksLikeComponent(a.ExtIDs.DOI, b.ExtIDs.DOI) {\n\t\treturn MatchResult{StatusStrong, ReasonVersionedDOI}\n\t}\n\tif len(a.Extra.DataCite.Relations) > 0 || len(b.Extra.DataCite.Relations) > 0 {\n\t\tgetRelatedDOI := func(rel *Release) *set.Set {\n\t\t\tss := set.New()\n\t\t\tfor _, rel := range rel.Extra.DataCite.Relations {\n\t\t\t\tif strings.ToLower(rel.RelatedIdentifierType) != \"doi\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tss.Add(rel.RelatedIdentifier())\n\t\t\t}\n\t\t\treturn ss\n\t\t}\n\t\taRelated := getRelatedDOI(a)\n\t\tbRelated := getRelatedDOI(b)\n\t\tif aRelated.Contains(b.ExtIDs.DOI) || bRelated.Contains(a.ExtIDs.DOI) {\n\t\t\treturn MatchResult{StatusStrong, ReasonDataciteRelatedID}\n\t\t}\n\t}\n\tif a.ExtIDs.Arxiv != \"\" && b.ExtIDs.Arxiv != \"\" {\n\t\taSub := PatArxivVersion.FindStringSubmatch(a.ExtIDs.Arxiv)\n\t\tbSub := PatArxivVersion.FindStringSubmatch(b.ExtIDs.Arxiv)\n\t\tif len(aSub) == 2 && len(bSub) == 2 && aSub[1] == bSub[1] {\n\t\t\treturn MatchResult{StatusStrong, ReasonArxivVersion}\n\t\t}\n\t}\n\tif a.ReleaseType != b.ReleaseType {\n\t\ttypes := set.FromSlice([]string{a.ReleaseType, b.ReleaseType})\n\t\tignoreTypes := set.FromSlice([]string{\"article\", \"article-journal\", \"report\", \"paper-conference\"})\n\t\tif types.Intersection(ignoreTypes).IsEmpty() {\n\t\t\treturn MatchResult{StatusDifferent, ReasonReleaseType}\n\t\t}\n\t\tif types.Contains(\"dataset\") && (types.Contains(\"article\") || types.Contains(\"article-journal\")) {\n\t\t\treturn MatchResult{StatusDifferent, ReasonReleaseType}\n\t\t}\n\t\tif types.Contains(\"book\") && (types.Contains(\"article\") || types.Contains(\"article-journal\")) {\n\t\t\treturn MatchResult{StatusDifferent, ReasonReleaseType}\n\t\t}\n\t}\n\tif a.ReleaseType == \"dataset\" && b.ReleaseType == \"dataset\" && a.ExtIDs.DOI != 
b.ExtIDs.DOI {\n\t\treturn MatchResult{StatusDifferent, ReasonDatasetDOI}\n\t}\n\tif a.ReleaseType == \"chapter\" && b.ReleaseType == \"chapter\" &&\n\t\ta.Extra.ContainerName != \"\" && a.Extra.ContainerName != b.Extra.ContainerName {\n\t\treturn MatchResult{StatusDifferent, ReasonBookChapter}\n\t}\n\tif a.Extra.Crossref.Type == \"component\" && a.Title != b.Title {\n\t\treturn MatchResult{StatusDifferent, ReasonComponent}\n\t}\n\tif a.ReleaseType == \"component\" && b.ReleaseType == \"component\" {\n\t\tif a.ExtIDs.DOI != \"\" && a.ExtIDs.DOI != b.ExtIDs.DOI {\n\t\t\treturn MatchResult{StatusDifferent, ReasonComponent}\n\t\t}\n\t}\n\taSlugTitle := strings.TrimSpace(strings.Replace(slugifyString(a.Title), \"\\n\", \" \", -1))\n\tbSlugTitle := strings.TrimSpace(strings.Replace(slugifyString(b.Title), \"\\n\", \" \", -1))\n\n\tif aSlugTitle == bSlugTitle {\n\t\tif a.ReleaseYear() != 0 && b.ReleaseYear() != 0 && absInt(a.ReleaseYear()-b.ReleaseYear()) > 40 {\n\t\t\treturn MatchResult{StatusDifferent, ReasonYear}\n\t\t}\n\t}\n\tif aSlugTitle == bSlugTitle {\n\t\tieeeArxivCheck := func(a, b *Release) (ok bool) {\n\t\t\treturn doiPrefix(a.ExtIDs.DOI) == \"10.1109\" && b.ExtIDs.Arxiv != \"\"\n\t\t}\n\t\tif ieeeArxivCheck(a, b) || ieeeArxivCheck(b, a) {\n\t\t\treturn MatchResult{StatusStrong, ReasonCustomIEEEArxiv}\n\t\t}\n\t}\n\tif aSlugTitle == bSlugTitle {\n\t\tif strings.HasPrefix(a.ExtIDs.DOI, \"10.7916/\") && strings.HasPrefix(b.ExtIDs.DOI, \"10.7916/\") {\n\t\t\treturn MatchResult{StatusAmbiguous, ReasonCustomPrefix107916}\n\t\t}\n\t}\n\tif aSlugTitle == bSlugTitle {\n\t\taSubtitle := a.Subtitle()\n\t\tbSubtitle := b.Subtitle()\n\t\tfor _, aSub := range aSubtitle {\n\t\t\tfor _, bSub := range bSubtitle {\n\t\t\t\tif slugifyString(aSub) != slugifyString(bSub) {\n\t\t\t\t\treturn MatchResult{StatusDifferent, ReasonSubtitle}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\trawAuthors := func(rel *Release) (names []string) {\n\t\tfor _, c := range rel.Contribs {\n\t\t\tname := strings.TrimSpace(c.RawName)\n\t\t\tif name == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tnames = append(names, name)\n\t\t}\n\t\treturn names\n\t}\n\taAuthors := set.FromSlice(rawAuthors(a))\n\tbAuthors := set.FromSlice(rawAuthors(b))\n\taSlugAuthors := set.FromSlice(mapString(slugifyString, aAuthors.Slice()))\n\tbSlugAuthors := set.FromSlice(mapString(slugifyString, bAuthors.Slice()))\n\tif aTitleLower == bTitleLower {\n\t\tif aAuthors.Len() > 0 && aAuthors.Equals(bAuthors) {\n\t\t\tif a.ReleaseYear() > 0 && b.ReleaseYear() > 0 && absInt(a.ReleaseYear()-b.ReleaseYear()) > 4 {\n\t\t\t\treturn MatchResult{StatusDifferent, ReasonYear}\n\t\t\t}\n\t\t\treturn MatchResult{StatusExact, ReasonTitleAuthorMatch}\n\t\t}\n\t}\n\tif looksLikeFilename(a.Title) || looksLikeFilename(b.Title) {\n\t\tif a.Title != b.Title {\n\t\t\treturn MatchResult{StatusDifferent, ReasonTitleFilename}\n\t\t}\n\t}\n\tif a.Title != \"\" && a.Title == b.Title {\n\t\tif a.ReleaseYear() > 0 && b.ReleaseYear() > 0 && absInt(a.ReleaseYear()-b.ReleaseYear()) > 2 {\n\t\t\treturn MatchResult{StatusDifferent, ReasonYear}\n\t\t}\n\t}\n\t// XXX: skipping chemical formula detection (to few cases; https://git.io/Jtdax)\n\tif len(aSlugTitle) < 10 && aSlugTitle != bSlugTitle {\n\t\treturn MatchResult{StatusAmbiguous, ReasonShortTitle}\n\t}\n\tif PatDigits.MatchString(aSlugTitle) &&\n\t\taSlugTitle != bSlugTitle &&\n\t\tunifyDigits(aSlugTitle) == unifyDigits(bSlugTitle) {\n\t\treturn MatchResult{StatusDifferent, ReasonNumDiff}\n\t}\n\tif aSlugTitle != \"\" && bSlugTitle != \"\" 
&&\n\t\tstrings.ReplaceAll(aSlugTitle, \" \", \"\") == strings.ReplaceAll(bSlugTitle, \" \", \"\") {\n\t\tif aSlugAuthors.Intersection(bSlugAuthors).Len() > 0 {\n\t\t\tif a.ReleaseYear() > 0 && b.ReleaseYear() > 0 && absInt(a.ReleaseYear()-b.ReleaseYear()) > 4 {\n\t\t\t\treturn MatchResult{StatusDifferent, ReasonYear}\n\t\t\t}\n\t\t\treturn MatchResult{StatusStrong, ReasonSlugTitleAuthorMatch}\n\t\t}\n\t}\n\tif a.ReleaseYear() > 0 && a.ReleaseYear() == b.ReleaseYear() && aTitleLower == bTitleLower {\n\t\tif (a.ExtIDs.PMID != \"\" && b.ExtIDs.DOI != \"\") || (b.ExtIDs.PMID != \"\" && a.ExtIDs.DOI != \"\") {\n\t\t\treturn MatchResult{StatusStrong, ReasonPMIDDOIPair}\n\t\t}\n\t}\n\tif a.ExtIDs.Jstor != \"\" && b.ExtIDs.Jstor != \"\" && a.ExtIDs.Jstor != b.ExtIDs.Jstor {\n\t\treturn MatchResult{StatusDifferent, ReasonJstorID}\n\t}\n\tif a.ContainerID != \"\" && a.ContainerID == b.ContainerID && a.ExtIDs.DOI != b.ExtIDs.DOI &&\n\t\tdoiPrefix(a.ExtIDs.DOI) != \"10.1126\" &&\n\t\tdoiPrefix(a.ExtIDs.DOI) == doiPrefix(b.ExtIDs.DOI) {\n\t\treturn MatchResult{StatusDifferent, ReasonSharedDOIPrefix}\n\t}\n\tif aAuthors.Len() > 0 && aSlugAuthors.Intersection(bSlugAuthors).IsEmpty() {\n\t\tnumAuthors := set.Min(aSlugAuthors, bSlugAuthors)\n\t\tscore := averageScore(aSlugAuthors, bSlugAuthors)\n\t\tif (numAuthors < 3 && score > 0.9) || (numAuthors >= 3 && score > 0.5) {\n\t\t\treturn MatchResult{StatusStrong, ReasonTokenizedAuthors}\n\t\t}\n\t\taTok := set.FromSlice(strings.Fields(aSlugAuthors.Join(\" \")))\n\t\tbTok := set.FromSlice(strings.Fields(bSlugAuthors.Join(\" \")))\n\t\taTok = set.Filter(aTok, func(s string) bool {\n\t\t\treturn len(s) > 2\n\t\t})\n\t\tbTok = set.Filter(bTok, func(s string) bool {\n\t\t\treturn len(s) > 2\n\t\t})\n\t\tif aTok.Len() > 0 && bTok.Len() > 0 {\n\t\t\tif aTok.Jaccard(bTok) > 0.35 {\n\t\t\t\treturn MatchResult{StatusStrong, ReasonJaccardAuthors}\n\t\t\t}\n\t\t}\n\t\treturn MatchResult{StatusDifferent, ReasonContribIntersectionEmpty}\n\t}\n\tif doiPrefix(a.ExtIDs.DOI) == \"10.5860\" || doiPrefix(b.ExtIDs.DOI) == \"10.5860\" {\n\t\treturn MatchResult{StatusAmbiguous, ReasonCustomPrefix105860ChoiceReview}\n\t}\n\t// XXX: parse pages\n\taParsedPages := parsePageString(a.Pages)\n\tbParsedPages := parsePageString(b.Pages)\n\tif aParsedPages.Err != nil && bParsedPages.Err != nil {\n\t\tif absInt(aParsedPages.Count()-bParsedPages.Count()) > 5 {\n\t\t\treturn MatchResult{StatusDifferent, ReasonPageCount}\n\t\t}\n\t}\n\tif aAuthors.Equals(bAuthors) &&\n\t\ta.ContainerID == b.ContainerID &&\n\t\ta.ReleaseYear() == b.ReleaseYear() &&\n\t\ta.Title != b.Title &&\n\t\t(strings.Contains(a.Title, b.Title) || strings.Contains(b.Title, a.Title)) {\n\t\treturn MatchResult{StatusStrong, ReasonTitleArtifact}\n\t}\n\treturn MatchResult{\n\t\tStatusAmbiguous,\n\t\tReasonUnknown,\n\t}\n}", "func (fm *FieldModelOrder) VerifyFields(fbeStructSize int) bool {\n fbeCurrentSize := 4 + 4\n\n if (fbeCurrentSize + fm.Id.FBESize()) > fbeStructSize {\n return true\n }\n if !fm.Id.Verify() {\n return false\n }\n fbeCurrentSize += fm.Id.FBESize()\n\n if (fbeCurrentSize + fm.Symbol.FBESize()) > fbeStructSize {\n return true\n }\n if !fm.Symbol.Verify() {\n return false\n }\n fbeCurrentSize += fm.Symbol.FBESize()\n\n if (fbeCurrentSize + fm.Side.FBESize()) > fbeStructSize {\n return true\n }\n if !fm.Side.Verify() {\n return false\n }\n fbeCurrentSize += fm.Side.FBESize()\n\n if (fbeCurrentSize + fm.Type.FBESize()) > fbeStructSize {\n return true\n }\n if !fm.Type.Verify() {\n return false\n }\n 
fbeCurrentSize += fm.Type.FBESize()\n\n if (fbeCurrentSize + fm.Price.FBESize()) > fbeStructSize {\n return true\n }\n if !fm.Price.Verify() {\n return false\n }\n fbeCurrentSize += fm.Price.FBESize()\n\n if (fbeCurrentSize + fm.Volume.FBESize()) > fbeStructSize {\n return true\n }\n if !fm.Volume.Verify() {\n return false\n }\n fbeCurrentSize += fm.Volume.FBESize()\n\n return true\n}", "func (reader *Reader) IsEqual(other *Reader) bool {\n\tif reader == other {\n\t\treturn true\n\t}\n\tif reader.Input != other.Input {\n\t\treturn false\n\t}\n\n\tl, r := len(reader.InputMetadata), len(other.InputMetadata)\n\n\tif l != r {\n\t\treturn false\n\t}\n\n\tfor a := 0; a < l; a++ {\n\t\tif !reader.InputMetadata[a].IsEqual(&other.InputMetadata[a]) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (cmp *ConfigMapPropagation) CheckImmutableFields(ctx context.Context, original *ConfigMapPropagation) *apis.FieldError {\n\tif original == nil {\n\t\treturn nil\n\t}\n\n\tif diff, err := kmp.ShortDiff(original.Spec, cmp.Spec); err != nil {\n\t\treturn &apis.FieldError{\n\t\t\tMessage: \"Failed to diff ConfigMapPropagation\",\n\t\t\tPaths: []string{\"spec\"},\n\t\t\tDetails: err.Error(),\n\t\t}\n\t} else if diff != \"\" {\n\t\treturn &apis.FieldError{\n\t\t\tMessage: \"Immutable fields changed (-old +new)\",\n\t\t\tPaths: []string{\"spec\"},\n\t\t\tDetails: diff,\n\t\t}\n\t}\n\treturn nil\n}", "func (mf *MetricFilter) ShouldMerge(mf2 MetricFilter) bool {\n\tif mf.MonitorType != mf2.MonitorType {\n\t\treturn false\n\t}\n\tif mf.Negated != mf2.Negated {\n\t\treturn false\n\t}\n\tif len(mf.Dimensions) != len(mf2.Dimensions) {\n\t\treturn false\n\t}\n\t// Ensure no differing dimension values\n\tfor k, v := range mf.Dimensions {\n\t\tif mf2.Dimensions[k] != v {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func CheckEqual(src, trg *Item) bool {\n\ttype obj struct {\n\t\tsrc Attribute\n\t\ttrg Attribute\n\t}\n\tfor _, v := range []obj{\n\t\t{src.part, trg.part},\n\t\t{src.vendor, trg.vendor},\n\t\t{src.product, trg.product},\n\t\t{src.version, trg.version},\n\t\t{src.update, trg.update},\n\t\t{src.edition, trg.edition},\n\t\t{src.language, trg.language},\n\t\t{src.sw_edition, trg.sw_edition},\n\t\t{src.target_sw, trg.target_sw},\n\t\t{src.target_hw, trg.target_hw},\n\t\t{src.other, trg.other},\n\t} {\n\t\tswitch v.src.Comparison(v.trg) {\n\t\tcase Equal:\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func EqualKeys(a, b VectorClock) bool {\n if len(a.Vector) != len(b.Vector) {\n return false\n }\n for k, _:= range a.Vector{\n _, exists := b.Vector[k]\n if exists == false{\n return false\n }\n }\n return true\n}", "func (checker *deepEqualsChecker) Check(params []interface{}, names []string) (result bool, error string) {\n\ta, b := params[0], params[1]\n\tresult = reflect.DeepEqual(a, b)\n\tif !result {\n\t\terror = Diff(a, b)\n\t}\n\treturn result, error\n}", "func (v ConstructionMetadataResponse) Equal(o ConstructionMetadataResponse) bool {\n\treturn string(v.Metadata) == string(o.Metadata) &&\n\t\tlen(v.SuggestedFee) == len(o.SuggestedFee) &&\n\t\tamountSliceEqual(v.SuggestedFee, o.SuggestedFee)\n}", "func assertSubset(t *testing.T, expected, actual *Object) {\nOuter:\n\tfor _, pair := range expected.Pairs {\n\t\tfor _, value := range actual.GetAll(pair.Key) {\n\t\t\tif pair.Value == value {\n\t\t\t\tcontinue Outer\n\t\t\t}\n\t\t}\n\t\tt.Fatalf(\n\t\t\t\"Did not find expected pair %q = %q in\\n%+v\",\n\t\t\tpair.Key,\n\t\t\tpair.Value,\n\t\t\tactual)\n\t}\n}", "func (p 
Params) Equal(p2 Params) bool {\n\tbz1 := MsgCdc.MustMarshalBinary(&p)\n\tbz2 := MsgCdc.MustMarshalBinary(&p2)\n\treturn bytes.Equal(bz1, bz2)\n}", "func (l list) Verify() error {\n\tfor _, p := range l.Fields {\n\t\tfor _, s := range p.Values {\n\t\t\tif err := p.Verify(s); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (v MetadataRequest) Equal(o MetadataRequest) bool {\n\treturn string(v.Metadata) == string(o.Metadata)\n}", "func TestDedicatedServer_IsEqual(t *testing.T) {\n\tcases := []struct {\n\t\thost1 types.DedicatedServer\n\t\thost2 types.DedicatedServer\n\t\tisEqual bool\n\t}{\n\t\t{\n\t\t\thost1: types.DedicatedServer{\n\t\t\t\tInventory: []*types.InventoryItem{\n\t\t\t\t\t{\n\t\t\t\t\t\tManufacturer: \"unknown\",\n\t\t\t\t\t\tModel: \"unknown\",\n\t\t\t\t\t\tAssetTag: \"asset tag\",\n\t\t\t\t\t\tPartNumber: \"part number\",\n\t\t\t\t\t\tSerialNumber: \"serial number\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\thost2: types.DedicatedServer{\n\t\t\t\tInventory: []*types.InventoryItem{\n\t\t\t\t\t{\n\t\t\t\t\t\tManufacturer: \"unknown\",\n\t\t\t\t\t\tModel: \"unknown\",\n\t\t\t\t\t\tAssetTag: \"asset tag\",\n\t\t\t\t\t\tPartNumber: \"part number\",\n\t\t\t\t\t\tSerialNumber: \"serial number\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tisEqual: true,\n\t\t},\n\t\t{\n\t\t\thost1: types.DedicatedServer{\n\t\t\t\tInventory: []*types.InventoryItem{\n\t\t\t\t\t{Manufacturer: \"unknown\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\thost2: types.DedicatedServer{\n\t\t\t\tInventory: []*types.InventoryItem{\n\t\t\t\t\t{Manufacturer: \"u. n. owen\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tisEqual: false,\n\t\t},\n\t\t{\n\t\t\thost1: types.DedicatedServer{\n\t\t\t\tInventory: []*types.InventoryItem{\n\t\t\t\t\t{Model: \"unknown\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\thost2: types.DedicatedServer{\n\t\t\t\tInventory: []*types.InventoryItem{\n\t\t\t\t\t{Model: \"u. n. owen\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tisEqual: false,\n\t\t},\n\t\t{\n\t\t\thost1: types.DedicatedServer{\n\t\t\t\tInventory: []*types.InventoryItem{\n\t\t\t\t\t{PartNumber: \"unknown\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\thost2: types.DedicatedServer{\n\t\t\t\tInventory: []*types.InventoryItem{\n\t\t\t\t\t{PartNumber: \"u. n. owen\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tisEqual: false,\n\t\t},\n\t\t{\n\t\t\thost1: types.DedicatedServer{\n\t\t\t\tInventory: []*types.InventoryItem{\n\t\t\t\t\t{AssetTag: \"unknown\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\thost2: types.DedicatedServer{\n\t\t\t\tInventory: []*types.InventoryItem{\n\t\t\t\t\t{AssetTag: \"u. n. owen\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tisEqual: false,\n\t\t},\n\t\t{\n\t\t\thost1: types.DedicatedServer{\n\t\t\t\tInventory: []*types.InventoryItem{\n\t\t\t\t\t{SerialNumber: \"unknown\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\thost2: types.DedicatedServer{\n\t\t\t\tInventory: []*types.InventoryItem{\n\t\t\t\t\t{SerialNumber: \"u. n. 
owen\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tisEqual: false,\n\t\t},\n\t\t{\n\t\t\thost1: types.DedicatedServer{\n\t\t\t\tInventory: []*types.InventoryItem{\n\t\t\t\t\t{Manufacturer: \"Intel\"},\n\t\t\t\t\t{Manufacturer: \"AMD\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\thost2: types.DedicatedServer{\n\t\t\t\tInventory: []*types.InventoryItem{\n\t\t\t\t\t{Manufacturer: \"AMD\"},\n\t\t\t\t\t{Manufacturer: \"Intel\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tisEqual: true,\n\t\t},\n\t\t{\n\t\t\thost1: types.DedicatedServer{\n\t\t\t\tInventory: []*types.InventoryItem{\n\t\t\t\t\t{Manufacturer: \"Intel\"},\n\t\t\t\t\t{Manufacturer: \"AMD\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\thost2: types.DedicatedServer{\n\t\t\t\tInventory: []*types.InventoryItem{\n\t\t\t\t\t{Manufacturer: \"AMD\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tisEqual: false,\n\t\t},\n\t\t//TODO: Details\n\t}\n\n\tfor key, testcase := range cases {\n\t\tif testcase.isEqual {\n\t\t\tassert.True(t, testcase.host1.IsEqual(testcase.host2, true), \"Case ID: \"+strconv.Itoa(key))\n\t\t} else {\n\t\t\tassert.False(t, testcase.host1.IsEqual(testcase.host2, true), \"Case ID: \"+strconv.Itoa(key))\n\t\t}\n\t}\n}", "func verifyAnd(a, b, and *bitmap.Wahl) error {\n\ta_map := mapArray(a.Bits())\n\tb_map := mapArray(b.Bits())\n\tand_map := mapArray(and.Bits())\n\tref_map := andMaps(a_map, b_map)\n\tcompareMaps(\"verify AND map\", and_map, ref_map)\n\tfor _, bit := range and.Bits() {\n\t\t// bit must be in both maps for AND\n\t\tif !(a_map[bit] && b_map[bit]) {\n\t\t\treturn errors.Bug(\"AND: bit %d is not set in both maps\\n\", bit)\n\t\t}\n\t}\n\treturn nil\n}", "func (d paths) ConfirmEq(other paths) error {\n\tif len(other) != len(d) {\n\t\t// slight abuse of error, but this only really used by tests\n\t\treturn fmt.Errorf(\"expected 'paths' of size %d, but was %d:\\n%v\\n%v\",\n\t\t\tlen(d), len(other), d, other)\n\t}\n\tfor k := range d {\n\t\tif _, ok := other[k]; !ok {\n\t\t\treturn fmt.Errorf(\"expected %q, but didn't find it:\\n%v\", k, other)\n\t\t}\n\t}\n\treturn nil // success\n}", "func compareMaps(t *testing.T, a, b map[string]string) {\n\tif len(a) != len(b) {\n\t\tt.Error(\"Maps different sizes\", a, b)\n\t}\n\tfor ka, va := range a {\n\t\tif vb, ok := b[ka]; !ok || va != vb {\n\t\t\tt.Error(\"Difference in key\", ka, va, b[ka])\n\t\t}\n\t}\n\tfor kb, vb := range b {\n\t\tif va, ok := a[kb]; !ok || vb != va {\n\t\t\tt.Error(\"Difference in key\", kb, vb, a[kb])\n\t\t}\n\t}\n}", "func assertRouteEqual(a, b *route.Route) error {\n\terr := assertRouteHopRecordsEqual(a, b)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// TLV records have already been compared and need to be cleared to\n\t// properly compare the remaining fields using DeepEqual.\n\tcopyRouteNoHops := func(r *route.Route) *route.Route {\n\t\tcopy := *r\n\t\tcopy.Hops = make([]*route.Hop, len(r.Hops))\n\t\tfor i, hop := range r.Hops {\n\t\t\thopCopy := *hop\n\t\t\thopCopy.TLVRecords = nil\n\t\t\tcopy.Hops[i] = &hopCopy\n\t\t}\n\t\treturn &copy\n\t}\n\n\tif !reflect.DeepEqual(copyRouteNoHops(a), copyRouteNoHops(b)) {\n\t\treturn fmt.Errorf(\"PaymentAttemptInfos don't match: %v vs %v\",\n\t\t\tspew.Sdump(a), spew.Sdump(b))\n\t}\n\n\treturn nil\n}", "func compareXLMetaV1(t *testing.T, unMarshalXLMeta, jsoniterXLMeta xlMetaV1) {\n\t// Start comparing the fields of xlMetaV1 obtained from jsoniter parsing with one parsed using json unmarshaling.\n\tif unMarshalXLMeta.Version != jsoniterXLMeta.Version {\n\t\tt.Errorf(\"Expected the Version to be \\\"%s\\\", but got \\\"%s\\\".\", unMarshalXLMeta.Version, 
jsoniterXLMeta.Version)\n\t}\n\tif unMarshalXLMeta.Format != jsoniterXLMeta.Format {\n\t\tt.Errorf(\"Expected the format to be \\\"%s\\\", but got \\\"%s\\\".\", unMarshalXLMeta.Format, jsoniterXLMeta.Format)\n\t}\n\tif unMarshalXLMeta.Stat.Size != jsoniterXLMeta.Stat.Size {\n\t\tt.Errorf(\"Expected the stat size to be %v, but got %v.\", unMarshalXLMeta.Stat.Size, jsoniterXLMeta.Stat.Size)\n\t}\n\tif !unMarshalXLMeta.Stat.ModTime.Equal(jsoniterXLMeta.Stat.ModTime) {\n\t\tt.Errorf(\"Expected the modTime to be \\\"%v\\\", but got \\\"%v\\\".\", unMarshalXLMeta.Stat.ModTime, jsoniterXLMeta.Stat.ModTime)\n\t}\n\tif unMarshalXLMeta.Erasure.Algorithm != jsoniterXLMeta.Erasure.Algorithm {\n\t\tt.Errorf(\"Expected the erasure algorithm to be \\\"%v\\\", but got \\\"%v\\\".\", unMarshalXLMeta.Erasure.Algorithm, jsoniterXLMeta.Erasure.Algorithm)\n\t}\n\tif unMarshalXLMeta.Erasure.DataBlocks != jsoniterXLMeta.Erasure.DataBlocks {\n\t\tt.Errorf(\"Expected the erasure data blocks to be %v, but got %v.\", unMarshalXLMeta.Erasure.DataBlocks, jsoniterXLMeta.Erasure.DataBlocks)\n\t}\n\tif unMarshalXLMeta.Erasure.ParityBlocks != jsoniterXLMeta.Erasure.ParityBlocks {\n\t\tt.Errorf(\"Expected the erasure parity blocks to be %v, but got %v.\", unMarshalXLMeta.Erasure.ParityBlocks, jsoniterXLMeta.Erasure.ParityBlocks)\n\t}\n\tif unMarshalXLMeta.Erasure.BlockSize != jsoniterXLMeta.Erasure.BlockSize {\n\t\tt.Errorf(\"Expected the erasure block size to be %v, but got %v.\", unMarshalXLMeta.Erasure.BlockSize, jsoniterXLMeta.Erasure.BlockSize)\n\t}\n\tif unMarshalXLMeta.Erasure.Index != jsoniterXLMeta.Erasure.Index {\n\t\tt.Errorf(\"Expected the erasure index to be %v, but got %v.\", unMarshalXLMeta.Erasure.Index, jsoniterXLMeta.Erasure.Index)\n\t}\n\tif len(unMarshalXLMeta.Erasure.Distribution) != len(jsoniterXLMeta.Erasure.Distribution) {\n\t\tt.Errorf(\"Expected the size of Erasure Distribution to be %d, but got %d.\", len(unMarshalXLMeta.Erasure.Distribution), len(jsoniterXLMeta.Erasure.Distribution))\n\t} else {\n\t\tfor i := 0; i < len(unMarshalXLMeta.Erasure.Distribution); i++ {\n\t\t\tif unMarshalXLMeta.Erasure.Distribution[i] != jsoniterXLMeta.Erasure.Distribution[i] {\n\t\t\t\tt.Errorf(\"Expected the Erasure Distribution to be %d, got %d.\", unMarshalXLMeta.Erasure.Distribution[i], jsoniterXLMeta.Erasure.Distribution[i])\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(unMarshalXLMeta.Erasure.Checksums) != len(jsoniterXLMeta.Erasure.Checksums) {\n\t\tt.Errorf(\"Expected the size of Erasure Checksums to be %d, but got %d.\", len(unMarshalXLMeta.Erasure.Checksums), len(jsoniterXLMeta.Erasure.Checksums))\n\t} else {\n\t\tfor i := 0; i < len(unMarshalXLMeta.Erasure.Checksums); i++ {\n\t\t\tif unMarshalXLMeta.Erasure.Checksums[i].Name != jsoniterXLMeta.Erasure.Checksums[i].Name {\n\t\t\t\tt.Errorf(\"Expected the Erasure Checksum Name to be \\\"%s\\\", got \\\"%s\\\".\", unMarshalXLMeta.Erasure.Checksums[i].Name, jsoniterXLMeta.Erasure.Checksums[i].Name)\n\t\t\t}\n\t\t\tif unMarshalXLMeta.Erasure.Checksums[i].Algorithm != jsoniterXLMeta.Erasure.Checksums[i].Algorithm {\n\t\t\t\tt.Errorf(\"Expected the Erasure Checksum Algorithm to be \\\"%s\\\", got \\\"%s\\\".\", unMarshalXLMeta.Erasure.Checksums[i].Algorithm, jsoniterXLMeta.Erasure.Checksums[i].Algorithm)\n\t\t\t}\n\t\t\tif !bytes.Equal(unMarshalXLMeta.Erasure.Checksums[i].Hash, jsoniterXLMeta.Erasure.Checksums[i].Hash) {\n\t\t\t\tt.Errorf(\"Expected the Erasure Checksum Hash to be \\\"%s\\\", got \\\"%s\\\".\", unMarshalXLMeta.Erasure.Checksums[i].Hash, 
jsoniterXLMeta.Erasure.Checksums[i].Hash)\n\t\t\t}\n\t\t}\n\t}\n\n\tif unMarshalXLMeta.Minio.Release != jsoniterXLMeta.Minio.Release {\n\t\tt.Errorf(\"Expected the Release string to be \\\"%s\\\", but got \\\"%s\\\".\", unMarshalXLMeta.Minio.Release, jsoniterXLMeta.Minio.Release)\n\t}\n\tif len(unMarshalXLMeta.Parts) != len(jsoniterXLMeta.Parts) {\n\t\tt.Errorf(\"Expected info of %d parts to be present, but got %d instead.\", len(unMarshalXLMeta.Parts), len(jsoniterXLMeta.Parts))\n\t} else {\n\t\tfor i := 0; i < len(unMarshalXLMeta.Parts); i++ {\n\t\t\tif unMarshalXLMeta.Parts[i].Name != jsoniterXLMeta.Parts[i].Name {\n\t\t\t\tt.Errorf(\"Expected the name of part %d to be \\\"%s\\\", got \\\"%s\\\".\", i+1, unMarshalXLMeta.Parts[i].Name, jsoniterXLMeta.Parts[i].Name)\n\t\t\t}\n\t\t\tif unMarshalXLMeta.Parts[i].ETag != jsoniterXLMeta.Parts[i].ETag {\n\t\t\t\tt.Errorf(\"Expected the ETag of part %d to be \\\"%s\\\", got \\\"%s\\\".\", i+1, unMarshalXLMeta.Parts[i].ETag, jsoniterXLMeta.Parts[i].ETag)\n\t\t\t}\n\t\t\tif unMarshalXLMeta.Parts[i].Number != jsoniterXLMeta.Parts[i].Number {\n\t\t\t\tt.Errorf(\"Expected the number of part %d to be \\\"%d\\\", got \\\"%d\\\".\", i+1, unMarshalXLMeta.Parts[i].Number, jsoniterXLMeta.Parts[i].Number)\n\t\t\t}\n\t\t\tif unMarshalXLMeta.Parts[i].Size != jsoniterXLMeta.Parts[i].Size {\n\t\t\t\tt.Errorf(\"Expected the size of part %d to be %v, got %v.\", i+1, unMarshalXLMeta.Parts[i].Size, jsoniterXLMeta.Parts[i].Size)\n\t\t\t}\n\t\t}\n\t}\n\n\tfor key, val := range unMarshalXLMeta.Meta {\n\t\tjsoniterVal, exists := jsoniterXLMeta.Meta[key]\n\t\tif !exists {\n\t\t\tt.Errorf(\"No meta data entry for Key \\\"%s\\\" exists.\", key)\n\t\t}\n\t\tif val != jsoniterVal {\n\t\t\tt.Errorf(\"Expected the value for Meta data key \\\"%s\\\" to be \\\"%s\\\", but got \\\"%s\\\".\", key, val, jsoniterVal)\n\t\t}\n\n\t}\n}", "func assertEqual(t *testing.T, a interface{}, b interface{}) bool {\n\tm := getMessage()\n\tif a != b {\n\t\tt.Errorf(\"Expected to be equal. %v != %v. %v\", a, b, m)\n\t\treturn false\n\t}\n\treturn true\n}", "func compareSnmpData(received map[string]*snmpData, expected map[string]snmpData) error {\n\tfor k, v := range received {\n\t\tif expectedVal, ok := expected[k]; !ok {\n\t\t\treturn fmt.Errorf(\"key '%v' in the result, but not in expected.\", k)\n\t\t} else {\n\t\t\tif v == nil {\n\t\t\t\treturn fmt.Errorf(\"key '%v' in result had a nil value.\", k)\n\t\t\t}\n\t\t\tif *v != expectedVal {\n\t\t\t\treturn fmt.Errorf(\"key '%v' had value '%v' in result, but '%v' in expected.\", k, *v, expectedVal)\n\t\t\t}\n\t\t}\n\t}\n\n\tfor k := range expected {\n\t\tif _, ok := received[k]; !ok {\n\t\t\treturn fmt.Errorf(\"key '%v' found in expected, but not in the result.\", k)\n\t\t}\n\t}\n\treturn nil\n}", "func TablesEqual(tbl1, tbl2 array.Table) (bool, string) {\n\n\tm1 := tbl1.NumCols()\n\tm2 := tbl2.NumCols()\n\tif m1 != m2 {\n\t\treturn false, fmt.Sprintf(\"Inconsistent number of columns, %d != %d\", m1, m2)\n\t}\n\n\tfor i := 0; i < int(m1); i++ {\n\n\t\tcol1 := tbl1.Column(i)\n\t\tcol2 := tbl2.Column(i)\n\n\t\tb, msg := ColumnsEqual(col1, col2)\n\t\tif !b {\n\t\t\treturn false, msg\n\t\t}\n\t}\n\n\treturn true, \"\"\n}" ]
[ "0.59522516", "0.54425323", "0.5349307", "0.53088397", "0.5259998", "0.5258071", "0.5201605", "0.519218", "0.51761544", "0.51652557", "0.50507516", "0.5047003", "0.50291055", "0.49821675", "0.49660936", "0.48716882", "0.48716882", "0.48586023", "0.48566115", "0.48440102", "0.48233062", "0.48132974", "0.4812188", "0.47956923", "0.47866455", "0.47732466", "0.47515842", "0.47475746", "0.47364628", "0.47352147", "0.47321102", "0.47133687", "0.4700891", "0.46987432", "0.4681197", "0.46710303", "0.4669332", "0.46636793", "0.46614656", "0.4659741", "0.464961", "0.46435454", "0.46434778", "0.46388826", "0.46374407", "0.46326056", "0.4629236", "0.46277073", "0.4611581", "0.46034232", "0.46023786", "0.45984492", "0.4595259", "0.45793462", "0.45745707", "0.45668697", "0.45591593", "0.4551368", "0.45477915", "0.45417246", "0.45410013", "0.45365372", "0.45358193", "0.45325136", "0.45285803", "0.45226365", "0.45157042", "0.45122334", "0.45030057", "0.44988614", "0.44969642", "0.44874883", "0.4477387", "0.4474688", "0.44706434", "0.44674653", "0.44664165", "0.44603676", "0.44564176", "0.44552666", "0.4450542", "0.44499415", "0.44472206", "0.4445318", "0.4444812", "0.44380245", "0.44295806", "0.4423572", "0.44194496", "0.44193268", "0.44172055", "0.44169337", "0.44154683", "0.44128242", "0.44004273", "0.43988958", "0.43984506", "0.43963847", "0.43903542", "0.4385142" ]
0.6223601
0
Check that a DOH response is returned correctly.
func TestResponse(t *testing.T) {
	doh, _ := NewTransport(testURL, ips, nil, nil, nil)
	transport := doh.(*transport)
	rt := makeTestRoundTripper()
	transport.client.Transport = rt

	// Fake server.
	go func() {
		<-rt.req
		r, w := io.Pipe()
		rt.resp <- &http.Response{
			StatusCode: 200,
			Body:       r,
			Request:    &http.Request{URL: parsedURL},
		}
		// The DOH response should have a zero query ID.
		var modifiedQuery dnsmessage.Message = simpleQuery
		modifiedQuery.Header.ID = 0
		w.Write(mustPack(&modifiedQuery))
		w.Close()
	}()

	resp, err := doh.Query(simpleQueryBytes)
	if err != nil {
		t.Error(err)
	}

	// Parse the response as a DNS message.
	respParsed := mustUnpack(resp)

	// Query() should reconstitute the query ID in the response.
	if respParsed.Header.ID != simpleQuery.Header.ID ||
		!queriesMostlyEqual(*respParsed, simpleQuery) {
		t.Errorf("Unexpected response %v", resp)
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func CheckResponse(r *http.Response) error {\n\tif r.StatusCode == 200 {\n\t\treader := bufio.NewReader(r.Body)\n\t\tfirstByte, err := reader.ReadByte()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treader.UnreadByte()\n\n\t\tif string(firstByte) == \"-\" {\n\t\t\terrorString, _ := reader.ReadString('\\n')\n\t\t\tif matched, _ := regexp.MatchString(\"-\\\\d+,.+\", errorString); matched == false {\n\t\t\t\treturn fmt.Errorf(\"invalid message format\")\n\t\t\t}\n\t\t\terrors := strings.Split(errorString, \",\")\n\t\t\terrorCode, _ := strconv.Atoi(errors[0])\n\n\t\t\treturn &ErrorResponse{\n\t\t\t\tResponse: r,\n\t\t\t\tMessage: strings.TrimSpace(errors[1]),\n\t\t\t\tErrorCode: StatusCode(errorCode),\n\t\t\t}\n\t\t}\n\n\t\t// reset the response body to the original unread state\n\t\tr.Body = ioutil.NopCloser(reader)\n\n\t\treturn nil\n\t}\n\n\t// EVERY8D API always return status code 200\n\treturn fmt.Errorf(\"unexpected status code: %d\", r.StatusCode)\n}", "func checkResponse(r io.Reader) error {\n\tresponse, err := ParseResponse(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif response.IsFailure() {\n\t\treturn errors.New(response.GetMessage())\n\t}\n\n\treturn nil\n\n}", "func checkResponse(resp *http.Response) error {\n\t// Make sure we receive application/json\n\tif resp.Header.Get(\"Content-Type\") != \"application/json; charset=utf-8\" {\n\t\treturn fmt.Errorf(\n\t\t\t\"content type not application/json; charset=utf-8, actual value %s\",\n\t\t\tresp.Header.Get(\"Content-Type\"),\n\t\t)\n\t}\n\n\t// Check status code\n\tif !(resp.StatusCode >= 200 && resp.StatusCode < 300) {\n\t\treturn fmt.Errorf(\"response status not in range [200, 300], actual code %d\", resp.StatusCode)\n\t}\n\n\treturn nil\n}", "func checkResponse(t *testing.T, resp *http.Response, err error) {\n\tassert.Nil(t, err)\n\tassert.Equal(t, 200, resp.StatusCode)\n}", "func (monitor *Monitor) checkResponseValidity(packet *connection.Packet) bool {\n\treturn packet != nil &&\n\t\tpacket.Code == connection.HvsResponse &&\n\t\tpacket.Height == monitor.Height &&\n\t\tpacket.ID != \"\" &&\n\t\tpacket.Hvs != nil &&\n\t\tpacket.Hvs.IsValid(packet.ID)\n}", "func CheckResponse(err error, expectedStatusCode int, response *http.Response) {\n\tMust(err, \"Command failed because error occurred: %s\", err)\n\n\tif response.StatusCode != expectedStatusCode {\n\t\tout, err := io.ReadAll(response.Body)\n\t\tif err != nil {\n\t\t\tout = []byte{}\n\t\t}\n\t\tpretty, err := json.MarshalIndent(json.RawMessage(out), \"\", \"\\t\")\n\t\tif err == nil {\n\t\t\tout = pretty\n\t\t}\n\n\t\tFatalf(\n\t\t\t`Command failed because status code %d was expected but code %d was received.\n\nResponse payload:\n\n%s`,\n\t\t\texpectedStatusCode,\n\t\t\tresponse.StatusCode,\n\t\t\tout,\n\t\t)\n\t}\n}", "func CheckResponse(body *[]byte) (err error) {\n\n\tpre := new(PxgRetError)\n\n\tdecErr := json.Unmarshal(*body, &pre)\n\n\tif decErr == io.EOF {\n\t\tdecErr = nil // ignore EOF errors caused by empty response body\n\t}\n\tif decErr != nil {\n\t\terr = decErr\n\t}\n\n\tif pre.Error != nil {\n\t\terr = pre.Error\n\t}\n\n\treturn err\n}", "func CheckResponse(r *http.Response) error {\n\tif c := r.StatusCode; 200 <= c && c <= 299 {\n\t\treturn nil\n\t}\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn fmt.Errorf(string(body))\n}", "func CheckResponse(resp *http.Response) error {\n\tcode := resp.StatusCode\n\tif code >= 200 && code <= 299 {\n\t\treturn nil\n\t}\n\n\terrorResponse := &ErrorResponse{Response: 
resp}\n\terr := json.NewDecoder(resp.Body).Decode(errorResponse)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn errorResponse\n}", "func isInvalidResponse(response DownloadUriResponse) bool {\n\tif response.DownloadSessionId == \"\" || response.FileUri == \"\" {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (cl *RestClient) CheckResponse() {\n\tfor i := range cl.messages {\n\t\tcl.messages[i] = RemoveTime(cl.messages[i])\n\t}\n\tif len(cl.res.Results) != len(cl.messages) {\n\t\tcl.test.Errorf(\"Results # != Messages #\")\n\t\treturn\n\t}\n\tfor i := range cl.res.Results {\n\t\tif cl.res.Results[i] != cl.messages[i] {\n\t\t\tcl.test.Errorf(\"Rest CheckResponse got %v want %v.\", cl.messages[i], cl.res.Results[i])\n\t\t\tcl.test.Errorf(\"Name: %v\\nResults: %v\\nMessages: %v\\n\", cl.name, cl.res.Results, cl.messages)\n\t\t\tfor x := range cl.res.Results {\n\t\t\t\tcl.test.Errorf(\"\\nResult: %v\\nMessage: %v\\n\\n\\n\", cl.res.Results[x], cl.messages[x])\n\t\t\t}\n\t\t\tcl.test.Errorf(\"\\nResult: %v\", cl.res.Results[len(cl.res.Results)-1])\n\t\t\tcl.test.Errorf(\"\\nResult#: %v Message#: %v\\n\", len(cl.res.Results), len(cl.messages))\n\t\t\tbreak\n\t\t}\n\t}\n}", "func CheckResponse(r *http.Response) error {\n\tif c := r.StatusCode; 200 <= c && c <= 299 {\n\t\treturn nil\n\t}\n\terrorResponse := &ErrorResponse{Response: r}\n\tdata, err := ioutil.ReadAll(r.Body)\n\tif err == nil && data != nil {\n\t\tjson.Unmarshal(data, errorResponse)\n\t}\n\treturn errorResponse\n}", "func CheckResponse(r *http.Response) error {\n\tif c := r.StatusCode; 200 <= c && c <= 299 {\n\t\treturn nil\n\t}\n\t// errorResp := &Error{}\n\tvar e struct {\n\t\tE Error `json:\"error\"`\n\t}\n\tdata, err := ioutil.ReadAll(r.Body)\n\tif err == nil && data != nil {\n\t\tjson.Unmarshal(data, &e)\n\t}\n\treturn e.E\n}", "func ValidateResponse(res *http.Response) (err error) {\n\tvar resLength int\n\t// non 200 errors\n\tif res.StatusCode != 200 {\n\t\terr = fmt.Errorf(\"Received %d status code\\n\", res.StatusCode)\n\t} else if res.Header[\"Content-Type\"][0] != \"application/json\" {\n\t\terr = fmt.Errorf(\"Content type not spplication/json. Received => %s\\n\", res.Header[\"Content-Type\"][0])\n\t} else {\n\t\tif len(res.Header[\"Content-Length\"]) > 0 {\n\t\t\tresLength, err = strconv.Atoi(res.Header[\"Content-Length\"][0])\n\t\t\tif err == nil && resLength < (CONTENT_LENGTH-100) || resLength > (CONTENT_LENGTH+100) {\n\t\t\t\terr = fmt.Errorf(\"content-Length mismatch 905 vs %d\\n\", resLength)\n\t\t\t}\n\t\t}\n\t}\n\treturn err\n}", "func CheckResponse(r *http.Response) error {\n\tif c := r.StatusCode; 200 <= c && c <= 299 {\n\t\treturn nil\n\t}\n\n\terrorResponse := &ErrorResponse{Response: r}\n\tdata, err := ioutil.ReadAll(r.Body)\n\tif err == nil && data != nil {\n\t\tjson.Unmarshal(data, &errorResponse.ErrorStatus)\n\t}\n\n\treturn errorResponse\n}", "func CheckResponse(r *http.Response) error {\n\tif c := r.StatusCode; 200 <= c && c <= 299 {\n\t\treturn nil\n\t}\n\n\terrorResponse := ErrorResponse{Response: r}\n\tdata, err := ioutil.ReadAll(r.Body)\n\tif err == nil {\n\t\terrorResponse.Message = string(data)\n\t}\n\n\treturn &errorResponse\n}", "func CheckResponse(r *http.Response) error {\n\tif c := r.StatusCode; 200 <= c && c <= 299 {\n\t\treturn nil\n\t}\n\n\terr := fmt.Errorf(\"request failed. Please analyze the request body for more details. 
Status code: %d\", r.StatusCode)\n\treturn err\n}", "func checkResponse(r *Response) error {\n\terrorResponse := &ErrorResponse{Response: r}\n\terr := json.Unmarshal(r.Body, errorResponse)\n\n\tif err != nil {\n\t\terrorResponse.Message = \"Error decoding response error message. \" +\n\t\t\t\"Please see response body for more information.\"\n\t} else if !(errorResponse.Message == \"\") {\n\t\treturn errorResponse\n\t}\n\n\treturn nil\n}", "func checkResponse(message string, b *Board, conn net.Conn) (r bool) {\n data := strings.Split(strings.TrimSuffix(message, \"\\r\\n\"), \" \")\n if len(data) == 3 {\n x, err1 := strconv.Atoi(data[1])\n y, err2 := strconv.Atoi(data[2])\n if (data[0] == \"SHOOT\" && err1 == nil && err2 == nil) {\n // if x == b.zombie.x && y == b.zombie.y {\n if x == 0 && y == 0 {\n b.won = true\n }\n return true\n }\n }\n conn.Write([]byte(\"INVALID INPUT\\n\"))\n return false\n}", "func checkResponse(r *http.Response) error {\n\tlog.WithFields(log.Fields{\n\t\t\"status\": r.Status,\n\t}).Debugln(\"checking response\")\n\tif 200 <= r.StatusCode && r.StatusCode <= 299 {\n\t\treturn nil\n\t}\n\n\tvar msg, errMsg string\n\tscanner := bufio.NewScanner(io.LimitReader(r.Body, 512))\n\tif scanner.Scan() {\n\t\tmsg = scanner.Text()\n\t}\n\n\tif msg == \"\" {\n\t\terrMsg = fmt.Sprintf(\"server returned HTTP status %s\", r.Status)\n\t} else {\n\t\terrMsg = fmt.Sprintf(\"server returned HTTP status %s: %s\", r.Status, msg)\n\t}\n\n\tif r.StatusCode == http.StatusNotFound {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"status\": r.Status,\n\t\t\t\"msg\": msg,\n\t\t}).Debugln(errMsg)\n\t\treturn ErrResourceNotFound\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"status\": r.Status,\n\t\t\"msg\": msg,\n\t}).Errorln(errMsg)\n\n\treturn errors.New(errMsg)\n}", "func ValidateResponse(response *resty.Response, err error) bool {\n\tvar validation bool\n\n\tif response.RawResponse.StatusCode != 200 {\n\t\tlog.Println(response.RawResponse.Status + \" \" + response.Request.URL)\n\t\tvalidation = false\n\t} else if err != nil {\n\t\tlog.Println(err)\n\t\tvalidation = false\n\t} else {\n\t\tvalidation = true\n\t}\n\n\treturn validation\n}", "func CheckResponse(resp *http.Response) error {\n\tif resp.StatusCode >= 200 && resp.StatusCode <= 299 {\n\t\treturn nil\n\t}\n\treturn &ErrorResponse{Response: resp}\n}", "func checkValidResponse(resp *http.Response) CustError {\n\tif resp.StatusCode < 200 || resp.StatusCode > 226 {\n\t\tif resp.StatusCode == 404 {\n\t\t\treturn CustError{http.StatusNotFound, \"Repository NOT FOUND - Check URL or repository details\"}\n\t\t}\n\t\treturn CustError{http.StatusBadRequest, \"Check URL or repository details\"}\n\t}\n\n\t// Treat 206 as error, we're missing some vital repo info\n\tif resp.StatusCode == 206 {\n\t\treturn CustError{http.StatusPartialContent, \"Repo missing either name or owner\"}\n\t}\n\n\t// Nothing bad happened\n\treturn CustError{0, errorStr[0]}\n}", "func (dr *DeleteResponse) IsOk() bool {\n\treturn dr.ok\n}", "func (resp *Response) OK() bool {\n\treturn resp.StatusCode < 400\n}", "func checkForEmptyResponseData(responseBody []byte) (bool, error) {\n\tvar basicResponse BasicResponse\n\terr := json.Unmarshal(responseBody, &basicResponse)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\temptyData := make([]interface{}, 0)\n\treturn reflect.DeepEqual(emptyData, basicResponse.Data), nil\n}", "func unauthorizedResponse(body []byte) bool {\n\n\t// Parse response to simplejson object\n\tjs, err := simplejson.NewJson(body)\n\tif err != nil 
{\n\t\tfmt.Println(\"[unauthorizedResponse] Error parsing Json!\")\n\t\treturn true\n\t}\n\n\t// check whether we got an error or not.\n\t_, exists := js.CheckGet(\"error\")\n\tif exists {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func checkResponseForErrors(response *influxdb.Response) error {\n\tconst msg = \"failed to write stats to influxDb - %s\"\n\n\tif response != nil && response.Err != nil {\n\t\treturn fmt.Errorf(msg, response.Err)\n\t}\n\tif response != nil && response.Results != nil {\n\t\tfor _, result := range response.Results {\n\t\t\tif result.Err != nil {\n\t\t\t\treturn fmt.Errorf(msg, result.Err)\n\t\t\t}\n\t\t\tif result.Series != nil {\n\t\t\t\tfor _, row := range result.Series {\n\t\t\t\t\tif row.Err != nil {\n\t\t\t\t\t\treturn fmt.Errorf(msg, row.Err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func CheckResponse(r *http.Response) error {\n\tif c := r.StatusCode; c >= 200 && c <= 299 {\n\t\treturn nil\n\t}\n\treturn &ErrorResponse{Response: r}\n}", "func generateResponse(h *header, recvTime ntpTime, authErr error) *Response {\n\tr := &Response{\n\t\tTime: h.TransmitTime.Time(),\n\t\tClockOffset: offset(h.OriginTime, h.ReceiveTime, h.TransmitTime, recvTime),\n\t\tRTT: rtt(h.OriginTime, h.ReceiveTime, h.TransmitTime, recvTime),\n\t\tPrecision: toInterval(h.Precision),\n\t\tStratum: h.Stratum,\n\t\tReferenceID: h.ReferenceID,\n\t\tReferenceTime: h.ReferenceTime.Time(),\n\t\tRootDelay: h.RootDelay.Duration(),\n\t\tRootDispersion: h.RootDispersion.Duration(),\n\t\tLeap: h.getLeap(),\n\t\tMinError: minError(h.OriginTime, h.ReceiveTime, h.TransmitTime, recvTime),\n\t\tPoll: toInterval(h.Poll),\n\t\tauthErr: authErr,\n\t}\n\n\t// Calculate values depending on other calculated values\n\tr.RootDistance = rootDistance(r.RTT, r.RootDelay, r.RootDispersion)\n\n\t// If a kiss of death was received, interpret the reference ID as\n\t// a kiss code.\n\tif r.Stratum == 0 {\n\t\tr.KissCode = kissCode(r.ReferenceID)\n\t}\n\n\treturn r\n}", "func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) {\n\tif e != nil {\n\t\tif resp != nil {\n\t\t\tresp.Body.Close()\n\t\t}\n\t\treturn d, nil, e\n\t}\n\tif resp.StatusCode != 200 {\n\t\tvar buf bytes.Buffer\n\t\tio.Copy(&buf, resp.Body)\n\t\tresp.Body.Close()\n\t\treturn d, nil, fmt.Errorf(\"Unexpected response code: %d (%s)\", resp.StatusCode, buf.Bytes())\n\t}\n\treturn d, resp, nil\n}", "func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) {\n\tif e != nil {\n\t\tif resp != nil {\n\t\t\tresp.Body.Close()\n\t\t}\n\t\treturn d, nil, e\n\t}\n\tif resp.StatusCode != 200 {\n\t\tvar buf bytes.Buffer\n\t\tio.Copy(&buf, resp.Body)\n\t\tresp.Body.Close()\n\t\treturn d, nil, fmt.Errorf(\"Unexpected response code: %d (%s)\", resp.StatusCode, buf.Bytes())\n\t}\n\treturn d, resp, nil\n}", "func TestGetDataAndReturnResponse(t *testing.T) {\n\tdata := getDataAndReturnResponse()\n\tif data.Message != \"hello world\" {\n\t\tt.Errorf(\"Expected string 'hello world' but received: '%s'\", data)\n\t}\n}", "func vrfPdShowOneResp(resp *halproto.VrfGetResponse) {\n\tif resp.GetStatus().GetEpdStatus() != nil {\n\t\tvrfEPdShowOneResp(resp)\n\t}\n}", "func (o *Ga4ghChemotherapy) GetDoseOk() (string, bool) {\n\tif o == nil || o.Dose == nil {\n\t\tvar ret string\n\t\treturn ret, false\n\t}\n\treturn *o.Dose, true\n}", "func (c *_Crawler) CheckTestResponse(ctx context.Context, resp *http.Response) error {\n\tif err := c.Parse(ctx, resp, func(c 
context.Context, i interface{}) error {\n\t\treturn nil\n\t}); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (c *_Crawler) CheckTestResponse(ctx context.Context, resp *http.Response) error {\n\tif err := c.Parse(ctx, resp, func(c context.Context, i interface{}) error {\n\t\treturn nil\n\t}); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (c *_Crawler) CheckTestResponse(ctx context.Context, resp *http.Response) error {\n\tif err := c.Parse(ctx, resp, func(c context.Context, i interface{}) error {\n\t\treturn nil\n\t}); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (cl *DoHClient) waitResponse() {\n\tsuccess, ok := <-cl.responded\n\tif !success || !ok {\n\t\tcl.w.WriteHeader(http.StatusGatewayTimeout)\n\t}\n}", "func TestGetDataAndParseResponse(t *testing.T) {\n\telectricCount, boogalooCount := getDataAndParseResponse()\n\tif electricCount < 1 {\n\t\tt.Errorf(\"expected more than one name 'Electric', recieved: %d\", electricCount)\n\t}\n\tif boogalooCount < 1 {\n\t\tt.Errorf(\"expected more than one name 'Boogaloo', recieved: %d\", boogalooCount)\n\t}\n}", "func verifyResponse(cfg config, res *ocsp.Response) error {\n\tif err := verifyExtendedKeyUsage(cfg, res); err != nil {\n\t\treturn err\n\t}\n\n\tcurrTime := time.Now().UTC()\n\tif res.ThisUpdate.After(currTime) {\n\t\treturn fmt.Errorf(\"reported thisUpdate time %s is after current time %s\", res.ThisUpdate, currTime)\n\t}\n\tif !res.NextUpdate.IsZero() && res.NextUpdate.Before(currTime) {\n\t\treturn fmt.Errorf(\"reported nextUpdate time %s is before current time %s\", res.NextUpdate, currTime)\n\t}\n\treturn nil\n}", "func ParseDeleteaspecificDomainResponse(rsp *http.Response) (*DeleteaspecificDomainResponse, error) {\n\tbodyBytes, err := ioutil.ReadAll(rsp.Body)\n\tdefer rsp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := &DeleteaspecificDomainResponse{\n\t\tBody: bodyBytes,\n\t\tHTTPResponse: rsp,\n\t}\n\n\tswitch {\n\t}\n\n\treturn response, nil\n}", "func (g *HTTPGateway) isValidResponse(response *http.Response) error {\n\tif response == nil {\n\t\treturn errors.New(\"response is nil\")\n\t}\n\t// client error if 400 <= status code < 500\n\tif response.StatusCode >= http.StatusBadRequest && response.StatusCode < http.StatusInternalServerError {\n\n\t\treturn platform.NewRequestError(\n\t\t\tresponse.StatusCode,\n\t\t\tresponse.Body,\n\t\t\tfmt.Errorf(\"%d Client Error: %s for url: %s\", response.StatusCode, response.Status, response.Request.URL))\n\t}\n\t// server error if status code >= 500\n\tif response.StatusCode >= http.StatusInternalServerError {\n\n\t\treturn platform.NewRequestError(\n\t\t\tresponse.StatusCode,\n\t\t\tresponse.Body,\n\t\t\tfmt.Errorf(\"%d Server Error: %s for url: %s\", response.StatusCode, response.Status, response.Request.URL))\n\t}\n\treturn nil\n}", "func (ctx *GetByIDHostContext) OK(r *Hostwithdogs) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"hostwithdogs\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 200, r)\n}", "func (h Hotel) AsResponse() (*Response, bool) {\n\treturn nil, false\n}", "func (s) TestLDSHandleResponse(t *testing.T) {\n\tfakeServer, cc, cleanup := startServerAndGetCC(t)\n\tdefer cleanup()\n\n\tv2c := newV2Client(cc, goodNodeProto, func(int) time.Duration { return 0 }, nil)\n\tdefer v2c.close()\n\n\ttests := []struct {\n\t\tname string\n\t\tldsResponse *xdspb.DiscoveryResponse\n\t\twantErr bool\n\t\twantUpdate 
*ldsUpdate\n\t\twantUpdateErr bool\n\t}{\n\t\t// Badly marshaled LDS response.\n\t\t{\n\t\t\tname: \"badly-marshaled-response\",\n\t\t\tldsResponse: badlyMarshaledLDSResponse,\n\t\t\twantErr: true,\n\t\t\twantUpdate: nil,\n\t\t\twantUpdateErr: false,\n\t\t},\n\t\t// Response does not contain Listener proto.\n\t\t{\n\t\t\tname: \"no-listener-proto-in-response\",\n\t\t\tldsResponse: badResourceTypeInLDSResponse,\n\t\t\twantErr: true,\n\t\t\twantUpdate: nil,\n\t\t\twantUpdateErr: false,\n\t\t},\n\t\t// No APIListener in the response. Just one test case here for a bad\n\t\t// ApiListener, since the others are covered in\n\t\t// TestGetRouteConfigNameFromListener.\n\t\t{\n\t\t\tname: \"no-apiListener-in-response\",\n\t\t\tldsResponse: noAPIListenerLDSResponse,\n\t\t\twantErr: true,\n\t\t\twantUpdate: nil,\n\t\t\twantUpdateErr: false,\n\t\t},\n\t\t// Response contains one listener and it is good.\n\t\t{\n\t\t\tname: \"one-good-listener\",\n\t\t\tldsResponse: goodLDSResponse1,\n\t\t\twantErr: false,\n\t\t\twantUpdate: &ldsUpdate{routeName: goodRouteName1},\n\t\t\twantUpdateErr: false,\n\t\t},\n\t\t// Response contains multiple good listeners, including the one we are\n\t\t// interested in.\n\t\t{\n\t\t\tname: \"multiple-good-listener\",\n\t\t\tldsResponse: ldsResponseWithMultipleResources,\n\t\t\twantErr: false,\n\t\t\twantUpdate: &ldsUpdate{routeName: goodRouteName1},\n\t\t\twantUpdateErr: false,\n\t\t},\n\t\t// Response contains two good listeners (one interesting and one\n\t\t// uninteresting), and one badly marshaled listener.\n\t\t{\n\t\t\tname: \"good-bad-ugly-listeners\",\n\t\t\tldsResponse: goodBadUglyLDSResponse,\n\t\t\twantErr: false,\n\t\t\twantUpdate: &ldsUpdate{routeName: goodRouteName1},\n\t\t\twantUpdateErr: false,\n\t\t},\n\t\t// Response contains one listener, but we are not interested in it.\n\t\t{\n\t\t\tname: \"one-uninteresting-listener\",\n\t\t\tldsResponse: goodLDSResponse2,\n\t\t\twantErr: false,\n\t\t\twantUpdate: &ldsUpdate{routeName: \"\"},\n\t\t\twantUpdateErr: true,\n\t\t},\n\t\t// Response constains no resources. 
This is the case where the server\n\t\t// does not know about the target we are interested in.\n\t\t{\n\t\t\tname: \"empty-response\",\n\t\t\tldsResponse: emptyLDSResponse,\n\t\t\twantErr: false,\n\t\t\twantUpdate: &ldsUpdate{routeName: \"\"},\n\t\t\twantUpdateErr: true,\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\ttestWatchHandle(t, &watchHandleTestcase{\n\t\t\t\tresponseToHandle: test.ldsResponse,\n\t\t\t\twantHandleErr: test.wantErr,\n\t\t\t\twantUpdate: test.wantUpdate,\n\t\t\t\twantUpdateErr: test.wantUpdateErr,\n\n\t\t\t\tldsWatch: v2c.watchLDS,\n\t\t\t\twatchReqChan: fakeServer.XDSRequestChan,\n\t\t\t\thandleXDSResp: v2c.handleLDSResponse,\n\t\t\t})\n\t\t})\n\t}\n}", "func checkResponse(resp *http.Response) error {\n\tswitch resp.StatusCode {\n\tcase http.StatusOK:\n\t\treturn nil\n\tcase http.StatusBadRequest:\n\t\treturn ErrBadRequest\n\tcase http.StatusUnauthorized:\n\t\treturn ErrUnauthorized\n\tcase http.StatusForbidden:\n\t\treturn ErrForbidden\n\tcase http.StatusInternalServerError:\n\t\treturn ErrInternalError\n\tcase http.StatusTooManyRequests:\n\t\treturn ErrManyRequests\n\t}\n\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar e ServerError\n\n\terr = json.Unmarshal(b, &e)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"could not unmarshal server error message\")\n\t}\n\n\treturn e\n}", "func testCabService_CheckZombieStatus_ValidOutput(t *testing.T) {\n\ts, c := MustOpenTestServerHttpClient()\n\tdefer s.Close()\n\n\t// Create sample data.\n\tid := 123\n\texpectedOut := zombie_driver.Status{ID: zombie_driver.DriverID(id), Zombie: false}\n\n\t// Mock service.\n\ts.Handler.DataHandler.CabService.CheckZombieStatusFn = func(id string) (*zombie_driver.Status, error) {\n\t\treturn &expectedOut, nil\n\t}\n\n\t// Send a request.\n\tout, err := c.Connect().CheckZombieStatus(string(id))\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if !reflect.DeepEqual(expectedOut, *out) {\n\t\tt.Fatalf(\"Output:%#v, Expected:%#v\", *out, expectedOut)\n\t}\n}", "func (rig *testRig) checkResponse(user *tUser, txType string) error {\n\tmsg, resp := rig.auth.getResp(user.acct)\n\tif msg == nil {\n\t\treturn fmt.Errorf(\"unexpected nil response to %s's '%s'\", user.lbl, txType)\n\t}\n\tif resp.Error != nil {\n\t\treturn fmt.Errorf(\"%s swap rpc error. 
code: %d, msg: %s\", user.lbl, resp.Error.Code, resp.Error.Message)\n\t}\n\treturn nil\n}", "func (a *api) theResponseShouldContainFields(theDocString *gherkin.DocString) error {\n\t//fmt.Println(\"the string \", string(theDocString.Content))\n\tdefer a.resp.Body.Close()\n\thtmlData, err := ioutil.ReadAll(a.resp.Body)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\tdata := string(htmlData)\n\tw := Structworkitem{}\n\tjson.Unmarshal([]byte(data), &w)\n\n\tbyt := []byte(string(string(theDocString.Content)))\n\tvar keys map[string]interface{}\n\tjson.Unmarshal(byt, &keys)\n\tfor key, value := range keys {\n\t\t//\t\tfmt.Printf(\"the key = %v\", key)\n\t\t//\t\tfmt.Printf(\"the value = %v\", value)\n\t\tif key == \"assignee\" {\n\t\t\tif value != w.Fields.Assignee {\n\t\t\t\treturn fmt.Errorf(\"Expected %s but was %s\", value, w.Fields.Assignee)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (c *Operation) validateAuthResp(authResp []byte, holder, domain, challenge string) error { // nolint:gocyclo\n\tvp, err := verifiable.ParsePresentation(authResp, verifiable.WithPresDisabledProofCheck(),\n\t\tverifiable.WithPresJSONLDDocumentLoader(c.documentLoader))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif vp.Holder != holder {\n\t\treturn fmt.Errorf(\"invalid auth response, invalid holder proof\")\n\t}\n\n\tproofOfInterest := vp.Proofs[0]\n\n\tvar proofChallenge, proofDomain string\n\n\t{\n\t\td, ok := proofOfInterest[\"challenge\"]\n\t\tif ok && d != nil {\n\t\t\tproofChallenge, ok = d.(string)\n\t\t}\n\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"invalid auth response proof, missing challenge\")\n\t\t}\n\t}\n\n\t{\n\t\td, ok := proofOfInterest[\"domain\"]\n\t\tif ok && d != nil {\n\t\t\tproofDomain, ok = d.(string)\n\t\t}\n\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"invalid auth response proof, missing domain\")\n\t\t}\n\t}\n\n\tif proofChallenge != challenge || proofDomain != domain {\n\t\treturn fmt.Errorf(\"invalid proof and challenge in response\")\n\t}\n\n\treturn nil\n}", "func (p PlatformMessage) ExpectsResponse() bool {\n\treturn p.ResponseHandle != 0\n}", "func handleShardOrZoneQueryResponse(sec section.WithSigForward, subjectName, subjectZone,\n\tcontext string, queryType object.Type, sender connection.Info, token token.Token, s *Server) bool {\n\tassertions := []*section.Assertion{}\n\tswitch sec := sec.(type) {\n\tcase *section.Shard:\n\t\tassertions = sec.Content\n\tcase *section.Zone:\n\t\tfor _, sec := range sec.Content {\n\t\t\tswitch sec := sec.(type) {\n\t\t\tcase *section.Assertion:\n\t\t\t\tassertions = append(assertions, sec)\n\t\t\tcase *section.Shard:\n\t\t\t\tassertions = append(assertions, sec.Content...)\n\t\t\tdefault:\n\t\t\t\tlog.Warn(fmt.Sprintf(\"Unsupported zone.Content Expected assertion or shard. actual=%T\", sec))\n\t\t\t}\n\t\t}\n\tdefault:\n\t\tlog.Warn(fmt.Sprintf(\"Unexpected WithSigForward. Expected zone or shard. 
actual=%T\", sec))\n\t}\n\tif entryFound, hasSig := containedAssertionQueryResponse(assertions, subjectName,\n\t\tsubjectZone, context, queryType, sender, token, s); entryFound {\n\t\treturn hasSig\n\t}\n\tsendSection(sec, token, sender, s)\n\treturn true\n}", "func (c *Client) parseCheckResponse(r io.Reader) (float64, error) {\n\tvar checkResponse struct {\n\t\tSuccess bool\n\t\tError string\n\t\tNude float64\n\t}\n\tif err := json.NewDecoder(r).Decode(&checkResponse); err != nil {\n\t\treturn 0, errors.Wrap(err, \"decoding response\")\n\t}\n\tif !checkResponse.Success {\n\t\treturn 0, ErrNudebox(checkResponse.Error)\n\t}\n\treturn checkResponse.Nude, nil\n}", "func (t Thing) AsResponse() (*Response, bool) {\n\treturn nil, false\n}", "func (dpr DeletePathResponse) Response() *http.Response {\n\treturn dpr.rawResponse\n}", "func ParsedeleteDomainResponse(rsp *http.Response) (*deleteDomainResponse, error) {\n\tbodyBytes, err := ioutil.ReadAll(rsp.Body)\n\tdefer rsp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := &deleteDomainResponse{\n\t\tBody: bodyBytes,\n\t\tHTTPResponse: rsp,\n\t}\n\n\tswitch {\n\t}\n\n\treturn response, nil\n}", "func ParseDeleteaspecificPbxDeviceResponse(rsp *http.Response) (*DeleteaspecificPbxDeviceResponse, error) {\n\tbodyBytes, err := ioutil.ReadAll(rsp.Body)\n\tdefer rsp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := &DeleteaspecificPbxDeviceResponse{\n\t\tBody: bodyBytes,\n\t\tHTTPResponse: rsp,\n\t}\n\n\tswitch {\n\t}\n\n\treturn response, nil\n}", "func checkResponseCode(resp http.Response) (respBody []byte, err error) {\n\trespBody, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to read tank response: %d %w\", resp.StatusCode, err)\n\t\treturn\n\t}\n\tif resp.StatusCode != 200 {\n\t\terr = fmt.Errorf(\"%d: %s\", resp.StatusCode, string(respBody))\n\t}\n\treturn\n}", "func (ctx *DeleteDogContext) OK(resp []byte) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"text/plain\")\n\t}\n\tctx.ResponseData.WriteHeader(200)\n\t_, err := ctx.ResponseData.Write(resp)\n\treturn err\n}", "func (r Realm) CheckResponse(challenge *Challenge, authenticate *Authenticate) (*Welcome, error) {\n\tif auth, ok := r.CRAuthenticators[challenge.AuthMethod]; !ok {\n\t\treturn nil, fmt.Errorf(\"authentication method has been removed\")\n\t} else {\n\t\tif details, err := auth.Authenticate(challenge.Extra, authenticate.Signature); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\treturn &Welcome{Details: addAuthMethod(details, challenge.AuthMethod)}, nil\n\t\t}\n\t}\n}", "func CheckOKResponse(r *http.Response) error {\n\tisOK := r.StatusCode >= 200 && r.StatusCode <= 299\n\tif !isOK {\n\t\treturn errors.New(\"response error with http status: \"+r.Status, errors.Internal)\n\t}\n\treturn nil\n}", "func (suite *APIContainerInspectSuite) TestRespValid(c *check.C) {\n\t// TODO\n}", "func (ctx *GetDogsByHostIDHostContext) OK(r *Dogs) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"dogs\")\n\t}\n\treturn ctx.ResponseData.Service.Send(ctx.Context, 200, r)\n}", "func TestCheckResponse_noBody(t *testing.T) {\n\tres := &http.Response{\n\t\tRequest: &http.Request{},\n\t\tStatusCode: http.StatusBadRequest,\n\t\tBody: ioutil.NopCloser(strings.NewReader(\"\")),\n\t}\n\terr := CheckResponse(res).(*ErrorResponse)\n\n\tif err == nil 
{\n\t\tt.Errorf(\"Expected error response.\")\n\t}\n\n\twant := &ErrorResponse{\n\t\tResponse: res,\n\t}\n\tif !reflect.DeepEqual(err, want) {\n\t\tt.Errorf(\"Error = %#v, want %#v\", err, want)\n\t}\n}", "func checkContentLength(res *http.Response) (int64, string, error) {\n\tif res.ContentLength > 0 {\n\t\treturn res.ContentLength, \"Good\", nil\n\t}\n\tif res.ContentLength == 0 {\n\t\t//Means exactly none\n\t\tif res.Body != nil {\n\t\t\treturn res.ContentLength, \"Artifact content is empty\", nil\n\t\t}\n\t\treturn res.ContentLength, \"None\", nil\n\t}\n\tif res.ContentLength < 0 {\n\t\t//Means Unknown\n\t\treturn res.ContentLength, \"Chunked\", nil\n\t}\n\treturn 0, \"\", nil\n}", "func ParsegetDomainResponse(rsp *http.Response) (*getDomainResponse, error) {\n\tbodyBytes, err := ioutil.ReadAll(rsp.Body)\n\tdefer rsp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := &getDomainResponse{\n\t\tBody: bodyBytes,\n\t\tHTTPResponse: rsp,\n\t}\n\n\tswitch {\n\tcase strings.Contains(rsp.Header.Get(\"Content-Type\"), \"json\") && rsp.StatusCode == 200:\n\t\tresponse.JSON200 = &Record{}\n\t\tif err := json.Unmarshal(bodyBytes, response.JSON200); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\tcase strings.Contains(rsp.Header.Get(\"Content-Type\"), \"json\") && rsp.StatusCode == 404:\n\t\tresponse.JSON404 = &Error{}\n\t\tif err := json.Unmarshal(bodyBytes, response.JSON404); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t}\n\n\treturn response, nil\n}", "func (client *DedicatedHostsClient) getHandleResponse(resp *http.Response) (DedicatedHostsGetResponse, error) {\n\tresult := DedicatedHostsGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.DedicatedHost); err != nil {\n\t\treturn DedicatedHostsGetResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func CheckAccountResponse(t *testing.T, resp *models.Account, expectedAccount *models.Account) {\n\tif resp.ID != expectedAccount.ID {\n\t\tt.Errorf(\"Response contains wrong ID, got %v expected %v\", resp.ID, expectedAccount.ID)\n\t}\n\tif resp.Type != expectedAccount.Type {\n\t\tt.Errorf(\"Response contains wrong Type, got %v expected %v\", resp.Type, expectedAccount.Type)\n\t}\n\tif resp.OrganisationID != expectedAccount.OrganisationID {\n\t\tt.Errorf(\"Response contains wrong OrganisationID, got %v expected %v\", resp.OrganisationID, expectedAccount.OrganisationID)\n\t}\n\tif resp.Version != expectedAccount.Version {\n\t\tt.Errorf(\"Response contains wrong Version, got %v expected %v\", resp.Version, expectedAccount.Version)\n\t}\n\tif resp.Attributes.Country != expectedAccount.Attributes.Country {\n\t\tt.Errorf(\"Response contains wrong Country, got %v expected %v\", resp.Attributes.Country, expectedAccount.Attributes.Country)\n\t}\n\tif resp.Attributes.BaseCurrency != expectedAccount.Attributes.BaseCurrency {\n\t\tt.Errorf(\"Response contains wrong BaseCurrency, got %v expected %v\", resp.Attributes.BaseCurrency, expectedAccount.Attributes.BaseCurrency)\n\t}\n\tif resp.Attributes.BankID != expectedAccount.Attributes.BankID {\n\t\tt.Errorf(\"Response contains wrong BankID, got %v expected %v\", resp.Attributes.BankID, expectedAccount.Attributes.BankID)\n\t}\n\tif resp.Attributes.BankIDCode != expectedAccount.Attributes.BankIDCode {\n\t\tt.Errorf(\"Response contains wrong BankIDCode, got %v expected %v\", resp.Attributes.BankIDCode, expectedAccount.Attributes.BankIDCode)\n\t}\n\tif resp.Attributes.Bic != expectedAccount.Attributes.Bic {\n\t\tt.Errorf(\"Response 
contains wrong Bic, got %v expected %v\", resp.Attributes.Bic, expectedAccount.Attributes.Bic)\n\t}\n\tif resp.Attributes.AccountNumber != expectedAccount.Attributes.AccountNumber {\n\t\tt.Errorf(\"Response contains wrong AccountNumber, got %v expected %v\", resp.Attributes.AccountNumber, expectedAccount.Attributes.AccountNumber)\n\t}\n\tif resp.Attributes.CustomerID != expectedAccount.Attributes.CustomerID {\n\t\tt.Errorf(\"Response contains wrong CustomerID, got %v expected %v\", resp.Attributes.CustomerID, expectedAccount.Attributes.CustomerID)\n\t}\n\tif resp.Attributes.FirstName != expectedAccount.Attributes.FirstName {\n\t\tt.Errorf(\"Response contains wrong FirstName, got %v expected %v\", resp.Attributes.FirstName, expectedAccount.Attributes.FirstName)\n\t}\n\tif resp.Attributes.BankAccountName != expectedAccount.Attributes.BankAccountName {\n\t\tt.Errorf(\"Response contains wrong BankAccountName, got %v expected %v\", resp.Attributes.BankAccountName, expectedAccount.Attributes.BankAccountName)\n\t}\n\n\tresponseLength := len(resp.Attributes.AlternativeBankAccountNames)\n\texpectedLength := len(expectedAccount.Attributes.AlternativeBankAccountNames)\n\n\tif responseLength != expectedLength {\n\t\tt.Errorf(\"AlternativeBankAccountNames array is wrong, got %v expected %v\", responseLength, expectedLength)\n\t} else {\n\t\ti := 0\n\t\tfor i < responseLength {\n\t\t\tif resp.Attributes.AlternativeBankAccountNames[i] != expectedAccount.Attributes.AlternativeBankAccountNames[i] {\n\t\t\t\tt.Errorf(\"Response contains wrong AlternativeBankAccountNames, got %v expected %v\", resp.Attributes.AlternativeBankAccountNames[i], expectedAccount.Attributes.AlternativeBankAccountNames[i])\n\t\t\t}\n\t\t\ti = i + 1\n\t\t}\n\t}\n\n\tif resp.Attributes.AlternativeBankAccountNames[0] != expectedAccount.Attributes.AlternativeBankAccountNames[0] {\n\t\tt.Errorf(\"Response contains wrong AlternativeBankAccountNames, got %v expected %v\", resp.Attributes.AlternativeBankAccountNames[0], expectedAccount.Attributes.AlternativeBankAccountNames[0])\n\t}\n\tif resp.Attributes.AccountClassification != expectedAccount.Attributes.AccountClassification {\n\t\tt.Errorf(\"Response contains wrong AccountClassification, got %v expected %v\", resp.Attributes.AccountClassification, expectedAccount.Attributes.AccountClassification)\n\t}\n\tif resp.Attributes.JointAccount != expectedAccount.Attributes.JointAccount {\n\t\tt.Errorf(\"Response contains wrong JointAccount, got %v expected %v\", resp.Attributes.JointAccount, expectedAccount.Attributes.JointAccount)\n\t}\n\tif resp.Attributes.Switched != expectedAccount.Attributes.Switched {\n\t\tt.Errorf(\"Response contains wrong Switched, got %v expected %v\", resp.Attributes.Switched, expectedAccount.Attributes.Switched)\n\t}\n\tif resp.Attributes.AccountMatchingOptOut != expectedAccount.Attributes.AccountMatchingOptOut {\n\t\tt.Errorf(\"Response contains wrong AccountMatchingOptOut, got %v expected %v\", resp.Attributes.AccountMatchingOptOut, expectedAccount.Attributes.AccountMatchingOptOut)\n\t}\n\tif resp.Attributes.Status != expectedAccount.Attributes.Status {\n\t\tt.Errorf(\"Response contains wrong Status, got %v expected %v\", resp.Attributes.Status, expectedAccount.Attributes.Status)\n\t}\n\tif resp.Attributes.SecondaryIdentification != expectedAccount.Attributes.SecondaryIdentification {\n\t\tt.Errorf(\"Response contains wrong SecondaryIdentification, got %v expected %v\", resp.Attributes.SecondaryIdentification, 
expectedAccount.Attributes.SecondaryIdentification)\n\t}\n}", "func (client *DedicatedHostsClient) getHandleResponse(resp *azcore.Response) (DedicatedHostResponse, error) {\n\tvar val *DedicatedHost\n\tif err := resp.UnmarshalAsJSON(&val); err != nil {\n\t\treturn DedicatedHostResponse{}, err\n\t}\n\treturn DedicatedHostResponse{RawResponse: resp.Response, DedicatedHost: val}, nil\n}", "func (vr *VersionResponse) IsOk() bool {\n\treturn len(vr.version) > 0\n}", "func wait_for_response(w dns.ResponseWriter, conn *dns.Conn, request *dns.Msg) (response *dns.Msg) {\n\tfor {\n\t\tresponse, err := conn.ReadMsg()\n\t\t// some sort of error reading reply\n\t\tif err != nil {\n\t\t\t_D(\"%s QID:%d error reading message: %s\", w.RemoteAddr(), request.Id, err)\n\t\t\tSRVFAIL(w, request)\n\t\t\treturn nil\n\t\t}\n\t\t// got a response, life is good\n\t\tif response.Id == request.Id {\n\t\t\t_D(\"%s QID:%d got reply\", w.RemoteAddr(), request.Id)\n\t\t\treturn response\n\t\t}\n\t\t// got a response, but it was for a different QID... ignore\n\t\t_D(\"%s QID:%d ignoring reply to wrong QID:%d\", w.RemoteAddr(), request.Id, response.Id)\n\t}\n}", "func getResponseOK(args ...interface{}) docs.Response {\n\tdescription := \"OK\"\n\tif args != nil {\n\t\tdescription = args[0].(string)\n\t}\n\n\treturn docs.Response{\n\t\tCode: 200,\n\t\tDescription: description,\n\t}\n}", "func (cblr ContainersBreakLeaseResponse) Response() *http.Response {\n\treturn cblr.rawResponse\n}", "func (cr *ClientResponse) Ok() bool {\n\treturn cr.ok\n}", "func (dfr DeleteFilesystemResponse) Response() *http.Response {\n\treturn dfr.rawResponse\n}", "func ParseDeleteOrderResponse(rsp *http.Response) (*DeleteOrderResponse, error) {\n\tbodyBytes, err := ioutil.ReadAll(rsp.Body)\n\tdefer rsp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := &DeleteOrderResponse{\n\t\tBody: bodyBytes,\n\t\tHTTPResponse: rsp,\n\t}\n\n\tswitch {\n\t}\n\n\treturn response, nil\n}", "func (c *DogHouseClient) Check(ctx context.Context, req *doghouse.CheckRequest) (*doghouse.CheckResponse, error) {\n\tcheckURL := c.BaseURL.String() + \"/check\"\n\tb, err := json.Marshal(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thttpReq, err := http.NewRequest(http.MethodPost, checkURL, bytes.NewReader(b))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thttpReq = httpReq.WithContext(ctx)\n\thttpReq.Header.Set(\"Content-Type\", \"application/json\")\n\thttpReq.Header.Set(\"User-Agent\", fmt.Sprintf(\"reviewdog/%s\", commands.Version))\n\n\thttpResp, err := c.Client.Do(httpReq)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Check request failed: %w\", err)\n\t}\n\tdefer httpResp.Body.Close()\n\n\trespb, err := ioutil.ReadAll(httpResp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif httpResp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"status=%v: %s\", httpResp.StatusCode, respb)\n\t}\n\n\tvar resp doghouse.CheckResponse\n\tif err := json.Unmarshal(respb, &resp); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to decode response: error=%w, resp=%s\", err, respb)\n\t}\n\treturn &resp, nil\n}", "func (r *Response) Validate() error {\n\t// Forward authentication errors.\n\tif r.authErr != nil {\n\t\treturn r.authErr\n\t}\n\n\t// Handle invalid stratum values.\n\tif r.Stratum == 0 {\n\t\treturn ErrKissOfDeath\n\t}\n\tif r.Stratum >= maxStratum {\n\t\treturn ErrInvalidStratum\n\t}\n\n\t// Estimate the \"freshness\" of the time. 
If it exceeds the maximum\n\t// polling interval (~36 hours), then it cannot be considered \"fresh\".\n\tfreshness := r.Time.Sub(r.ReferenceTime)\n\tif freshness > maxPollInterval {\n\t\treturn ErrServerClockFreshness\n\t}\n\n\t// Calculate the peer synchronization distance, lambda:\n\t// \tlambda := RootDelay/2 + RootDispersion\n\t// If this value exceeds MAXDISP (16s), then the time is not suitable\n\t// for synchronization purposes.\n\t// https://tools.ietf.org/html/rfc5905#appendix-A.5.1.1.\n\tlambda := r.RootDelay/2 + r.RootDispersion\n\tif lambda > maxDispersion {\n\t\treturn ErrInvalidDispersion\n\t}\n\n\t// If the server's transmit time is before its reference time, the\n\t// response is invalid.\n\tif r.Time.Before(r.ReferenceTime) {\n\t\treturn ErrInvalidTime\n\t}\n\n\t// Handle invalid leap second indicator.\n\tif r.Leap == LeapNotInSync {\n\t\treturn ErrInvalidLeapSecond\n\t}\n\n\t// nil means the response is valid.\n\treturn nil\n}", "func (ctx *DeleteHostContext) OK(resp []byte) error {\n\tif ctx.ResponseData.Header().Get(\"Content-Type\") == \"\" {\n\t\tctx.ResponseData.Header().Set(\"Content-Type\", \"text/plain\")\n\t}\n\tctx.ResponseData.WriteHeader(200)\n\t_, err := ctx.ResponseData.Write(resp)\n\treturn err\n}", "func HandleResponse(c *Client, resp *http.Response, pretty bool) {\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"failed to read body: %s\", err)\n\t\tos.Exit(-1)\n\t}\n\tif resp.StatusCode < 200 || resp.StatusCode > 299 {\n\t\t// Let user know if something went wrong\n\t\tvar sbody string\n\t\tif len(body) > 0 {\n\t\t\tsbody = \": \" + string(body)\n\t\t}\n\t\tfmt.Printf(\"error: %d%s\", resp.StatusCode, sbody)\n\t} else if !c.Dump && len(body) > 0 {\n\t\tvar out string\n\t\tif pretty {\n\t\t\tvar jbody interface{}\n\t\t\terr = json.Unmarshal(body, &jbody)\n\t\t\tif err != nil {\n\t\t\t\tout = string(body)\n\t\t\t} else {\n\t\t\t\tvar b []byte\n\t\t\t\tb, err = json.MarshalIndent(jbody, \"\", \" \")\n\t\t\t\tif err == nil {\n\t\t\t\t\tout = string(b)\n\t\t\t\t} else {\n\t\t\t\t\tout = string(body)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tout = string(body)\n\t\t}\n\t\tfmt.Print(out)\n\t}\n\n\t// Figure out exit code\n\texitStatus := 0\n\tswitch {\n\tcase resp.StatusCode == 401:\n\t\texitStatus = 1\n\tcase resp.StatusCode == 403:\n\t\texitStatus = 3\n\tcase resp.StatusCode == 404:\n\t\texitStatus = 4\n\tcase resp.StatusCode > 399 && resp.StatusCode < 500:\n\t\texitStatus = 2\n\tcase resp.StatusCode > 499:\n\t\texitStatus = 5\n\t}\n\tos.Exit(exitStatus)\n}", "func (lb LodgingBusiness) AsResponse() (*Response, bool) {\n\treturn nil, false\n}", "func TestEmptyResponse(t *testing.T) {\n\tdoh, _ := NewTransport(testURL, ips, nil, nil, nil)\n\ttransport := doh.(*transport)\n\trt := makeTestRoundTripper()\n\ttransport.client.Transport = rt\n\n\t// Fake server.\n\tgo func() {\n\t\t<-rt.req\n\t\t// Make an empty body.\n\t\tr, w := io.Pipe()\n\t\tw.Close()\n\t\trt.resp <- &http.Response{\n\t\t\tStatusCode: 200,\n\t\t\tBody: r,\n\t\t\tRequest: &http.Request{URL: parsedURL},\n\t\t}\n\t}()\n\n\t_, err := doh.Query(simpleQueryBytes)\n\tvar qerr *queryError\n\tif err == nil {\n\t\tt.Error(\"Empty body should cause an error\")\n\t} else if !errors.As(err, &qerr) {\n\t\tt.Errorf(\"Wrong error type: %v\", err)\n\t} else if qerr.status != BadResponse {\n\t\tt.Errorf(\"Wrong error status: %d\", qerr.status)\n\t}\n}", "func (c *Client) checkResp(v reflect.Value) error {\n\tif v.Kind() != reflect.Ptr 
{\n\t\treturn fmt.Errorf(\"bug: resp argument must a *struct, was %T\", v.Interface())\n\t}\n\tv = v.Elem()\n\tif v.Kind() != reflect.Struct {\n\t\treturn fmt.Errorf(\"bug: resp argument must be a *struct, was %T\", v.Interface())\n\t}\n\treturn nil\n}", "func (cdr ContainersDeleteResponse) Response() *http.Response {\n\treturn cdr.rawResponse\n}", "func isResponseSuccess(resp *http.Response) bool {\n\tif resp == nil {\n\t\treturn false\n\t}\n\tstatusCode := resp.StatusCode\n\n\treturn statusCode >= http.StatusOK && statusCode <= 299\n}", "func waitForGuestbookResponse(ctx context.Context, c clientset.Interface, cmd, arg, expectedResponse string, timeout time.Duration, ns string) bool {\n\tfor start := time.Now(); time.Since(start) < timeout && ctx.Err() == nil; time.Sleep(5 * time.Second) {\n\t\tres, err := makeRequestToGuestbook(ctx, c, cmd, arg, ns)\n\t\tif err == nil && res == expectedResponse {\n\t\t\treturn true\n\t\t}\n\t\tframework.Logf(\"Failed to get response from guestbook. err: %v, response: %s\", err, res)\n\t}\n\treturn false\n}", "func assertResponse(t *testing.T, r *httpexpect.Request, respBody string, status int) string {\n\tresp := r.Expect().Status(status).JSON().Raw()\n\n\t// return id field if response is a single map[string]interface with an ID field\n\tvar id string\n\tif rm, ok := resp.(map[string]interface{}); ok {\n\t\t// ignore failure to map or cast - id will just remain blank\n\t\tid, _ = rm[\"id\"].(string)\n\t}\n\tassertEqualJSON(t, resp, respBody)\n\treturn id\n}", "func BebDeleteResponseSuccess(w http.ResponseWriter) {\n\tw.WriteHeader(http.StatusNoContent)\n}", "func doesDeviceExist(deviceID int, meta interface{}) bool {\n\tlog.Printf(\"[Dotcom-Monitor] [DEBUG] Checking if device exists with ID: %v\", deviceID)\n\tdevice := &client.Device{\n\t\tID: deviceID,\n\t}\n\n\t// Since an empty HTTP response is a valid 200 from the API, we will determine if\n\t// the device exists by comparing the hash of the struct before and after the HTTP call.\n\t// If the has does not change, it means nothing else was added, therefore it does not exist.\n\t// If the hash changes, the API found the device and added the rest of the fields.\n\th := sha256.New()\n\tt := fmt.Sprintf(\"%v\", device)\n\tsum := h.Sum([]byte(t))\n\tlog.Printf(\"[Dotcom-Monitor] [DEBUG] Hash before: %x\", sum)\n\n\t// Try to get device from API\n\tapi := meta.(*client.APIClient)\n\terr := api.GetDevice(device)\n\n\tt2 := fmt.Sprintf(\"%v\", device)\n\tsum2 := h.Sum([]byte(t2))\n\tlog.Printf(\"[Dotcom-Monitor] [DEBUG] Hash after: %x\", sum2)\n\n\t// Compare the hashes, and if there was an error from the API we will assume the device exists\n\t// to be safe that we do not improperly remove an existing device from state\n\tif bytes.Equal(sum, sum2) && err == nil {\n\t\tlog.Println(\"[Dotcom-Monitor] [DEBUG] No new fields added to the device, therefore the device did not exist\")\n\t\treturn false\n\t}\n\n\t// If we get here, we can assume the device does exist\n\treturn true\n}", "func checkUserResponse(user, resp User) (err error) {\n\tif user.Name != resp.Name {\n\t\terr = errors.New(\"Name isn't equal\")\n\t\treturn\n\t}\n\tif user.Username != resp.Username {\n\t\terr = errors.New(\"Username isn't equal\")\n\t\treturn\n\t}\n\tif user.Phone != resp.Phone {\n\t\terr = errors.New(\"Phone isn't equal\")\n\t\treturn\n\t}\n\tif user.Password != \"\" {\n\t\terr = errors.New(\"Password isn't empty\")\n\t\treturn\n\t}\n\treturn\n}", "func createResponse(r *http.Request, message string) (*slashResponse, bool) 
{\n\n\tctx := appengine.NewContext(r)\n\tclient := urlfetch.Client(ctx)\n\trespMessage := \"\"\n\tv := url.Values{}\n\tv.Set(\"restaurant_id\", \"23\")\n\n\tresp, err := client.PostForm(\"https://www.kanresta.fi/app/lunchlist/view/\", v)\n\tif err != nil {\n\t\tlog.Errorf(ctx, \"Unable to get lunchsite: %s\", err)\n\t\trespMessage = \"Unable to get lunchlist\"\n\t}\n\n\tdoc, err := goquery.NewDocumentFromResponse(resp)\n\tif err != nil {\n\t\tlog.Errorf(ctx, \"Unable to get lunchsite: %s\", err)\n\t\trespMessage = \"Unable to get lunchlist\"\n\t}\n\n\tdoc.Find(\".lunchlist-day\").Each(func(i int, s *goquery.Selection) {\n\t\tday := s.Children().First().Text()\n\t\tday = strings.TrimSpace(day)\n\t\tif checkWeekday(day,message) || len(message) == 0 {\n\n\t\t\tdescription := s.Find(\".description\").Text()\n\t\t\tdescription = strings.TrimSpace(description)\n\n\t\t\trespMessage = respMessage + day+\"\\n\\n\"+ description +\"\\n\"\n\t\t\trespMessage = respMessage + \"\\n-----------------\\n\"\n\t\t}\n\t})\n\n\tif len(respMessage) == 0 {\n\t\trespMessage = \"Unable to get lunchlist with given arguments. Please provide weekday in finnish, or no message for the whole week's menu\"\n\t}\n\n\tresp.Body.Close()\n\n\treturn &slashResponse{\n\t\tResponseType: \"ephemeral\",\n\t\tText: respMessage,\n\t}, false\n}", "func readResponse(p packetType) (response responseType, err error) {\n\t// The calls to bencode.Unmarshal() can be fragile.\n\tdefer func() {\n\t\tif x := recover(); x != nil {\n\t\t\tlogger.Infof(\"DHT: !!! Recovering from panic() after bencode.Unmarshal %q, %v\", string(p.b), x)\n\t\t}\n\t}()\n\tif e2 := bencode.Unmarshal(bytes.NewBuffer(p.b), &response); e2 == nil {\n\t\terr = nil\n\t\treturn\n\t} else {\n\t\tlogger.Infof(\"DHT: unmarshal error, odd or partial data during UDP read? %v, err=%s\", string(p.b), e2)\n\t\treturn response, e2\n\t}\n\treturn\n}", "func (fe FoodEstablishment) AsResponse() (*Response, bool) {\n\treturn nil, false\n}", "func (i Identifiable) AsResponse() (*Response, bool) {\n\treturn nil, false\n}", "func Response(s *discordgo.Session, m *discordgo.MessageCreate) (bool, string) {\n d := false\n\n for i := 0; i < len(rfs) && !d; i++ {\n if d, r := rfs[i](m.Content, m.Author.ID); d {\n return true, r\n }\n }\n\n return false, \"\"\n}", "func IsResponse(msg *Message) bool {\n\tswitch msg.Head.Type {\n\tcase AckChallenge, AckAuth, AckLogout, AckInfo:\n\t\treturn true\n\t}\n\treturn false\n}", "func (bdr BlobsDeleteResponse) Response() *http.Response {\n\treturn bdr.rawResponse\n}", "func (client *WebAppsClient) getDomainOwnershipIdentifierHandleResponse(resp *http.Response) (WebAppsGetDomainOwnershipIdentifierResponse, error) {\n\tresult := WebAppsGetDomainOwnershipIdentifierResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Identifier); err != nil {\n\t\treturn WebAppsGetDomainOwnershipIdentifierResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func ParseDeleteaspecificVoucherResponse(rsp *http.Response) (*DeleteaspecificVoucherResponse, error) {\n\tbodyBytes, err := ioutil.ReadAll(rsp.Body)\n\tdefer rsp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := &DeleteaspecificVoucherResponse{\n\t\tBody: bodyBytes,\n\t\tHTTPResponse: rsp,\n\t}\n\n\tswitch {\n\t}\n\n\treturn response, nil\n}" ]
[ "0.61948586", "0.60625297", "0.6035303", "0.5975617", "0.59202296", "0.5916416", "0.5899773", "0.5872947", "0.585373", "0.5781684", "0.576798", "0.5761874", "0.5718958", "0.5677509", "0.56607926", "0.5654915", "0.55763286", "0.5567104", "0.55537856", "0.55441964", "0.55430424", "0.5522067", "0.55193794", "0.55122906", "0.5502205", "0.5484503", "0.54831827", "0.5476989", "0.5472563", "0.5461902", "0.546063", "0.546063", "0.54472095", "0.54379576", "0.5416784", "0.541568", "0.541568", "0.541568", "0.5411813", "0.54081094", "0.54063475", "0.5370848", "0.53524315", "0.53075653", "0.5278603", "0.5277314", "0.52694696", "0.5260062", "0.5257065", "0.5250315", "0.52376467", "0.5225642", "0.51973784", "0.519436", "0.51896626", "0.51882863", "0.51721215", "0.5172099", "0.51681006", "0.51662076", "0.51590765", "0.51559716", "0.51539654", "0.51538", "0.51461244", "0.5132749", "0.5126083", "0.50856864", "0.50745624", "0.5067655", "0.50660217", "0.50630397", "0.50601655", "0.5058409", "0.5055734", "0.5050545", "0.5040586", "0.5038999", "0.5032723", "0.5031268", "0.5030761", "0.502809", "0.5008927", "0.50027424", "0.49974898", "0.49885798", "0.49883485", "0.49869314", "0.49840268", "0.49788415", "0.4978223", "0.49753904", "0.49752793", "0.49728408", "0.497156", "0.49713552", "0.49706143", "0.49633127", "0.49618497", "0.4961385" ]
0.62793565
0
Simulate an empty response. (This is not a compliant server behavior.)
func TestEmptyResponse(t *testing.T) {
	doh, _ := NewTransport(testURL, ips, nil, nil, nil)
	transport := doh.(*transport)
	rt := makeTestRoundTripper()
	transport.client.Transport = rt

	// Fake server.
	go func() {
		<-rt.req
		// Make an empty body.
		r, w := io.Pipe()
		w.Close()
		rt.resp <- &http.Response{
			StatusCode: 200,
			Body:       r,
			Request:    &http.Request{URL: parsedURL},
		}
	}()

	_, err := doh.Query(simpleQueryBytes)
	var qerr *queryError
	if err == nil {
		t.Error("Empty body should cause an error")
	} else if !errors.As(err, &qerr) {
		t.Errorf("Wrong error type: %v", err)
	} else if qerr.status != BadResponse {
		t.Errorf("Wrong error status: %d", qerr.status)
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func CreateEmptyResponse(w http.ResponseWriter) {\n\tw.WriteHeader(http.StatusNoContent)\n}", "func (r *Responder) NoContent() { r.write(http.StatusNoContent) }", "func RespondEmpty(w http.ResponseWriter, code int) {\n\tw.Header().Set(\"X-XSS-Protection\", \"1; mode=block\")\n\tw.Header().Set(\"X-Content-Type-Options\", \"nosniff\")\n\tw.Header().Set(\"X-Frame-Options\", \"DENY\")\n\tw.WriteHeader(code)\n\tw.Write(nil)\n}", "func noop(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(\"not yet implemented\"))\n}", "func newEmptyResponse() *Response {\n\treturn &Response{\n\t\tBody: &HTTPResponse{},\n\t\tError: &HTTPResponse{},\n\t}\n}", "func TestEmptyReply(t *testing.T) {\n\t// Initialize webwire server given only the request\n\tserver := setupServer(\n\t\tt,\n\t\t&serverImpl{\n\t\t\tonRequest: func(\n\t\t\t\t_ context.Context,\n\t\t\t\t_ wwr.Connection,\n\t\t\t\t_ wwr.Message,\n\t\t\t) (wwr.Payload, error) {\n\t\t\t\t// Return empty reply\n\t\t\t\treturn nil, nil\n\t\t\t},\n\t\t},\n\t\twwr.ServerOptions{},\n\t)\n\n\t// Initialize client\n\tclient := newCallbackPoweredClient(\n\t\tserver.Addr().String(),\n\t\twebwireClient.Options{\n\t\t\tDefaultRequestTimeout: 2 * time.Second,\n\t\t},\n\t\tcallbackPoweredClientHooks{},\n\t)\n\n\tif err := client.connection.Connect(); err != nil {\n\t\tt.Fatalf(\"Couldn't connect: %s\", err)\n\t}\n\n\t// Send request and await reply\n\treply, err := client.connection.Request(\n\t\tcontext.Background(),\n\t\t\"\",\n\t\twwr.NewPayload(wwr.EncodingBinary, []byte(\"test\")),\n\t)\n\tif err != nil {\n\t\tt.Fatalf(\"Request failed: %s\", err)\n\t}\n\n\t// Verify reply is empty\n\treplyEncoding := reply.Encoding()\n\tif replyEncoding != wwr.EncodingBinary {\n\t\tt.Fatalf(\n\t\t\t\"Expected empty binary reply, but encoding was: %s\",\n\t\t\treplyEncoding.String(),\n\t\t)\n\t}\n\treplyData := reply.Data()\n\tif len(replyData) > 0 {\n\t\tt.Fatalf(\"Expected empty binary reply, but payload was: %v\", replyData)\n\t}\n}", "func emptyHandler(w http.ResponseWriter, req *http.Request) {}", "func NoContent() Response {\n\treturn Response{\n\t\tStatusCode: http.StatusNoContent,\n\t}\n}", "func NoContent(logging ...interface{}) Response {\n\treturn Response{\n\t\tStatus: http.StatusNoContent,\n\t\tData: Bytes(nil),\n\t\tLogging: logging,\n\t}\n}", "func (r Response) NoContent(code string, payload Payload, header ...ResponseHeader) {\n\tr.Response(code, http.NoContent, payload, header...)\n}", "func writeSuccessNoContent(w http.ResponseWriter) {\n\twriteResponse(w, http.StatusNoContent, nil, mimeNone)\n}", "func doNothing(w http.ResponseWriter, r *http.Request) {}", "func Null() Decorator {\n\treturn func(c Client) Client {\n\t\treturn ClientFunc(func(*http.Request) (*http.Response, error) {\n\t\t\treturn &http.Response{\n\t\t\t\tStatus: http.StatusText(http.StatusNoContent),\n\t\t\t\tStatusCode: http.StatusNoContent,\n\t\t\t\tProto: \"HTTP/1.1\",\n\t\t\t\tProtoMajor: 1,\n\t\t\t\tProtoMinor: 1,\n\t\t\t\tHeader: make(map[string][]string),\n\t\t\t\tContentLength: 0,\n\t\t\t}, nil\n\t\t})\n\t}\n}", "func dummyServerResponse(w http.ResponseWriter, r *http.Request) {\n\tw.Write(dummyBytes())\n}", "func TestServeMuxHandleNoResponse(t *testing.T) {\n\tmux := dhcp6server.NewServeMux()\n\n\tr, err := dhcp6server.ParseRequest([]byte{1, 1, 2, 3}, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tw := dhcp6test.NewRecorder(r.TransactionID)\n\tmux.ServeDHCP(w, r)\n\n\tif mt := w.MessageType; mt != dhcp6.MessageType(0) 
{\n\t\tt.Fatalf(\"reply packet empty, but got message type: %v\", mt)\n\t}\n\tif l := len(w.Options()); l > 0 {\n\t\tt.Fatalf(\"reply packet empty, but got %d options\", l)\n\t}\n}", "func ReturnBlank(w http.ResponseWriter) {\n\tw.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\tfmt.Fprint(w, \"\")\n}", "func (resp *NilResponse) Send() error {\n\treturn nil\n}", "func TestRequestEmpty(t *testing.T) {\n\t// Initialize server\n\tserver := setupServer(\n\t\tt,\n\t\t&serverImpl{\n\t\t\tonRequest: func(\n\t\t\t\t_ context.Context,\n\t\t\t\t_ webwire.Connection,\n\t\t\t\tmsg webwire.Message,\n\t\t\t) (webwire.Payload, error) {\n\t\t\t\t// Expect the following request to not even arrive\n\t\t\t\tt.Error(\"Not expected but reached\")\n\t\t\t\treturn nil, nil\n\t\t\t},\n\t\t},\n\t\twebwire.ServerOptions{},\n\t)\n\n\t// Initialize client\n\tclient := newCallbackPoweredClient(\n\t\tserver.Addr().String(),\n\t\twebwireClient.Options{\n\t\t\tDefaultRequestTimeout: 2 * time.Second,\n\t\t},\n\t\tcallbackPoweredClientHooks{},\n\t)\n\n\t// Send request without a name and without a payload.\n\t// Expect a protocol error in return not sending the invalid request off\n\t_, err := client.connection.Request(context.Background(), \"\", nil)\n\tif _, isProtoErr := err.(webwire.ProtocolErr); !isProtoErr {\n\t\tt.Fatalf(\"Expected a protocol error, got: %v\", err)\n\t}\n}", "func nocontent(out http.ResponseWriter) {\n\tout.WriteHeader(http.StatusNoContent)\n}", "func (r *Responder) Gone() { r.write(http.StatusGone) }", "func doNothing(error, http.ResponseWriter, *http.Request) {}", "func (r *Response) NoContent() *Response {\n\topChain := r.chain.enter(\"NoContent()\")\n\tdefer opChain.leave()\n\n\tif opChain.failed() {\n\t\treturn r\n\t}\n\n\tcontentType := r.httpResp.Header.Get(\"Content-Type\")\n\tif !r.checkEqual(opChain, `\"Content-Type\" header`, \"\", contentType) {\n\t\treturn r\n\t}\n\n\tcontent, ok := r.getContent(opChain)\n\tif !ok {\n\t\treturn r\n\t}\n\tif !r.checkEqual(opChain, \"body\", \"\", string(content)) {\n\t\treturn r\n\t}\n\n\treturn r\n}", "func NoContent(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusNoContent)\n}", "func (r Response) Gone(code string, payload Payload, header ...ResponseHeader) {\n\tr.Response(code, http.Gone, payload, header...)\n}", "func (s *session) handleNOOP(args []string) error {\n\treturn s.respondOK(\"doing nothing\")\n}", "func EmptySyncResponse() Response {\n\treturn &syncResponse{\n\t\tsuccess: true,\n\t\tmetadata: make(map[string]interface{}),\n\t}\n}", "func NoHandler(payload []byte) ([]byte, error) {\n\treturn []byte(`{\"status\":{\"code\":503,\"status\":\"Not Implemented\"}}`), nil\n}", "func (h *Hijack) MustLoadResponse() {\n\th.browser.e(h.LoadResponse(http.DefaultClient, true))\n}", "func (ctx *AcceptOfferContext) NoContent() error {\n\tctx.ResponseData.WriteHeader(204)\n\treturn nil\n}", "func NoContent(w http.ResponseWriter) {\n\t// No custom handler since there's no custom behavior.\n\tw.Header().Del(\"Content-Type\")\n\tw.WriteHeader(http.StatusNoContent)\n}", "func NoContent() *Responder {\n\treturn &Responder{\n\t\thttp.StatusNoContent,\n\t\tnil,\n\t\tmake(http.Header),\n\t}\n}", "func (c *Context) Empty(status int) error {\n\tc.W.WriteHeader(status)\n\treturn nil\n}", "func (q *Query) ReturnNILResponse() []byte {\n\tq.queryResponse.TimeInvolved = time.Since(q.stamp)\n\tdata, _ := encode(q.queryResponse, q.encoding).([]byte)\n\treturn data\n}", "func (r *Reply) NoContent() *Reply {\n\treturn 
r.Status(http.StatusNoContent)\n}", "func NoContent(w http.ResponseWriter, r *http.Request) {\n\trender.NoContent(w, r)\n}", "func (ctx *Context) NoContent(code int) error {\n\tctx.ResponseWriter.WriteHeader(code)\n\treturn nil\n}", "func (w responseWriterNoBody) Write(data []byte) (int, error) {\n\treturn 0, nil\n}", "func (client HTTPSuccessClient) Head204Responder(resp *http.Response) (result autorest.Response, err error) { \n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK,http.StatusNoContent),\n autorest.ByClosing())\n result.Response = resp\n return\n}", "func (e EmptyHandler) Handle(q midl.Request, s midl.Response) []byte {\n\treturn e.HandleFunc(q, s)\n}", "func DecodeEmpty(response *Message) (err error) {\n\tmtype, _ := response.getHeader()\n\n\tif mtype == bindings.ResponseFailure {\n\t\te := ErrRequest{}\n\t\te.Code = response.getUint64()\n\t\te.Description = response.getString()\n err = e\n return\n\t}\n\n\tif mtype != bindings.ResponseEmpty {\n\t\terr = fmt.Errorf(\"unexpected response type %d\", mtype)\n return\n\t}\n\n\tresponse.getUint64()\n\n\treturn\n}", "func (client StringClient) PutEmptyResponder(resp *http.Response) (result autorest.Response, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByClosing())\n\tresult.Response = resp\n\treturn\n}", "func (client StringClient) GetEmptyResponder(resp *http.Response) (result StringModel, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result.Value),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (c *Client) Noop() error {\n\tif err := c.hello(); err != nil {\n\t\treturn err\n\t}\n\t_, _, err := c.cmd(250, \"NOOP\")\n\treturn err\n}", "func (u *Updater) HandleNoResponse(requestID []byte) {\n\tidStr := encodeToString(requestID)\n\tu.RLock()\n\ta, found := u.waiting[idStr]\n\tu.RUnlock()\n\tif !found {\n\t\treturn\n\t}\n\tu.Lock()\n\tdelete(u.waiting, idStr)\n\tu.Unlock()\n\tu.network.RemoveNodeID(a.NodeID, true)\n\tu.queueIdx(a.idx)\n}", "func (r *Response) Reset(w http.ResponseWriter) {\n\t*r = Response{ResponseWriter: w, Status: http.StatusOK}\n}", "func TestMiddlewareWithEmpty(t *testing.T) {\n\thandler := WithEmpty(nil, http.HandlerFunc(testHandler))\n\n\treq := httptest.NewRequest(http.MethodGet, \"/api/v1/\", nil)\n\tw := httptest.NewRecorder()\n\n\thandler.ServeHTTP(w, req)\n\n\t// Check the status code is what we expect.\n\tif status := w.Code; status != http.StatusOK {\n\t\tt.Errorf(\"handler returned wrong status code: got %v want %v\", status, http.StatusOK)\n\t}\n\t// Check the response body is what we expect.\n\texpected := `{\"alive\": true}`\n\tif w.Body.String() != expected {\n\t\tt.Errorf(\"handler returned unexpected body: got %v want %v\", w.Body.String(), expected)\n\t}\n\n}", "func (ctx *DeleteOutputContext) NoContent() error {\n\tctx.ResponseData.WriteHeader(204)\n\treturn nil\n}", "func (r Response) NoAuthoritativeInformation(code string, payload Payload, header ...ResponseHeader) {\n\tr.Response(code, http.NonAuthoritativeInformation, payload, header...)\n}", "func OptionNoneResponse(resp IOutProtocol) RouterOptionFunc {\n\treturn func(r *Router) {\n\t\tr.noneResp = resp\n\t}\n}", "func (o *DestroyOneNoContent) WriteResponse(rw http.ResponseWriter, producer httpkit.Producer) {\n\n\trw.WriteHeader(204)\n}", "func (r *Responder) 
NotImplemented() { r.write(http.StatusNotImplemented) }", "func (er *EmptyResponse) AlwaysNil() *string {\n\treturn nil\n}", "func (c *SeaterController) NoContent(code ...int) {\n\tif len(code) > 0 {\n\t\tc.Code(code[0])\n\t} else {\n\t\tc.Code(204)\n\t}\n\tc.Ctx.Output.Body([]byte(\"\"))\n}", "func (rc *Ctx) NoContent() NoContentResult {\n\treturn NoContent\n}", "func (client HTTPSuccessClient) Put204Responder(resp *http.Response) (result autorest.Response, err error) { \n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK,http.StatusNoContent),\n autorest.ByClosing())\n result.Response = resp\n return\n}", "func NewNilResponse(response Responsor) *NilResponse {\n\treturn &NilResponse{response: response}\n}", "func (o *BackFlipDroneNoContent) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(204)\n}", "func (o *GetOrgsOrgMembersUsernameNoContent) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(204)\n}", "func (o *ThingsDeleteNoContent) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(204)\n}", "func (r *Responder) NotExtended() { r.write(http.StatusNotExtended) }", "func (c *Client) Noop() (err error) {\n\tif _, err = c.Cmd(\"%s\\r\\n\", NOOP); err != nil {\n\t\treturn\n\t}\n\treturn\n}", "func (o *RemoveOneNoContent) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(204)\n}", "func (c *Client) Noop() (err error) {\n\ttag, err := c.prepareCmd(\"NOOP\")\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer c.cleanCmd()\n\n\terr = c.writeString(tag + \" NOOP\\r\\n\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\trep := <-c.rep\n\tif rep == nil {\n\t\terr = ErrNilRep\n\t\treturn\n\t}\n\tif rep.err != nil {\n\t\terr = rep.err\n\t\treturn\n\t}\n\treturn\n}", "func (r *Responder) ResetContent() { r.write(http.StatusResetContent) }", "func DefaultResponse() *Response {\n\tresponse := &Response{}\n\tresponse.Headers.ContentType = \"text/html\"\n\tresponse.StatusCode = http.StatusBadRequest\n\tresponse.StatusDescription = http.StatusText(http.StatusBadRequest)\n\treturn response\n}", "func (o *ShipPackageNoContent) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(204)\n}", "func (m *MockServer) VerifyNoMoreRequests(c *C) {\n\tunsatisified := []*MockResponse{}\n\tfor _, r := range m.Responses {\n\t\tif !r.satisfied && !r.Persistant {\n\t\t\tpretty.Println(\"Unsatisfied response:\", r)\n\t\t\tunsatisified = append(unsatisified, r)\n\t\t}\n\t}\n\n\tif len(unsatisified) > 0 {\n\t\tc.Fatal(\"server has unsatisfied responses\")\n\t\tc.Fail()\n\t}\n}", "func RenderGone(w http.ResponseWriter, message ...interface{}) {\n\tRender(w, Gone(message...))\n}", "func (ctx *DeleteFeedContext) NoContent() error {\n\tctx.ResponseData.WriteHeader(204)\n\treturn nil\n}", "func (r *Responder) NotFound() { r.write(http.StatusNotFound) }", "func DefaultResponse() *Response {\n\treturn &Response{}\n}", "func DefaultResponse() *Response {\n\treturn 
&Response{}\n}", "func (b *ClientAdaptor) WithoutResponses(use bool) { b.withoutResponses = use }", "func TestGetEmptyPayrollReport(t *testing.T) {\n\tresetTables()\n\n\t// create and send request\n\trequest, err := http.NewRequest(\"GET\", \"/report\", nil)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// check for http.StatusOK\n\tresponse := sendRequest(request)\n\tcheckStatusCode(t, http.StatusOK, response.Code)\n}", "func (o *DeleteOfferingByIDNoContent) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(204)\n}", "func (c *echoContext) NoContent(d int) error {\n\treturn c.ctx.NoContent(d)\n}", "func (resp *NilResponse) Response() Responsor {\n\treturn resp.response\n}", "func NoOpHTTPStatusHandler(_ context.Context, resp *http.Response) (*http.Response, error) {\n\treturn resp, nil\n}", "func (client MultipleResponsesClient) GetDefaultNone200NoneResponder(resp *http.Response) (result autorest.Response, err error) { \n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK),\n autorest.ByClosing())\n result.Response = resp\n return\n}", "func (client HTTPSuccessClient) Post204Responder(resp *http.Response) (result autorest.Response, err error) { \n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK,http.StatusNoContent),\n autorest.ByClosing())\n result.Response = resp\n return\n}", "func (ctx *DeleteFilterContext) NoContent() error {\n\tctx.ResponseData.WriteHeader(204)\n\treturn nil\n}", "func (r TxResponse) Empty() bool {\n\treturn r.TxHash == \"\" && r.Logs == nil\n}", "func (o *DeleteExecutionNoContent) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(204)\n}", "func (o *UpdateOfferingByIDNoContent) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(204)\n}", "func (o *AbortUploadSessionNoContent) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(204)\n}", "func NewHTTPNoContentResponse(message string) Message {\n\treturn &defaultMessage{\n\t\tmessage,\n\t\thttp.StatusNoContent,\n\t\t\"no_content\",\n\t}\n}", "func (o *LogoutUserNoContent) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(204)\n}", "func PlainResponse(contentType string, val interface{}) Response {\n\treturn SimpleResponse(http.StatusOK, contentType, val)\n}", "func (r *Responder) ServiceUnavailable() { r.write(http.StatusServiceUnavailable) }", "func (client HTTPSuccessClient) Patch204Responder(resp *http.Response) (result autorest.Response, err error) { \n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK,http.StatusNoContent),\n autorest.ByClosing())\n result.Response = resp\n return\n}", "func (s StatusCode) Empty() bool {\n\treturn s == 0\n}", "func (o *DeregisterNodeNoContent) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(204)\n}", "func (o *PutOrgsOrgPublicMembersUsernameNoContent) 
WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(204)\n}", "func (o *V2PostStepReplyNoContent) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(204)\n}", "func (ctx *UpdateCommentContext) NoContent() error {\n\tctx.ResponseData.WriteHeader(204)\n\treturn nil\n}", "func (c *ServerConn) NoOp() error {\n\t_, _, err := c.cmd(StatusCommandOK, \"NOOP\")\n\treturn err\n}", "func respondNotImplemented(w http.ResponseWriter, r *http.Request) {\n\tlog.Printf(\"[request] %s\", r.URL.Path)\n\trespondError(w, \"\", err501)\n}", "func respondNotImplemented(w http.ResponseWriter, r *http.Request) {\n\tlog.Printf(\"[request] %s\", r.URL.Path)\n\trespondError(w, \"\", err501)\n}", "func (t testGRPC) EmptyCall(context.Context, *gt.Empty) (*gt.Empty, error) {\n\treturn new(gt.Empty), nil\n}", "func DefaultResponse() Response {\n\treturn Response{\n\t\tGeneral: response.DefaultGeneral(),\n\t\tWorkers: response.DefaultWorkers(),\n\t}\n}" ]
[ "0.75151366", "0.7469854", "0.7159161", "0.7026811", "0.7014444", "0.69953966", "0.69483393", "0.6848218", "0.6798946", "0.6741318", "0.67343277", "0.67306024", "0.6727121", "0.6715123", "0.66678727", "0.6632295", "0.6557014", "0.6511988", "0.64460313", "0.6439891", "0.64352995", "0.6430173", "0.64267", "0.6410638", "0.64071983", "0.6359716", "0.63539857", "0.63368523", "0.6312081", "0.63023645", "0.6234941", "0.62321043", "0.61945903", "0.6174635", "0.6173499", "0.6138262", "0.61153287", "0.60614204", "0.6056046", "0.60555947", "0.6037688", "0.6036731", "0.60123605", "0.6001821", "0.5992591", "0.5961481", "0.59519124", "0.5934204", "0.59294903", "0.5889639", "0.5879139", "0.5867132", "0.5861577", "0.5856961", "0.58376855", "0.5817906", "0.58128935", "0.5794092", "0.5789573", "0.5773115", "0.5770769", "0.5764157", "0.575724", "0.57443386", "0.5736964", "0.5736324", "0.5730001", "0.57222205", "0.57221025", "0.57135314", "0.57071155", "0.57071155", "0.56918067", "0.5691546", "0.568768", "0.5686089", "0.56854355", "0.56850666", "0.5682753", "0.56667584", "0.56535524", "0.5652593", "0.5651072", "0.5640707", "0.56399775", "0.56396824", "0.563912", "0.56244576", "0.5610046", "0.56060666", "0.5605251", "0.5594965", "0.55825704", "0.55787593", "0.5569731", "0.55664474", "0.5555022", "0.5555022", "0.55528927", "0.55396026" ]
0.7485086
1
Simulate a non-200 HTTP response code.
func TestHTTPError(t *testing.T) {
	doh, _ := NewTransport(testURL, ips, nil, nil, nil)
	transport := doh.(*transport)
	rt := makeTestRoundTripper()
	transport.client.Transport = rt

	go func() {
		<-rt.req
		r, w := io.Pipe()
		rt.resp <- &http.Response{
			StatusCode: 500,
			Body:       r,
			Request:    &http.Request{URL: parsedURL},
		}
		w.Write([]byte{0, 0, 8, 9, 10})
		w.Close()
	}()

	_, err := doh.Query(simpleQueryBytes)
	var qerr *queryError
	if err == nil {
		t.Error("Empty body should cause an error")
	} else if !errors.As(err, &qerr) {
		t.Errorf("Wrong error type: %v", err)
	} else if qerr.status != HTTPError {
		t.Errorf("Wrong error status: %d", qerr.status)
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func respondHTTPCodeOnly(w http.ResponseWriter, code int) {\n w.WriteHeader(code)\n}", "func customResponseCode(w http.ResponseWriter, r *http.Request){\n\tw.WriteHeader(501)\n\tfmt.Fprintln(w, \"You have reached an endpoint that does not exist\")\n\n}", "func failIfNotStatusCode(t *testing.T, resp *github.Response, expectedCode int) {\n\n\tif resp.StatusCode != expectedCode {\n\t\tt.Fatalf(\"Expected HTTP status code [%v] but received [%v]\", expectedCode, resp.StatusCode)\n\t}\n\n}", "func anonymizeStatusCode(code int) int {\n\tif http.StatusOK <= code && code < http.StatusBadRequest {\n\t\treturn http.StatusOK\n\t}\n\treturn code\n}", "func (r *Responder) NoContent() { r.write(http.StatusNoContent) }", "func (r *Responder) ServiceUnavailable() { r.write(http.StatusServiceUnavailable) }", "func (e ServiceUnavailable) Code() int { return http.StatusServiceUnavailable }", "func TestServerReturnBadCode(t *testing.T) {\n\ttestServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tw.Write([]byte(`{}`))\n\t}))\n\t_, err := sendMessage(testServer.URL, \"[email protected]\", \"test\")\n\tif err == nil {\n\t\tt.Fail()\n\t}\n}", "func updateResponseUnavailable(res *http.Response) {\n res.StatusCode = http.StatusServiceUnavailable\n res.Status = http.StatusText(res.StatusCode)\n // Remove any previously set header\n res.Header = nil\n}", "func notFound(resp *ApiResponse, msg string) error {\n resp.StatusCode = http.StatusNotFound\n resp.Message = []byte(msg)\n resp.ErrorMessage = http.StatusText(http.StatusNotFound)\n\n return nil\n}", "func (e Unauthorized) Code() int { return http.StatusUnauthorized }", "func (r *Responder) NotFound() { r.write(http.StatusNotFound) }", "func TestProcessStatusCode(t *testing.T) {\n\tassert := assert.New(t)\n\treq := NewRequest(\"GET\", \"url.com\", *auth)\n\treq.Response = new(http.Response)\n\treq.Response.StatusCode = 404\n\terr := req.ProcessStatusCode()\n\tassert.NotNil(err)\n\treq.Response.StatusCode = 450\n\terr = req.ProcessStatusCode()\n\tassert.NotNil(err)\n\treq.Response.StatusCode = 550\n\terr = req.ProcessStatusCode()\n\tassert.NotNil(err)\n\treq.Response.StatusCode = 200\n\terr = req.ProcessStatusCode()\n\tassert.Nil(err)\n}", "func (r *Responder) ExpectationFailed() { r.write(http.StatusExpectationFailed) }", "func CheckCode(res *http.Response) {\n\tif res.StatusCode != 200 {\n\t\tlog.Fatalf(\"status code err: %d %s\", res.StatusCode, res.Status)\n\t}\n}", "func (r *Responder) NotExtended() { r.write(http.StatusNotExtended) }", "func (c *CountHandler) FailResponse(resp http.ResponseWriter, req *http.Request) {\n\tc.numRequests++\n\tresp.WriteHeader(http.StatusOK)\n\tresp.Write([]byte(\"{\\\"stat\\\": \\\"fail\\\"}\"))\n}", "func TestHttp404(t *testing.T) {\n\tjolokia := genJolokiaClientStub(invalidJSON, 404, Servers, []string{HeapMetric})\n\n\tvar acc testutil.Accumulator\n\terr := acc.GatherError(jolokia.Gather)\n\n\trequire.Error(t, err)\n\trequire.Equal(t, 0, len(acc.Metrics))\n\trequire.Contains(t, err.Error(), \"has status code 404\")\n}", "func testResponse(t *testing.T, e error, expectedCode int) {\n\ttestServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\ttypes.HandleServerError(w, e)\n\t}))\n\tdefer testServer.Close()\n\n\tres, err := http.Get(testServer.URL)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif res.StatusCode != expectedCode 
{\n\t\tt.Errorf(\"Expected status code %v but got %v\", expectedCode, res.StatusCode)\n\t}\n}", "func (a *api) theResponseCodeShouldBe(statusCode int) error {\n\tif a.resp.StatusCode != statusCode {\n\t\treturn fmt.Errorf(\"Expected %d but was %d\", statusCode, a.resp.StatusCode)\n\t}\n\treturn nil\n}", "func writeSuccessNoContent(w http.ResponseWriter) {\n\twriteResponse(w, http.StatusNoContent, nil, mimeNone)\n}", "func InvalidHTTPServer(statusCode int) *httptest.Server {\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.WriteHeader(statusCode)\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t}))\n\treturn server\n}", "func is400(t *testing.T, ts *httptest.Server, path string) []byte {\n\tres, body := checkGet(t, ts, path)\n\tif res.StatusCode != 400 {\n\t\tt.Fatalf(\"Expected status %d, got %d\", 400, res.StatusCode)\n\t}\n\treturn body\n}", "func is200(t *testing.T, ts *httptest.Server, path string) []byte {\n\tres, body := checkGet(t, ts, path)\n\tif res.StatusCode != 200 {\n\t\tt.Fatalf(\"Expected status %d, got %d. Path: %s\", 200, res.StatusCode, path)\n\t}\n\treturn body\n}", "func nocontent(out http.ResponseWriter) {\n\tout.WriteHeader(http.StatusNoContent)\n}", "func HS200t(w http.ResponseWriter, b []byte) {\n\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\tw.Header().Set(\"X-Content-Type-Options\", \"nosniff\")\n\tw.Header().Set(\"Cache-Control\", \"no-cache,no-store\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(b)\n}", "func (r *Responder) BadRequest() { r.write(http.StatusBadRequest) }", "func noop(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(\"not yet implemented\"))\n}", "func failResponse(status *commonpb.Status, reason string) {\n\tstatus.ErrorCode = commonpb.ErrorCode_UnexpectedError\n\tstatus.Reason = reason\n}", "func (client HTTPSuccessClient) Patch200Responder(resp *http.Response) (result autorest.Response, err error) { \n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK),\n autorest.ByClosing())\n result.Response = resp\n return\n}", "func (r Response) NoContent(code string, payload Payload, header ...ResponseHeader) {\n\tr.Response(code, http.NoContent, payload, header...)\n}", "func checkStatusCode(t *testing.T, expected, actual int) {\n\tif expected != actual {\n\t\tt.Errorf(\"Expected response code %d. 
Got %d\\n\", expected, actual)\n\t}\n}", "func alwaysOk(http.ResponseWriter, *http.Request, int) (int, error) { return 0, nil }", "func unauthorized(resp *ApiResponse, msg string) error {\n resp.StatusCode = http.StatusUnauthorized\n resp.Message = []byte(msg)\n resp.ErrorMessage = msg\n\n return nil\n}", "func Fail(w http.ResponseWriter, code, message string, err error) {\n\tres := &errors.Error{\n\t\tCode: code,\n\t\tMessage: message,\n\t\tDetail: err.Error(),\n\t}\n\tErr, ok := err.(*errors.Error)\n\tif ok {\n\t\tres = Err\n\t}\n\tsc, ok := statusMap()[res.Code]\n\tif !ok {\n\t\tsc = http.StatusInternalServerError\n\t}\n\tw.WriteHeader(sc)\n\tif err = render.Write(w, res); err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprint(w, \"Error rendering response\")\n\t}\n}", "func fail(res http.ResponseWriter, code int, message string) {\n\tres.WriteHeader(code)\n\tbody, _ := json.Marshal(ErrorResponse{message})\n\tres.Write(body)\n}", "func serviceUnavailable(resp *ApiResponse, msg string) error {\n resp.StatusCode = http.StatusServiceUnavailable\n resp.Message = []byte(msg)\n resp.ErrorMessage = msg\n\n return nil\n}", "func translateNonSuccessStatusCode(code int) error {\n\tswitch code {\n\tcase http.StatusBadRequest:\n\t\treturn ErrBadRequest\n\tcase http.StatusUnauthorized, http.StatusForbidden:\n\t\treturn ErrFailedAuthentication\n\tdefault:\n\t\treturn errNonSuccessResponse\n\t}\n}", "func (e *expectation) WithStatusCode(code int) { e.code = code }", "func failWithRcode(w dns.ResponseWriter, r *dns.Msg, rCode int) {\n m := new(dns.Msg)\n m.SetRcode(r, rCode)\n w.WriteMsg(m)\n m = nil\n}", "func TestPostNonRetriable(t *testing.T) {\n\tstatus := http.StatusBadRequest\n\ttries := 0\n\tts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.WriteHeader(status)\n\t\tif tries++; tries > 1 {\n\t\t\tt.Errorf(\"expected client to not retry after receiving status code %d\", status)\n\t\t}\n\t}))\n\n\tdefer ts.Close()\n\n\tc := &APIClient{\n\t\tBaseURL: ts.URL,\n\t\tClient: ts.Client(),\n\t}\n\n\terr := c.PingSuccess(TestUUID, nil)\n\tif err == nil {\n\t\tt.Errorf(\"expected PingSuccess to return non-nil error after non-retriable API response\")\n\t}\n}", "func (r *Responder) NotImplemented() { r.write(http.StatusNotImplemented) }", "func mockTest0103(w http.ResponseWriter, r *http.Request) {\n\tretCode, err := common.GetIntArgFromQuery(r, \"code\")\n\tif err != nil {\n\t\tcommon.ErrHandler(w, err)\n\t\treturn\n\t}\n\tif retCode < http.StatusOK {\n\t\tretCode = http.StatusOK\n\t}\n\n\tb := []byte(\"mockTest0103, mock return error code.\")\n\tw.Header().Set(common.TextContentLength, strconv.Itoa(len(b)))\n\tw.WriteHeader(retCode)\n\tlog.Println(\"mock return error code:\", retCode)\n\n\tif _, err := w.Write(b); err != nil {\n\t\tcommon.ErrHandler(w, err)\n\t}\n}", "func NoOpHTTPStatusHandler(_ context.Context, resp *http.Response) (*http.Response, error) {\n\treturn resp, nil\n}", "func (client HTTPSuccessClient) Head404Responder(resp *http.Response) (result autorest.Response, err error) { \n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK,http.StatusNoContent,http.StatusNotFound),\n autorest.ByClosing())\n result.Response = resp\n return\n}", "func set_response_status(w http.ResponseWriter, s int) {\n\t// Argument s should be one of the RFC2616 constants defined here:\n\t// https://golang.org/src/net/http/status.go\n\tw.WriteHeader(s)\n}", "func (ctx *Context) 
NoContent(code int) error {\n\tctx.ResponseWriter.WriteHeader(code)\n\treturn nil\n}", "func raiseForStatus(response *http.Response, module int) (err error) {\n\tif response.StatusCode >= 300 {\n\t\tbody, err := ioutil.ReadAll(response.Body)\n\t\tvar explanation string = \"\"\n\t\tif err != nil {\n\t\t\texplanation = string(body)\n\t\t}\n\n\t\treturn APIError {\n\t\t\tGetGeneralStatusError(response.StatusCode, module),\n\t\t\tresponse.StatusCode,\n\t\t\texplanation,\n\t\t}\n\t}\n\n\treturn err\n}", "func (client HTTPSuccessClient) Post200Responder(resp *http.Response) (result autorest.Response, err error) { \n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK),\n autorest.ByClosing())\n result.Response = resp\n return\n}", "func SendResponseErr(w http.ResponseWriter, httpStatus int, resp Response) {\n\tif resp.Code == \"0\" {\n\t\tresp.Code = \"-1\"\n\t}\n\n\tjson, _ := json.Marshal(resp)\n\n\tw.WriteHeader(httpStatus)\n\tw.Write(json)\n}", "func TestGetDataFromUrlNon200HttpCode(t *testing.T) {\n\tdefer gock.Off()\n\n\tapiUrl := \"http://example.com\"\n\tapiPath := \"status\"\n\n\tgock.New(apiUrl).\n\t\tGet(apiPath).\n\t\tReply(201).\n\t\tBodyString(\"\")\n\n\t_, err := getDataFromURL(apiUrl+\"/\"+apiPath, ioutil.ReadAll)\n\n\tassert.Error(t, err)\n}", "func (hr *HTTPResponse) AssertStatusCode(t *testing.T, code int) *HTTPResponse {\n\tif hr.Code != code {\n\t\tt.Fatalf(\"expected response code to be %d, but got %d\", code, hr.Code)\n\t}\n\treturn hr\n}", "func (client HTTPSuccessClient) Get200Responder(resp *http.Response) (result Bool, err error) { \n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK),\n autorest.ByUnmarshallingJSON(&result.Value),\n autorest.ByClosing())\n result.Response = autorest.Response{Response: resp}\n return\n}", "func (r *Responder) SeeOther() { r.write(http.StatusSeeOther) }", "func NotOK(message string) *Responder {\n\treturn &Responder{\n\t\thttp.StatusOK,\n\t\t&models.StatusResponse{Status: false, Message: models.StatusMessage(message)},\n\t\tmake(http.Header),\n\t}\n}", "func NoHandler(payload []byte) ([]byte, error) {\n\treturn []byte(`{\"status\":{\"code\":503,\"status\":\"Not Implemented\"}}`), nil\n}", "func (o *GetRunDownstreamLineageNoContent) Code() int {\n\treturn 204\n}", "func FailSimple(code int) APIStatus {\n\treturn APIStatus{success: false, code: code, message: strconv.Itoa(code) + \" \" + http.StatusText(code)}\n}", "func CreateEmptyResponse(w http.ResponseWriter) {\n\tw.WriteHeader(http.StatusNoContent)\n}", "func HS400(w http.ResponseWriter) {\n\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\tw.Header().Set(\"X-Content-Type-Options\", \"nosniff\")\n\tw.Header().Set(\"Cache-Control\", \"no-cache,no-store\")\n\tw.WriteHeader(http.StatusBadRequest)\n\tw.Write([]byte(\"400 Bad Request\"))\n}", "func Test_ErrorStatusCode(t *testing.T) {\n\tstatsCode := service.ErrorStatusCode(service.ErrorCodeNotFound)\n\tassert.Equal(t, statsCode, 404)\n}", "func (client HTTPSuccessClient) Patch204Responder(resp *http.Response) (result autorest.Response, err error) { \n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK,http.StatusNoContent),\n autorest.ByClosing())\n result.Response = resp\n return\n}", "func isOK(statusCode int) bool {\n\treturn statusCode < minHTTPErrorStatusCode\n}", "func (s statusCode) Unsuccessful() bool { return !s.Successful() }", "func (o 
*UnclaimTrafficFilterLinkIDOK) Code() int {\n\treturn 200\n}", "func (client MultipleResponsesClient) Get200ModelA200InvalidResponder(resp *http.Response) (result A, err error) { \n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK),\n autorest.ByUnmarshallingJSON(&result),\n autorest.ByClosing())\n result.Response = autorest.Response{Response: resp}\n return\n}", "func statusTargetWithResponseStatusCode(code int) (statusResponse, int) {\n\tstatusTarget := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.WriteHeader(code)\n\t}))\n\tdefer statusTarget.Close()\n\n\tresponse := httptest.NewRecorder()\n\thandler := newStatusHandler(func() (net.Conn, error) {\n\t\tu, _ := url.Parse(statusTarget.URL) // NOTE: I tried using statusTarget.Config.Addr instead, but it wasn't set.\n\t\treturn net.Dial(\"tcp\", fmt.Sprintf(\"%s:%s\", u.Hostname(), u.Port()))\n\t}, statusTarget.URL)\n\n\treq := httptest.NewRequest(http.MethodGet, \"/not-empty\", nil)\n\thandler.Listening() // NOTE: required for non-503 backend response code.\n\thandler.ServeHTTP(response, req)\n\tres := response.Result()\n\tdefer res.Body.Close()\n\n\tdata, _ := ioutil.ReadAll(res.Body)\n\n\tstatusResp := statusResponse{}\n\t_ = json.Unmarshal(data, &statusResp)\n\n\treturn statusResp, res.StatusCode\n}", "func HttpStatus(resp *http.Response) (int, error) {\n\tvar err error\n\tif resp.StatusCode > 399 {\n\t\terr = errors.New(strconv.Itoa(resp.StatusCode) + \" \" + resp.Status)\n\t}\n\treturn resp.StatusCode, err\n}", "func (s statusCode) Successful() bool { return s >= 200 && s < 300 }", "func (r Response) NotFound(code string, payload Payload, header ...ResponseHeader) {\n\tr.Response(code, http.NotFound, payload, header...)\n}", "func (client HTTPSuccessClient) Put204Responder(resp *http.Response) (result autorest.Response, err error) { \n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK,http.StatusNoContent),\n autorest.ByClosing())\n result.Response = resp\n return\n}", "func respondWithError(w http.ResponseWriter, code int, message string) {\n respondWithJSON(w, code, map[string]string{\"error\": message})\n}", "func (r *Response) NotFound(v interface{}) {\n\tr.writeResponse(http.StatusNotFound, v)\n}", "func NoContent(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusNoContent)\n}", "func responseWithError(w http.ResponseWriter, code int, err error) {\n\tresponseWithJSON(w, code, map[string]string{\"error\": err.Error()})\n}", "func do404(response http.ResponseWriter) {\n\thttp.Error(response, \"404! OH NO! 
This page does not exist!\", 404)\n}", "func (client HTTPSuccessClient) Head204Responder(resp *http.Response) (result autorest.Response, err error) { \n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK,http.StatusNoContent),\n autorest.ByClosing())\n result.Response = resp\n return\n}", "func (o *UnclaimTrafficFilterLinkIDInternalServerError) Code() int {\n\treturn 500\n}", "func HS400t(w http.ResponseWriter, errmsg string) {\n\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\tw.Header().Set(\"X-Content-Type-Options\", \"nosniff\")\n\tw.Header().Set(\"Cache-Control\", \"no-cache,no-store\")\n\tw.WriteHeader(http.StatusBadRequest)\n\tw.Write([]byte(\"400 Bad Request: \" + errmsg))\n}", "func (o *TransferRunsNoContent) Code() int {\n\treturn 204\n}", "func responseBadGateway(rw http.ResponseWriter) {\n\tresponseString(rw, http.StatusBadGateway, \"bad gateway\")\n}", "func (client HTTPSuccessClient) Head200Responder(resp *http.Response) (result autorest.Response, err error) { \n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK),\n autorest.ByClosing())\n result.Response = resp\n return\n}", "func (client HTTPSuccessClient) Put200Responder(resp *http.Response) (result autorest.Response, err error) { \n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK),\n autorest.ByClosing())\n result.Response = resp\n return\n}", "func (o *PatchServiceAccountTokenNoContent) Code() int {\n\treturn 204\n}", "func NotFound(w ResponseWriter, r *Request) {\n\tw.SetHeader(CodeNotFound, \"not found\")\n}", "func (client HTTPSuccessClient) Post204Responder(resp *http.Response) (result autorest.Response, err error) { \n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK,http.StatusNoContent),\n autorest.ByClosing())\n result.Response = resp\n return\n}", "func failResponseWithCode(status *commonpb.Status, errCode commonpb.ErrorCode, reason string) {\n\tstatus.ErrorCode = errCode\n\tstatus.Reason = reason\n}", "func (bur BlobsUndeleteResponse) StatusCode() int {\n\treturn bur.rawResponse.StatusCode\n}", "func (r *Responder) NotAcceptable() { r.write(http.StatusNotAcceptable) }", "func return404(w http.ResponseWriter) {\n\tw.Header().Set(\"Content-Type\", \"text/html\")\n\tw.Header().Set(\"Server\", \"nginx\") // tell its nginx\n\tw.WriteHeader(http.StatusNotFound) // 404\n\n\t// write default nginx 404 page\n\tfmt.Fprintf(w, defaultnginx404)\n}", "func checkResponseCode(resp http.Response) (respBody []byte, err error) {\n\trespBody, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to read tank response: %d %w\", resp.StatusCode, err)\n\t\treturn\n\t}\n\tif resp.StatusCode != 200 {\n\t\terr = fmt.Errorf(\"%d: %s\", resp.StatusCode, string(respBody))\n\t}\n\treturn\n}", "func TestReturns200IfThereAreNoChecks(t *testing.T) {\n\trecorder := httptest.NewRecorder()\n\n\treq, err := http.NewRequest(\"GET\", \"https://fakeurl.com/debug/health\", nil)\n\tif err != nil {\n\t\tt.Errorf(\"Failed to create request.\")\n\t}\n\n\tStatusHandler(recorder, req)\n\n\tif recorder.Code != 200 {\n\t\tt.Errorf(\"Did not get a 200.\")\n\t}\n}", "func custom404PageHandler(w http.ResponseWriter, r *http.Request, status int) {\n\tw.Header().Set(\"Content-Type\", \"text/html\") // set the content header type\n\tw.WriteHeader(status) // this automatically generates a 404 status code\n\tif 
reflect.DeepEqual(status, http.StatusNotFound) {\n\t\tdata404Page := \"This page does not exist ... 404!\" // custom error message content\n\t\tio.WriteString(w, data404Page)\n\t}\n}", "func HS200j(w http.ResponseWriter, b []byte) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Header().Set(\"X-Content-Type-Options\", \"nosniff\")\n\tw.Header().Set(\"Cache-Control\", \"no-cache,no-store\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(b)\n}", "func TestGetFailure0(t *testing.T) {\n\tisTesting = true\n\n\tvar params = make(map[string]string)\n\tparams[\"id\"] = \"invalid-id\"\n\n\tvar request = Request{\n\t\tPath: \"/api/devices\",\n\t\tPathParameters: params,\n\t\tHTTPMethod: \"GET\",\n\t}\n\tvar response, _ = Handler(request)\n\n\tif response.StatusCode != 404 {\n\t\tt.Errorf(\"response status code has to be 404 but is %d\", response.StatusCode)\n\t}\n}", "func (client HTTPSuccessClient) Delete200Responder(resp *http.Response) (result autorest.Response, err error) { \n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK),\n autorest.ByClosing())\n result.Response = resp\n return\n}", "func httpGetRespondsWith200(goTest *testing.T, output infratests.TerraformOutput) {\n\thostname := output[\"app_service_default_hostname\"].(string)\n\tmaxRetries := 20\n\ttimeBetweenRetries := 2 * time.Second\n\texpectedResponse := \"Hello App Service!\"\n\n\terr := httpClient.HttpGetWithRetryWithCustomValidationE(\n\t\tgoTest,\n\t\thostname,\n\t\tmaxRetries,\n\t\ttimeBetweenRetries,\n\t\tfunc(status int, content string) bool {\n\t\t\treturn status == 200 && strings.Contains(content, expectedResponse)\n\t\t},\n\t)\n\tif err != nil {\n\t\tgoTest.Fatal(err)\n\t}\n}", "func NoContent() Response {\n\treturn Response{\n\t\tStatusCode: http.StatusNoContent,\n\t}\n}", "func HandleStatusCode(res *http.Response, err error) (*http.Response, error) {\n\tif err != nil || (res.StatusCode >= 200 && res.StatusCode < 300) {\n\t\treturn res, err\n\t}\n\tvar sb strings.Builder\n\tfmt.Fprintln(&sb, \"http error with status code:\", res.StatusCode)\n\tif err = printPayload(res, &sb); err != nil {\n\t\treturn res, err\n\t}\n\treturn res, errors.New(sb.String())\n}", "func (r *Responder) Unauthorized() { r.write(http.StatusUnauthorized) }", "func doNotFoundTest(t *testing.T, method string, uri string) {\n\tclient := testHttpClient()\n\treq, err := http.NewRequest(method, testServer.URL+uri, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif res.StatusCode != 404 {\n\t\tt.Errorf(\"%s %s : Expected HTTP Status Code 404, got %d\\n\", method, uri, res.StatusCode)\n\t}\n}" ]
[ "0.7124401", "0.7087085", "0.65703964", "0.64711255", "0.6452569", "0.64233696", "0.641495", "0.6388999", "0.6352772", "0.63128644", "0.6304014", "0.62912434", "0.6290846", "0.62804615", "0.6208764", "0.62053645", "0.6187882", "0.6167296", "0.6165685", "0.6135368", "0.61277133", "0.6121265", "0.6112465", "0.6110579", "0.6102175", "0.60869324", "0.60864854", "0.6083982", "0.6083737", "0.6065298", "0.60647565", "0.60609406", "0.60557723", "0.6051562", "0.6048065", "0.6047516", "0.60084933", "0.60003436", "0.5996815", "0.5993572", "0.59869057", "0.5983134", "0.59739894", "0.59713304", "0.596386", "0.59606856", "0.59604865", "0.5951102", "0.59472823", "0.5946136", "0.5932015", "0.5928322", "0.59195125", "0.59129673", "0.5909774", "0.59063995", "0.5903589", "0.59003174", "0.58991766", "0.5880846", "0.58658636", "0.5860436", "0.58575594", "0.5855649", "0.5851456", "0.584836", "0.5833951", "0.58326846", "0.5829316", "0.5828173", "0.5820712", "0.5819232", "0.58148557", "0.58045626", "0.5797651", "0.5797131", "0.5782425", "0.57808846", "0.57800347", "0.57767427", "0.57757264", "0.5767967", "0.57632995", "0.5762032", "0.57620245", "0.5754301", "0.5751595", "0.5746989", "0.5746971", "0.5741766", "0.5740135", "0.57392", "0.5729979", "0.5729624", "0.5727519", "0.57213765", "0.5720906", "0.5719067", "0.5718296", "0.5713125", "0.57087255" ]
0.0
-1
Simulate an HTTP query error.
func TestSendFailed(t *testing.T) { doh, _ := NewTransport(testURL, ips, nil, nil, nil) transport := doh.(*transport) rt := makeTestRoundTripper() transport.client.Transport = rt rt.err = errors.New("test") _, err := doh.Query(simpleQueryBytes) var qerr *queryError if err == nil { t.Error("Send failure should be reported") } else if !errors.As(err, &qerr) { t.Errorf("Wrong error type: %v", err) } else if qerr.status != SendFailed { t.Errorf("Wrong error status: %d", qerr.status) } else if !errors.Is(qerr, rt.err) { t.Errorf("Underlying error is not retained") } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestHTTPError(t *testing.T) {\n\tdoh, _ := NewTransport(testURL, ips, nil, nil, nil)\n\ttransport := doh.(*transport)\n\trt := makeTestRoundTripper()\n\ttransport.client.Transport = rt\n\n\tgo func() {\n\t\t<-rt.req\n\t\tr, w := io.Pipe()\n\t\trt.resp <- &http.Response{\n\t\t\tStatusCode: 500,\n\t\t\tBody: r,\n\t\t\tRequest: &http.Request{URL: parsedURL},\n\t\t}\n\t\tw.Write([]byte{0, 0, 8, 9, 10})\n\t\tw.Close()\n\t}()\n\n\t_, err := doh.Query(simpleQueryBytes)\n\tvar qerr *queryError\n\tif err == nil {\n\t\tt.Error(\"Empty body should cause an error\")\n\t} else if !errors.As(err, &qerr) {\n\t\tt.Errorf(\"Wrong error type: %v\", err)\n\t} else if qerr.status != HTTPError {\n\t\tt.Errorf(\"Wrong error status: %d\", qerr.status)\n\t}\n}", "func queryFailed(t *testing.T, err error) {\n\tt.Fatalf(\"Failed to query tree: %s\\n\", err.Error())\n}", "func TestQueryError(t *testing.T) {\n\tdb, mock := newMock(t)\n\tmock.ExpectQuery(initialQueryRegex()).WillReturnError(errors.New(\"Query failed.\"))\n\n\tevs := LoadAll(context.Background(), db, initialQuery)\n\tsave := <-evs.Saves()\n\tassertMapLength(t, 0, save.Requests)\n\tassertMapLength(t, 0, save.Imps)\n\tassertExpectationsMet(t, mock)\n}", "func (r *Responder) ExpectationFailed() { r.write(http.StatusExpectationFailed) }", "func (r *Responder) ServiceUnavailable() { r.write(http.StatusServiceUnavailable) }", "func TestGraphQL_BadRequest(t *testing.T) {\n\tstack := createNode(t, false, true)\n\tdefer stack.Close()\n\t// start node\n\tif err := stack.Start(); err != nil {\n\t\tt.Fatalf(\"could not start node: %v\", err)\n\t}\n\t// create http request\n\tbody := strings.NewReader(\"{\\\"query\\\": \\\"{bleh{number}}\\\",\\\"variables\\\": null}\")\n\tgqlReq, err := http.NewRequest(http.MethodGet, fmt.Sprintf(\"%s/graphql\", stack.HTTPEndpoint()), body)\n\tif err != nil {\n\t\tt.Fatalf(\"could not post: %v\", err)\n\t}\n\t// read from response\n\tresp := doHTTPRequest(t, gqlReq)\n\tbodyBytes, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tt.Fatalf(\"could not read from response body: %v\", err)\n\t}\n\tassert.Equal(t, \"\", string(bodyBytes)) // TODO: geth1.10.2: check changes\n\tassert.Equal(t, 404, resp.StatusCode)\n}", "func TestGetDataFromUrlError(t *testing.T) {\n\tdefer gock.Off()\n\n\tapiUrl := \"http://example.com\"\n\tapiPath := \"status\"\n\n\tgock.New(apiUrl).\n\t\tGet(apiPath).\n\t\tReply(302).\n\t\tBodyString(\"\")\n\n\t_, err := getDataFromURL(apiUrl+\"/\"+apiPath, ioutil.ReadAll)\n\n\tassert.Error(t, err)\n}", "func Query(err error, switcher int) {\n\tfmt.Fprintf(os.Stderr, \"ERROR: %v\\n\", err)\n\n\tswitch {\n\n\tcase switcher == 0:\n\t\tfmt.Fprintln(os.Stderr, \"Search: \"+queryURL+replaceSpace(err.Error()))\n\n\tcase switcher == 1:\n\t\twebbrowser.Open(queryURL + err.Error())\n\n\tdefault:\n\t\tfmt.Fprintf(os.Stderr, \"UNKNOWN switcher (need 0 or 1): %v\\n\", switcher)\n\t}\n\n\tos.Exit(1)\n}", "func captureFailedQuery(method string, query interface{}, errorResponse interface{}) {\n\tCaptureException(\n\t\tfmt.Errorf(\"daemon responded with an error when calling method %v\", method),\n\t\tmap[string]string{\n\t\t\t\"method\": method,\n\t\t\t\"query\": fmt.Sprintf(\"%v\", query),\n\t\t\t\"response\": fmt.Sprintf(\"%v\", errorResponse),\n\t\t},\n\t)\n}", "func TestHttp404(t *testing.T) {\n\tjolokia := genJolokiaClientStub(invalidJSON, 404, Servers, []string{HeapMetric})\n\n\tvar acc testutil.Accumulator\n\terr := acc.GatherError(jolokia.Gather)\n\n\trequire.Error(t, err)\n\trequire.Equal(t, 0, 
len(acc.Metrics))\n\trequire.Contains(t, err.Error(), \"has status code 404\")\n}", "func TestBadHTTP(t *testing.T, method, urlStr string, payload io.Reader) {\n\treq, err := http.NewRequest(method, urlStr, payload)\n\tif err != nil {\n\t\t_, fn, line, _ := runtime.Caller(1)\n\t\tt.Fatalf(\"Unsuccessful %s on %q: %v [%s:%d]\\n\", method, urlStr, err, fn, line)\n\t}\n\tw := httptest.NewRecorder()\n\tServeSingleHTTP(w, req)\n\tif w.Code == http.StatusOK {\n\t\t_, fn, line, _ := runtime.Caller(1)\n\t\tt.Fatalf(\"Expected bad server response to %s on %q, got %d instead. [%s:%d]\\n\", method, urlStr, w.Code, fn, line)\n\t}\n}", "func Error(w http.ResponseWriter, code int, msg string, data interface{}) error {\n\tif Log {\n\t\tlog.Printf(\"Error %v: %v\", code, msg)\n\t}\n\n\treturn sendResponse(w, Resp{ERROR, code, msg, data, ErrorHttpCode})\n}", "func Error(w http.ResponseWriter, r *http.Request, err error) {\n\thandler, ok := err.(http.Handler)\n\tif !ok {\n\t\terrCode, ok := err.(ErrorCode)\n\t\tif !ok {\n\t\t\terrCode = errcode.Add(500, err)\n\t\t}\n\t\thandler = errorCodeHandler{\n\t\t\terr: errCode,\n\t\t}\n\t}\n\thandler.ServeHTTP(w, r)\n}", "func TestGetFailure0(t *testing.T) {\n\tisTesting = true\n\n\tvar params = make(map[string]string)\n\tparams[\"id\"] = \"invalid-id\"\n\n\tvar request = Request{\n\t\tPath: \"/api/devices\",\n\t\tPathParameters: params,\n\t\tHTTPMethod: \"GET\",\n\t}\n\tvar response, _ = Handler(request)\n\n\tif response.StatusCode != 404 {\n\t\tt.Errorf(\"response status code has to be 404 but is %d\", response.StatusCode)\n\t}\n}", "func endRequestWithError(w http.ResponseWriter, r *http.Request, statusCode int, err error) {\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), statusCode)\n\t} else {\n\t\thttp.Error(w, \"\", statusCode)\n\t}\n}", "func QueryBadRequest(result interface{}) ([]byte, error) {\n\treturn json.MarshalIndent(QueryResult{\n\t\tStatus: http.StatusBadRequest,\n\t\tResult: codec.MustMarshalJSONIndent(ModuleCdc, result),\n\t}, \"\", \" \")\n}", "func (s *Service) sendError(rsp http.ResponseWriter, req *Request, err error) {\n var m string\n var r int\n var c error\n var h map[string]string\n \n switch v := err.(type) {\n case *Error:\n r = v.Status\n h = v.Headers\n c = v.Cause\n m = fmt.Sprintf(\"%s: [%v] %v\", s.name, req.Id, c)\n if d := formatDetail(c); d != \"\" {\n m += \"\\n\"+ d\n }\n default:\n r = http.StatusInternalServerError\n c = basicError{http.StatusInternalServerError, err.Error()}\n m = fmt.Sprintf(\"%s: [%v] %v\", s.name, req.Id, err)\n }\n \n // propagate non-success, non-client errors; just log others\n if r < 200 || r >= 500 {\n alt.Error(m, nil, nil)\n }else{\n alt.Debug(m)\n }\n if req.Accepts(\"text/html\") {\n s.sendEntity(rsp, req, r, h, htmlError(r, h, c))\n }else{\n s.sendEntity(rsp, req, r, h, c)\n }\n}", "func Error(ctx *fiber.Ctx, msg string, e error, status int) error {\n\treturn response(ctx, status, fiber.Map{\n\t\t\"success\": false,\n\t\t\"message\": msg,\n\t\t\"data\": e,\n\t})\n}", "func (r *Router) Error(w http.ResponseWriter, err error, statusCode int) {\n\thttp.Error(w, err.Error(), statusCode)\n}", "func (r *result) Error(url string) {\n\tr.mux.Lock()\n\tr.Errors[url] = true\n\tr.give(url)\n\tr.mux.Unlock()\n}", "func (r *Responder) BadRequest() { r.write(http.StatusBadRequest) }", "func HTTPQuery(request *http.Request) *http.Response {\n\tclient := &http.Client{\n\t\tTransport: &http.Transport {\n\t\t\t// Support for 'HTTP[S]_PROXY'/'NO_PROXY' envvars\n\t\t\tProxy: http.ProxyFromEnvironment,\n\t\t\t// 
Support main CLI's 'core.ssl_verify' setting\n\t\t\tTLSClientConfig: buildTLSConfig(),\n\t\t},\n\t}\n\tvar err interface{}\n\tresponse, err := client.Do(request)\n\tswitch err.(type) {\n\tcase *url.Error:\n\t\t// extract wrapped error\n\t\terr = err.(*url.Error).Err\n\t}\n\tif err != nil {\n\t\tswitch err.(type) {\n\t\tcase x509.UnknownAuthorityError:\n\t\t\t// custom suggestions for a certificate error:\n\t\t\tPrintMessage(\"HTTP %s Query for %s failed: %s\", request.Method, request.URL, err)\n\t\t\tPrintMessage(\"- Is the cluster CA certificate configured correctly? Check 'dcos config show core.ssl_verify'.\")\n\t\t\tPrintMessageAndExit(\"- To ignore the unvalidated certificate and force your command (INSECURE), use --force-insecure\")\n\t\tdefault:\n\t\t\tPrintMessage(\"HTTP %s Query for %s failed: %s\", request.Method, request.URL, err)\n\t\t\tPrintMessage(\"- Is 'core.dcos_url' set correctly? Check 'dcos config show core.dcos_url'.\")\n\t\t\tPrintMessage(\"- Is 'core.dcos_acs_token' set correctly? Run 'dcos auth login' to log in.\")\n\t\t\tPrintMessageAndExit(\"- Are any needed proxy settings set correctly via HTTP_PROXY/HTTPS_PROXY/NO_PROXY? Check with your network administrator.\")\n\t\t}\n\t}\n\treturn response\n}", "func internalServerError(rw http.ResponseWriter, r *http.Request) {\n\n}", "func TestGetFailure1(t *testing.T) {\n\tisTesting = true\n\n\tvar params = make(map[string]string)\n\tparams[\"id\"] = \"\"\n\n\tvar request = Request{\n\t\tPath: \"/api/devices\",\n\t\tPathParameters: params,\n\t\tHTTPMethod: \"GET\",\n\t}\n\tvar response, _ = Handler(request)\n\n\tif response.StatusCode != 404 {\n\t\tt.Errorf(\"response status code has to be 404 but is %d\", response.StatusCode)\n\t}\n}", "func sendHttpErr(w http.ResponseWriter, status int) {\n\thttp.Error(w, http.StatusText(status), status)\n}", "func TestGraphQLHTTPOnSamePort_GQLRequest_Unsuccessful(t *testing.T) {\n\tstack := createNode(t, false, false)\n\tdefer stack.Close()\n\tif err := stack.Start(); err != nil {\n\t\tt.Fatalf(\"could not start node: %v\", err)\n\t}\n\tbody := strings.NewReader(`{\"query\": \"{block{number}}\",\"variables\": null}`)\n\tresp, err := http.Post(fmt.Sprintf(\"%s/graphql\", stack.HTTPEndpoint()), \"application/json\", body)\n\tif err != nil {\n\t\tt.Fatalf(\"could not post: %v\", err)\n\t}\n\t// make sure the request is not handled successfully\n\tassert.Equal(t, http.StatusNotFound, resp.StatusCode)\n}", "func handle_conn_err(err error) {\n if err == io.EOF {\n fmt.Println(\"Connection went away\")\n } else if neterr, ok := err.(net.Error); ok && neterr.Timeout() {\n fmt.Println(\"Connection Timeout\")\n } else if operr, ok := err.(*net.OpError); ok {\n if operr.Op == \"dial\" {\n fmt.Println(\"Couldn't reach host\")\n } else if operr.Op == \"read\" {\n fmt.Println(\"Can't read closed connection\")\n } else {\n fmt.Printf(\"Failed to perform op: '%s'\\n\", operr.Op)\n }\n }\n}", "func TestGetDataFromUrlBodyReadError(t *testing.T) {\n\tdefer gock.Off()\n\n\tapiUrl := \"http://example.com\"\n\tapiPath := \"status\"\n\n\tgock.New(apiUrl).\n\t\tGet(apiPath).\n\t\tReply(200).\n\t\tBodyString(\"\")\n\n\t_, err := getDataFromURL(apiUrl+\"/\"+apiPath, func(r io.Reader) ([]byte, error) {\n\t\treturn nil, errors.New(\"IO Reader error occurred\")\n\t})\n\n\tassert.Error(t, err)\n}", "func (une *UnknownNetworkError) NotFound() {}", "func (r *Request) Error() error {\n\treturn r.err\n}", "func (api *API) readError(c echo.Context, err error) *echo.HTTPError {\n\tapi.systemMetrics.Inc(metrics.Error, 
1)\n\tgm := c.Get(\"gm\") // DO NOT cast .(metrics.Metrics), only use maybeInc()\n\tswitch v := err.(type) {\n\tcase etre.Error:\n\t\tmaybeInc(metrics.ClientError, 1, gm)\n\t\treturn echo.NewHTTPError(v.HTTPStatus, err)\n\tcase entity.ValidationError:\n\t\tmaybeInc(metrics.ClientError, 1, gm)\n\t\tetreError := etre.Error{\n\t\t\tMessage: v.Err.Error(),\n\t\t\tType: v.Type,\n\t\t\tHTTPStatus: http.StatusBadRequest,\n\t\t}\n\t\treturn echo.NewHTTPError(etreError.HTTPStatus, etreError)\n\tcase auth.Error:\n\t\t// Metric incremented by caller\n\t\tetreError := etre.Error{\n\t\t\tMessage: v.Err.Error(),\n\t\t\tType: v.Type,\n\t\t\tHTTPStatus: v.HTTPStatus,\n\t\t}\n\t\treturn echo.NewHTTPError(etreError.HTTPStatus, etreError)\n\tcase entity.DbError:\n\t\tdbErr := err.(entity.DbError)\n\t\tif dbErr.Err == context.DeadlineExceeded {\n\t\t\tmaybeInc(metrics.QueryTimeout, 1, gm)\n\t\t} else {\n\t\t\tlog.Printf(\"DATABASE ERROR: %v\", dbErr)\n\t\t\tmaybeInc(metrics.DbError, 1, gm)\n\t\t}\n\t\tetreError := etre.Error{\n\t\t\tMessage: dbErr.Error(),\n\t\t\tType: dbErr.Type,\n\t\t\tHTTPStatus: http.StatusServiceUnavailable,\n\t\t\tEntityId: dbErr.EntityId,\n\t\t}\n\t\treturn echo.NewHTTPError(etreError.HTTPStatus, etreError)\n\tdefault:\n\t\tlog.Printf(\"API ERROR: %v\", err)\n\t\tmaybeInc(metrics.APIError, 1, gm)\n\t\treturn echo.NewHTTPError(http.StatusInternalServerError, err)\n\t}\n}", "func TestBadRequest(t *testing.T) {\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tpushError(w, errPaginate, http.StatusBadRequest)\n\t}))\n\tclient := SearchClient{allowedAccessToken, server.URL}\n\tdefer server.Close()\n\n\t_, err := client.FindUsers(SearchRequest{})\n\n\tif err == nil {\n\t\tt.Errorf(\"empty error\")\n\t} else if !strings.Contains(err.Error(), \"unknown bad request error\") {\n\t\tt.Errorf(\"invalid error: %v\", err.Error())\n\t}\n}", "func TestEmptyQueryValue(t *testing.T) {\n\t_, err := metaweather.QueryLocations(\"\")\n\tif err == nil {\n\t\tt.Fatalf(\"error is nil\")\n\t}\n\tif !strings.Contains(err.Error(), \"JSON\") {\n\t\tt.Fatalf(\"error is %v\", err)\n\t}\n}", "func TestBadCalls(t *testing.T) {\n\tserver := NewServer(business_logic.NewUserPostLogic(datasource.NewUserClient(), datasource.NewPostClient()))\n\tserver.Start(\":8181\")\n\tdefer server.Shutdown(context.Background())\n\n\ttests := []struct{\n\t\tname string\n\t\trequest string\n\t\tstatusCode int\n\t}{\n\t\t{\"noID\", \"http://localhost:8181/v1/user-posts/\", 404},\n\t\t{\"noNumberID\", \"http://localhost:8181/v1/user-posts/sdf\", 404},\n\t\t{\"IDNotFoundUpper\", \"http://localhost:8181/v1/user-posts/11\", 404},\n\t\t{\"IDNotFoundLower\", \"http://localhost:8181/v1/user-posts/0\", 404},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tresponse, err := http.Get(test.request)\n\t\t\tif err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif response.StatusCode != test.statusCode {\n\t\t\t\tt.Errorf(\"expected status code 400 instead of %d\", response.StatusCode)\n\t\t\t}\n\t\t})\n\t}\n}", "func TestHangAfterError(t *testing.T) {\n\tconnDB := openConnection(t)\n\tdefer closeConnection(t, connDB)\n\n\trows, err := connDB.QueryContext(ctx, \"SELECT 1\")\n\tdefer rows.Close()\n\n\tassertNoErr(t, err)\n\tassertNext(t, rows)\n\tassertNoNext(t, rows)\n\n\trows, err = connDB.QueryContext(ctx, \"SELECT 1+'abcd'\")\n\tverr, ok := err.(*VError)\n\tif !ok {\n\t\tt.Fatalf(\"failed to extract error VError: %v\", 
err)\n\t}\n\tassertEqual(t, verr.SQLState, \"22V02\")\n\tassertEqual(t, verr.Severity, \"ERROR\")\n\tassertEqual(t, verr.Routine, \"scanint8\")\n\tassertEqual(t, verr.ErrorCode, \"3681\")\n\tassertErr(t, err, \"Invalid input syntax for integer\")\n\n\trows, err = connDB.QueryContext(ctx, \"SELECT 2\")\n\tdefer rows.Close()\n\n\tassertNoErr(t, err)\n\tassertNext(t, rows)\n\tassertNoNext(t, rows)\n}", "func badrequest(out http.ResponseWriter, format string, args ...interface{}) {\n\tsend(http.StatusBadRequest, out, format, args...)\n}", "func (h *Handler) error(w http.ResponseWriter, error string, code int) {\n\t// TODO: Return error as JSON.\n\thttp.Error(w, error, code)\n}", "func TestFailedEndpoint1(t *testing.T) {\n\tisTesting = true\n\tvar request = Request{\n\t\tPath: \"/api/device\",\n\t\tHTTPMethod: \"GET\",\n\t}\n\tvar response, _ = Handler(request)\n\tif response.StatusCode != 404 {\n\t\tt.Errorf(\"response status code has to be 404 but is %d\", response.StatusCode)\n\t}\n\tif response.Body != `{\"message\":\"requested endpoint not found\"}` {\n\t\tt.Errorf(\"body is: %s\", response.Body)\n\t}\n}", "func (c *QueryClient) queryRequest(nrqlQuery string, queryResult interface{}) (err error) {\n\tvar request *http.Request\n\tvar response *http.Response\n\n\tqueryURL, err := c.generateQueryURL(nrqlQuery)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif queryResult == nil {\n\t\treturn errors.New(\"must have pointer for result\")\n\t}\n\n\trequest, err = http.NewRequest(\"GET\", queryURL, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trequest.Header.Add(\"Accept\", \"application/json\")\n\trequest.Header.Add(\"X-Query-Key\", c.QueryKey)\n\n\tclient := &http.Client{Timeout: c.RequestTimeout}\n\n\tresponse, err = client.Do(request)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed query request for: %v\", err)\n\t\treturn\n\t}\n\tdefer func() {\n\t\trespErr := response.Body.Close()\n\t\tif respErr != nil && err == nil {\n\t\t\terr = respErr // Don't mask previous errors\n\t\t}\n\t}()\n\n\tif response.StatusCode != http.StatusOK {\n\t\terr = fmt.Errorf(\"bad response code: %d\", response.StatusCode)\n\t\treturn\n\t}\n\n\terr = c.parseResponse(response, queryResult)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed query: %v\", err)\n\t}\n\n\treturn err\n}", "func TestSendError(t *testing.T) {\n\terr := SendError(\"Send Error\", \"https://status.btfs.io\", \"my peer id\", \"my HValue\")\n\tif err != nil {\n\t\tt.Errorf(\"Send error message to status server failed, reason: %v\", err)\n\t} else {\n\t\tt.Log(\"Send error message to status server successfully!\")\n\t}\n}", "func (r *Responder) PreconditionFailed() { r.write(http.StatusPreconditionFailed) }", "func badRequest(msg string) error {\n\treturn status.Error(codes.InvalidArgument, msg)\n}", "func TestShortQuery(t *testing.T) {\n\tvar qerr *queryError\n\tdoh, _ := NewTransport(testURL, ips, nil, nil, nil)\n\t_, err := doh.Query([]byte{})\n\tif err == nil {\n\t\tt.Error(\"Empty query should fail\")\n\t} else if !errors.As(err, &qerr) {\n\t\tt.Errorf(\"Wrong error type: %v\", err)\n\t} else if qerr.status != BadQuery {\n\t\tt.Errorf(\"Wrong error status: %d\", qerr.status)\n\t}\n\n\t_, err = doh.Query([]byte{1})\n\tif err == nil {\n\t\tt.Error(\"One byte query should fail\")\n\t} else if !errors.As(err, &qerr) {\n\t\tt.Errorf(\"Wrong error type: %v\", err)\n\t} else if qerr.status != BadQuery {\n\t\tt.Errorf(\"Wrong error status: %d\", qerr.status)\n\t}\n}", "func ReplyUnrecognisedCommand() *Reply { return &Reply{500, 
[]string{\"Unrecognised command\"}, nil} }", "func handleError(URL string, resp *http.Response) error {\n\terrBytes, _ := ioutil.ReadAll(resp.Body)\n\terrInfo := string(errBytes)\n\t// Check if we have a JSON representation of the failure, if so decode it.\n\tif resp.Header.Get(\"Content-Type\") == contentTypeJSON {\n\t\terrorResponse, err := unmarshallError(errBytes)\n\t\t//TODO (hduran-8): Obtain a logger and log the error\n\t\tif err == nil {\n\t\t\terrInfo = errorResponse.Error()\n\t\t}\n\t}\n\thttpError := &HttpError{\n\t\tresp.StatusCode, map[string][]string(resp.Header), URL, errInfo,\n\t}\n\tswitch resp.StatusCode {\n\tcase http.StatusNotFound:\n\t\treturn errors.NewNotFoundf(httpError, \"\", \"Resource at %s not found\", URL)\n\tcase http.StatusForbidden, http.StatusUnauthorized:\n\t\treturn errors.NewUnauthorisedf(httpError, \"\", \"Unauthorised URL %s\", URL)\n\tcase http.StatusBadRequest:\n\t\tdupExp, _ := regexp.Compile(\".*already exists.*\")\n\t\tif dupExp.Match(errBytes) {\n\t\t\treturn errors.NewDuplicateValuef(httpError, \"\", string(errBytes))\n\t\t}\n\t}\n\treturn httpError\n}", "func (nt NetworkTypeError) NotFound() {}", "func sendErrorResponse(func_name string, w http.ResponseWriter, http_code int, resp_body string, log_message string) {\n\tutils.Log(fmt.Sprintf(\"%s: %s\", func_name, log_message))\n\tw.WriteHeader(http_code)\n\tw.Write([]byte(resp_body))\n\treturn\n}", "func ERROR(w http.ResponseWriter, statusCode int, err error) {\n\tif err != nil {\n\t\tJSON(w, statusCode, struct {\n\t\t\tError string `json:\"error\"`\n\t\t}{\n\t\t\tError: err.Error(),\n\t\t})\n\t} else {\n\t\tJSON(w, http.StatusBadRequest, nil)\n\t}\n}", "func respondWithError(w http.ResponseWriter, code int, message string) {\n respondWithJSON(w, code, map[string]string{\"error\": message})\n}", "func internalServerError(resp *ApiResponse, msg string) error {\n if msg == \"\" {\n msg = http.StatusText(http.StatusInternalServerError)\n }\n\n resp.StatusCode = http.StatusInternalServerError\n resp.Message = []byte(msg)\n resp.ErrorMessage = msg\n\n return nil\n}", "func writeServiceError(w http.ResponseWriter) {\n\t// TODO log error\n\tw.WriteHeader(http.StatusServiceUnavailable)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tjson.NewEncoder(w).Encode(response{\"Fail connection on DB.\"})\n}", "func (r *Responder) BadGateway() { r.write(http.StatusBadGateway) }", "func (m *Command) QueryExpect(t *testing.T, index, rawQuery, query string, expected string) {\n\tresp := Do(t, \"POST\", fmt.Sprintf(\"%s/index/%s/query?%s\", m.URL(), index, rawQuery), query)\n\tif resp.StatusCode != gohttp.StatusOK {\n\t\tt.Fatalf(\"invalid status from %s: %d, body=%q\", m.ID(), resp.StatusCode, resp.Body)\n\t}\n\tlast := len(resp.Body) - 1\n\t// Trim trailing newline so we don't need it to be present in the expected data.\n\tif last >= 0 && resp.Body[last] == '\\n' {\n\t\tresp.Body = resp.Body[:last]\n\t}\n\n\tif resp.Body != expected {\n\t\tt.Fatalf(\"node %s, query %q: expected response %s, got %s\", m.ID(), query, expected, resp.Body)\n\t}\n}", "func TestFetchError(t *testing.T) {\n\tmockUCase := new(mocks.Usecase)\n\tlimit := 10\n\toffset := 0\n\tmockUCase.On(\"Fetch\", mock.Anything, limit, offset).Return(nil, models.ErrInternalServerError)\n\n\te := echo.New()\n\treq, err := http.NewRequest(echo.GET, \"/orders?page=1\", strings.NewReader(\"\"))\n\tassert.NoError(t, err)\n\n\trec := httptest.NewRecorder()\n\tc := e.NewContext(req, rec)\n\thandler := orderHttp.OrderHandler{\n\t\tOUsecase: 
mockUCase,\n\t}\n\terr = handler.FetchOrder(c)\n\trequire.NoError(t, err)\n\n\tassert.Equal(t, http.StatusInternalServerError, rec.Code)\n\tassert.JSONEq(t, `{\"error\": \"internal server error\"}`, rec.Body.String())\n\tmockUCase.AssertExpectations(t)\n}", "func LogFailedQuery(method string, query interface{}, errorResponse interface{}) {\n\tLogger.WithFields(logrus.Fields{\n\t\t\"method\": method,\n\t\t\"query\": query,\n\t\t\"response\": errorResponse,\n\t}).Error(\"daemon responded with an error\")\n\n\tcaptureFailedQuery(method, query, errorResponse)\n}", "func serviceUnavailable(resp *ApiResponse, msg string) error {\n resp.StatusCode = http.StatusServiceUnavailable\n resp.Message = []byte(msg)\n resp.ErrorMessage = msg\n\n return nil\n}", "func logHttpError(str string, rw http.ResponseWriter) {\n\tlog.Printf(\"%s\", str)\n\tvar buffer strings.Builder\n\tbuffer.WriteString(\"<!DOCTYPE html><html><head><title>\")\n\tbuffer.WriteString(str)\n\tbuffer.WriteString(\"</title></head><body>\")\n\tbuffer.WriteString(str)\n\tbuffer.WriteString(\"</body></html>\")\n\trw.WriteHeader(404)\n\trw.Write([]byte(buffer.String()))\n}", "func internalError(err error) *httpRetMsg {\n\tfmt.Fprintf(os.Stderr, \"ERROR: %+v\\n\", err)\n\treturn &httpRetMsg{code: http.StatusInternalServerError}\n}", "func (m *mockSeriesSet) Err() error {\n\treturn m.queryErr\n}", "func (r *Result) Error() string {\n return r.data.Error\n}", "func (c *Connection) Query(q string) ([]byte, error) {\n\tlog.Infof(\"c.Query: %s\", q)\n\treq, err := http.NewRequest(\"GET\", fmt.Sprintf(\"%v%v\", c.BaseURL, q), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"Accept\", \"application/json\")\n\tresp, err := c.client.Do(req)\n\tif err != nil {\n\t\tfmt.Printf(\" !!!fhir query returned err: %s\\n\", err)\n\t\treturn nil, err\n\t}\n\n\tif resp.StatusCode < 200 || resp.StatusCode > 299 {\n\t\tdefer resp.Body.Close()\n\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Query Error: %v\\n\", err)\n\t\t\treturn nil, err\n\t\t}\n\t\tes := err.Error()\n\t\tif strings.Contains(es, \"timeout\") {\n\t\t\terr = fmt.Errorf(\"408|%v\", err)\n\t\t}\n\n\t\t//fmt.Printf(\"!!!ERROR Response Status Code: %d, Status: %s\\n\", resp.StatusCode, string(b))\n\t\t// err = &FhirError{\n\t\t// \tHttpStatusCode: resp.StatusCode,\n\t\t// \tHttpStatus: resp.Status,\n\t\t// \tMessage: string(b),\n\t\t// }\n\t\terr = fmt.Errorf(\"%d|%s\", resp.StatusCode, string(b))\n\t\treturn nil, err\n\t}\n\t//fmt.Printf(\"Response Status Code: %s, Status: %s\\n\", resp.StatusCode, resp.Status)\n\tdefer resp.Body.Close()\n\treturn ioutil.ReadAll(resp.Body)\n}", "func TestFailedEndpoint0(t *testing.T) {\n\tisTesting = true\n\tvar request = Request{\n\t\tPath: \"/api/devices\",\n\t\tHTTPMethod: \"PUT\",\n\t}\n\tvar response, _ = Handler(request)\n\tif response.StatusCode != 404 {\n\t\tt.Errorf(\"response status code has to be 404 but is %d\", response.StatusCode)\n\t}\n\tif response.Body != `{\"message\":\"requested endpoint not found\"}` {\n\t\tt.Errorf(\"body is: %s\", response.Body)\n\t}\n}", "func (c *requestContext) fail(code int, msg string, args ...any) {\n\tbody := fmt.Sprintf(msg, args...)\n\tlogging.Errorf(c.Context, \"HTTP %d: %s\", code, body)\n\thttp.Error(c.Writer, body, code)\n}", "func errorResponse(conn net.Conn, response string) {\n\tconn.Write(append(data.PackString(\"ERROR\"), data.PackString(response)...))\n}", "func loadQueryFailed(targetName string, queryPath string, err error) TargetStatus {\n\terrs := 
[]error{fmt.Errorf(\"%s: %s: %s\", errorQueryFailedInit, queryPath, err)}\n\treturn TargetStatus{\n\t\tName: targetName,\n\t\tErrors: errs,\n\t\tSteps: nil,\n\t}\n}", "func (e ServiceUnavailable) Code() int { return http.StatusServiceUnavailable }", "func InternalErr(w http.ResponseWriter, desc string) {\n\tsetError(w, desc, http.StatusInternalServerError)\n}", "func (e QueryValidationError) Reason() string { return e.reason }", "func TestIndexHandlerWithErr(t *testing.T) {\n\terrMsg := \"Bad things happened\"\n\th := Handler{\n\t\tdb: MockDatabase{err: errors.New(errMsg)},\n\t}\n\treq, w := newReqParams(\"GET\")\n\n\th.Index(w, req, httprouter.Params{})\n\n\tcases := []struct {\n\t\tlabel, actual, expected interface{}\n\t}{\n\t\t{\"Response code\", w.Code, 500},\n\t\t{\"Response body contains error message\", strings.Contains(w.Body.String(), errMsg), true},\n\t}\n\n\ttestCases(t, cases)\n}", "func TestIndexHandlerWithErr(t *testing.T) {\n\terrMsg := \"Bad things happened\"\n\th := Handler{\n\t\tdb: MockDatabase{err: errors.New(errMsg)},\n\t}\n\treq, w := newReqParams(\"GET\")\n\n\th.Index(w, req, httprouter.Params{})\n\n\tcases := []struct {\n\t\tlabel, actual, expected interface{}\n\t}{\n\t\t{\"Response code\", w.Code, 500},\n\t\t{\"Response body contains error message\", strings.Contains(w.Body.String(), errMsg), true},\n\t}\n\n\ttestCases(t, cases)\n}", "func (e HTTPRequestValidationError) Reason() string { return e.reason }", "func TestEmptyResponse(t *testing.T) {\n\tdoh, _ := NewTransport(testURL, ips, nil, nil, nil)\n\ttransport := doh.(*transport)\n\trt := makeTestRoundTripper()\n\ttransport.client.Transport = rt\n\n\t// Fake server.\n\tgo func() {\n\t\t<-rt.req\n\t\t// Make an empty body.\n\t\tr, w := io.Pipe()\n\t\tw.Close()\n\t\trt.resp <- &http.Response{\n\t\t\tStatusCode: 200,\n\t\t\tBody: r,\n\t\t\tRequest: &http.Request{URL: parsedURL},\n\t\t}\n\t}()\n\n\t_, err := doh.Query(simpleQueryBytes)\n\tvar qerr *queryError\n\tif err == nil {\n\t\tt.Error(\"Empty body should cause an error\")\n\t} else if !errors.As(err, &qerr) {\n\t\tt.Errorf(\"Wrong error type: %v\", err)\n\t} else if qerr.status != BadResponse {\n\t\tt.Errorf(\"Wrong error status: %d\", qerr.status)\n\t}\n}", "func syntaxError(text string) *smtpResponse {\n\treturn response(501, text, telnet.REQUEST)\n}", "func TestAuthInvalidQuery_EmptyMessage(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.Auth(\"method\", func(r res.AuthRequest) {\n\t\t\tr.InvalidQuery(\"\")\n\t\t}))\n\t}, func(s *restest.Session) {\n\t\ts.Auth(\"test.model\", \"method\", nil).\n\t\t\tResponse().\n\t\t\tAssertError(res.ErrInvalidQuery)\n\t})\n}", "func err(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"GET\" {\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\n\tmsg := r.URL.Query().Get(\"msg\")\n\tif msg == \"\" {\n\t\tmsg = \"(no error message)\"\n\t}\n\tif _, err := data.CheckSession(r); err == nil {\n\t\tgenerateHTML(w, msg, []string{\"login.layout\", \"private.navbar\", \"error\"})\n\t} else {\n\t\tgenerateHTML(w, msg, []string{\"login.layout\", \"public.navbar\", \"error\"})\n\t}\n}", "func (e HTTPResponseValidationError) Reason() string { return e.reason }", "func TestBadRequest(t *testing.T) {\n\texpectedCode := http.StatusBadRequest\n\tbody := testEndpoint(t, \"GET\", \"/weather?location=kampala\", expectedCode)\n\n\texpectedBody := `{\"message\":\"No location/date specified\"}`\n\n\tif body != expectedBody {\n\t\tt.Errorf(\"Handler returned wrong body: got %v instead of %v\", body, 
expectedBody)\n\t}\n}", "func SendError(conn net.Conn,name string, msg string){\n\n\tdataMap:=make(map[string]interface{})\n\tdataMap[\"type\"]=\"error\"\n\tdataMap[\"name\"]=name\n\tdataMap[\"msg\"]=msg\n\tSendJSONData(conn,dataMap)\n\n\n}", "func badRequest(resp *ApiResponse, msg string) error {\n resp.StatusCode = http.StatusBadRequest\n resp.Message = []byte(msg)\n resp.ErrorMessage = msg\n\n return nil\n}", "func badRequestHandler(w http.ResponseWriter, r *http.Request, e error) {\n\tw.WriteHeader(http.StatusBadRequest)\n\tio.WriteString(w, e.Error())\n}", "func AssertFailedGET(t *testing.T, route string, expectedStatus int, expectedMessage string) {\n\tt.Helper()\n\tres, err := http.Get(route)\n\trequire.NoErrorf(t, err, \"Requesting %s\", route)\n\tdefer res.Body.Close()\n\tassert.Equal(t, expectedStatus, res.StatusCode, \"Comparing status with expected value\")\n\tAssertErrorMessage(t, res, expectedMessage)\n}", "func Query(w http.ResponseWriter, r *http.Request, format string) error {\n\n\treturn nil\n}", "func QueryNotFound(result interface{}) ([]byte, error) {\n\treturn json.MarshalIndent(QueryResult{\n\t\tStatus: http.StatusNotFound,\n\t\tResult: codec.MustMarshalJSONIndent(ModuleCdc, result),\n\t}, \"\", \" \")\n}", "func BadRequest(msg string) error {\n\tif msg == \"\" {\n\t\tmsg = \"su solicitud está en un formato incorrecto.\"\n\t}\n\treturn echo.NewHTTPError(http.StatusBadRequest, msg)\n}", "func (c *HawkularClientError) Error() string {\n\treturn fmt.Sprintf(\"Hawkular returned status code %d, error message: %s\", c.Code, c.msg)\n}", "func internalServerError(w http.ResponseWriter, r *http.Request) {\r\n\tw.WriteHeader(http.StatusInternalServerError)\r\n\tw.Write([]byte(\"internal server error\"))\r\n}", "func Error(response *http.Response) error {\n\tresult := &HTTPError{StatusCode: response.StatusCode}\n\tbody, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn result\n\t}\n\tif err = json.Unmarshal(body, result); err != nil {\n\t\tresult.Message = string(body)\n\t}\n\treturn result\n}", "func (ec *ErrConnection) QueryInt(query string, args ...interface{}) int {\n\tvar i int\n\tif ec.Err != nil {\n\t\treturn i\n\t}\n\ti, err := QueryInt(ec.Conn, query, args...)\n\tif err != nil {\n\t\tec.Err = err\n\t}\n\treturn i\n}", "func ErrMisdirectedRequestf(format string, arguments ...interface{}) *Status {\n\treturn &Status{Code: http.StatusMisdirectedRequest, Text: fmt.Sprintf(format, arguments...)}\n}", "func Error(err error) *JSONResponse {\n\treturn ErrorFromString(err.Error(), 500)\n}", "func ErrQueryFailed() sdk.Error {\n\treturn types.NewError(types.CodeReputationQueryFailed, fmt.Sprintf(\"query reputation store failed\"))\n}", "func isHttpConnError(err error) bool {\n\n\testr := err.Error()\n\treturn strings.Contains(estr, \"broken pipe\") ||\n\t\tstrings.Contains(estr, \"broken connection\") ||\n\t\tstrings.Contains(estr, \"connection reset\")\n}", "func FailCommand(err error) *HTTPResult {\n\treturn &HTTPResult{\n\t\tMessage: \"FAIL\",\n\t\tDetails: &ErrorCmd{Error: err.Error()},\n\t}\n}", "func BadRequest(rw http.ResponseWriter) {\n\tHttpError(rw, \"bad request\", 403)\n}", "func GinErrorMalfunction(c *gin.Context) {\n\terror := &ResponseError{\n\t\tMessage: \"Malfunctioned request.\",\n\t}\n\tc.JSON(http.StatusBadRequest, gin.H{\"error\": error})\n}", "func (router Router) badRequest(w http.ResponseWriter) {\n\tfailed, err := json.Marshal(types.GenericResponse{Response: false})\n\tif err != nil {\n\t\tw.Write([]byte(\"BACKEND 
ERROR\"))\n\t\treturn\n\t}\n\tw.Write(failed)\n}", "func writeErrorResponse(w http.ResponseWriter, status int, body string) {\n\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\tw.WriteHeader(status)\n\n\t_, _ = fmt.Fprintf(os.Stderr, \"error: %s\", body)\n\tif _, err := w.Write([]byte(body)); err != nil {\n\t\t_, _ = fmt.Fprintf(os.Stderr, \"cannot write to stream: %v\\n\", err)\n\t\treturn\n\t}\n}", "func (*ResponseTimeoutError) Timeout() bool { return true }", "func badServerName(w http.ResponseWriter) {\n\thttp.Error(w, \"missing or invalid servername\", 403) // intentionally vague\n}", "func (a errorServer) ServeHTTP(w http.ResponseWriter, _ *http.Request) {\n\thttp.Error(w, \"random error\", http.StatusInternalServerError)\n}" ]
[ "0.71777856", "0.62353325", "0.6109641", "0.6030236", "0.59038633", "0.57996684", "0.5759412", "0.5752646", "0.57412255", "0.5717031", "0.56211805", "0.557163", "0.5566794", "0.55465424", "0.5541418", "0.5538802", "0.55216664", "0.55215216", "0.5515991", "0.5514905", "0.5513362", "0.55092233", "0.5499094", "0.54717714", "0.54707056", "0.54577464", "0.54328567", "0.5424132", "0.5414445", "0.5404993", "0.54006773", "0.5396749", "0.5383523", "0.53787565", "0.5376403", "0.5365398", "0.5337253", "0.53298116", "0.5323635", "0.53195864", "0.5310097", "0.52832705", "0.5281461", "0.5257569", "0.523916", "0.523196", "0.5221987", "0.5210607", "0.52086806", "0.5207763", "0.5199942", "0.51985294", "0.5197166", "0.5189549", "0.51810163", "0.5173948", "0.51616585", "0.51584435", "0.5157753", "0.515614", "0.5155882", "0.5152684", "0.5148519", "0.5139105", "0.51374394", "0.5137156", "0.51353675", "0.51333696", "0.5127999", "0.5127999", "0.51222163", "0.5113492", "0.51118565", "0.51118404", "0.5106638", "0.5098212", "0.50850654", "0.50850505", "0.5084098", "0.5083229", "0.5079552", "0.50677633", "0.5063689", "0.5063249", "0.50629944", "0.5062249", "0.5060678", "0.50582004", "0.50576323", "0.5055999", "0.5050014", "0.5049984", "0.5048864", "0.5040229", "0.5039212", "0.50372046", "0.50183946", "0.50171614", "0.501451", "0.5007518" ]
0.5748074
8
Test if DoH resolver IPs are confirmed and disconfirmed when queries succeed and fail, respectively.
func TestDohIPConfirmDisconfirm(t *testing.T) { u, _ := url.Parse(testURL) doh, _ := NewTransport(testURL, ips, nil, nil, nil) transport := doh.(*transport) hostname := u.Hostname() ipmap := transport.ips.Get(hostname) // send a valid request to first have confirmed-ip set res, _ := doh.Query(simpleQueryBytes) mustUnpack(res) ip1 := ipmap.Confirmed() if ip1 == nil { t.Errorf("IP not confirmed despite valid query to %s", u) } // simulate http-fail with doh server-ip set to previously confirmed-ip rt := makeTestRoundTripper() transport.client.Transport = rt go func() { req := <-rt.req trace := httptrace.ContextClientTrace(req.Context()) trace.GotConn(httptrace.GotConnInfo{ Conn: &fakeConn{ remoteAddr: &net.TCPAddr{ IP: ip1, // confirmed-ip from before Port: 443, }}}) rt.resp <- &http.Response{ StatusCode: 509, // some non-2xx status Body: nil, Request: &http.Request{URL: u}, } }() doh.Query(simpleQueryBytes) ip2 := ipmap.Confirmed() if ip2 != nil { t.Errorf("IP confirmed (%s) despite err", ip2) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestFlaggingDecommissionedHosts(t *testing.T) {\n\tConvey(\"When flagging decommissioned hosts\", t, func() {\n\n\t\tConvey(\"only hosts in the database who are marked decommissioned\"+\n\t\t\t\" should be returned\", func() {\n\n\t\t\t// reset the db\n\t\t\trequire.NoError(t, db.ClearCollections(host.Collection), \"error clearing hosts collection\")\n\n\t\t\t// insert hosts with different statuses\n\n\t\t\thost1 := &host.Host{\n\t\t\t\tProvider: evergreen.ProviderNameMock,\n\t\t\t\tId: \"h1\",\n\t\t\t\tStatus: evergreen.HostRunning,\n\t\t\t}\n\t\t\trequire.NoError(t, host1.Insert(), \"error inserting host\")\n\n\t\t\thost2 := &host.Host{\n\t\t\t\tProvider: evergreen.ProviderNameMock,\n\t\t\t\tId: \"h2\",\n\t\t\t\tStatus: evergreen.HostTerminated,\n\t\t\t}\n\t\t\trequire.NoError(t, host2.Insert(), \"error inserting host\")\n\n\t\t\thost3 := &host.Host{\n\t\t\t\tProvider: evergreen.ProviderNameMock,\n\t\t\t\tId: \"h3\",\n\t\t\t\tStatus: evergreen.HostDecommissioned,\n\t\t\t}\n\t\t\trequire.NoError(t, host3.Insert(), \"error inserting host\")\n\n\t\t\thost4 := &host.Host{\n\t\t\t\tProvider: evergreen.ProviderNameMock,\n\t\t\t\tId: \"h4\",\n\t\t\t\tStatus: evergreen.HostDecommissioned,\n\t\t\t}\n\t\t\trequire.NoError(t, host4.Insert(), \"error inserting host\")\n\n\t\t\thost5 := &host.Host{\n\t\t\t\tProvider: evergreen.ProviderNameMock,\n\t\t\t\tId: \"h5\",\n\t\t\t\tStatus: evergreen.HostQuarantined,\n\t\t\t}\n\t\t\trequire.NoError(t, host5.Insert(), \"error inserting host\")\n\n\t\t\t// flag the decommissioned hosts - there should be 2 of them\n\t\t\tdecommissioned, err := host.FindHostsToTerminate()\n\t\t\tSo(err, ShouldBeNil)\n\t\t\tSo(len(decommissioned), ShouldEqual, 2)\n\t\t\tvar ids []string\n\t\t\tfor _, h := range decommissioned {\n\t\t\t\tids = append(ids, h.Id)\n\t\t\t}\n\t\t\tSo(util.StringSliceContains(ids, host3.Id), ShouldBeTrue)\n\t\t\tSo(util.StringSliceContains(ids, host4.Id), ShouldBeTrue)\n\t\t})\n\t})\n}", "func TestWhoisQuery(t *testing.T) {\n\t// Retry WhoisQuery up to 3 times for network timeout errors.\n\tfor i := 0; i < 3; i++ {\n\t\tres, err := WhoisQuery(\"koding.com\", \"whois.arin.net\", 5*time.Second)\n\t\tif e, ok := err.(net.Error); ok && e.Timeout() {\n\t\t\tcontinue\n\t\t}\n\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tif res == \"\" {\n\t\t\tt.Fatal(\"Whois response empty.\")\n\t\t}\n\n\t\t// Use a the street name to validate the response\n\t\tif regexp.MustCompile(`(?i)brannan`).MatchString(res) != true {\n\t\t\tt.Fatal(\"Response does not match as expected.\" +\n\t\t\t\t`Wanted the regexp \"brannan\" to match`)\n\t\t}\n\n\t\treturn\n\t}\n\n\tt.Fatal(\"exceeded max retry attempts for WhoisQuery\")\n}", "func (suite *DnsQuerySuite) TestShouldHandleQueryForManagedZone() {\n\tseedDBwithRecords(suite.DB, defaultSeedRecords)\n\n\tmockMetricsService := NewMockMetricsService()\n\n\tgo serveDNS(suite.DB, defaultDnsConfig, &mockMetricsService)\n\n\t// Avoid connection refused because the DNS server is not ready\n\t// FIXME: I tried to set the Timeout + Dialtimeout for the client\n\t// but that seem's to have no effect\n\ttime.Sleep(100 * time.Millisecond)\n\n\tclient := new(dns.Client)\n\tm := new(dns.Msg)\n\n\tm.Question = append(m.Question, dns.Question{\"a.rock.\", dns.TypeA, dns.ClassINET})\n\tm.RecursionDesired = true\n\n\tr, _, err := client.Exchange(m, \"127.0.0.1:8053\")\n\n\tif err != nil {\n\t\tfmt.Printf(\"%s\", err.Error())\n\t\tsuite.Fail(\"error on exchange\")\n\t}\n\n\tif r.Rcode != dns.RcodeSuccess {\n\t\tsuite.Fail(\" *** invalid 
answer name\")\n\t}\n\n\tsuite.Equal(len(defaultSeedRecords[0]), len(r.Answer))\n\n\tif answer, ok := r.Answer[0].(*dns.A); ok {\n\t\tsuite.True(answer.A.Equal(net.ParseIP(defaultSeedRecords[0][0].Content)))\n\t} else {\n\t\tsuite.Fail(\"Invalid dns answer type: requested a type A\")\n\t}\n}", "func (c *Checker) connectionTest() {\n\tfor i := range c.DNSList {\n\t\t_, err := net.LookupAddr(c.DNSList[i])\n\t\tif err == nil {\n\t\t\tc.Lock()\n\t\t\tif !c.connected {\n\t\t\t\tlog.Warnf(\"Internet connectivity re-established\")\n\t\t\t\tc.connected = true\n\t\t\t}\n\t\t\tc.Unlock()\n\t\t\treturn\n\t\t}\n\t}\n\n\tfor i := range c.DomainList {\n\t\t_, err := net.LookupHost(c.DomainList[i])\n\t\tif err == nil {\n\t\t\tc.Lock()\n\t\t\tif !c.connected {\n\t\t\t\tlog.Warnf(\"Internet connectivity re-established\")\n\t\t\t\tc.connected = true\n\t\t\t}\n\t\t\tc.Unlock()\n\t\t\treturn\n\t\t}\n\t}\n\n\tc.Lock()\n\tif c.connected {\n\t\tlog.Warnf(\"Internet connectivity lost\")\n\t\tc.connected = false\n\t}\n\tc.Unlock()\n}", "func main() {\n\tif len(os.Args) != 2 {\n\t\tfmt.Fprintf(os.Stderr, \"Usage: %s host\", os.Args[0])\n\t\tos.Exit(1)\n\t}\n\n\taddr, err := net.ResolveIPAddr(\"ip\", os.Args[1])\n\tif err != nil {\n\t\tfmt.Println(\"Resolution error: \", err.Error())\n\t\tos.Exit(1)\n\t}\n\n\tconn, err := net.DialIP(\"ip4:icmp\", addr, addr)\n\tcheckError(err)\n\n\tvar msg [512]byte\n\tmsg[0] = 8 // echo\n\n\tmsg[1] = 0\n\n\tmsg[2] = 0\n\tmsg[3] = 0\n\n\tmsg[4] = 0\n\tmsg[5] = 13\n\n\tmsg[6] = 0\n\tmsg[7] = 37\n\n\tlen := 8\n\n\tcheck := checkSum(msg[0:len])\n\tmsg[2] = byte(check >> 8)\n\tmsg[3] = byte(check & 255)\n\t_, err = conn.Write(msg[0:len])\n\tcheckError(err)\n\t_, err = conn.Read(msg[0:])\n\tcheckError(err)\n\n\tfmt.Println(\"got response\")\n\n\tif msg[5] == 13 {\n\t\tfmt.Println(\"indentifier matches\")\n\t}\n\n\tif msg[7] == 37 {\n\t\tfmt.Println(\"sequence matches\")\n\t}\n\n\tos.Exit(0)\n}", "func handleHealthCheck(m *MicroService, d *net.Dialer) bool {\r\n\tchange := false\r\n\tfor i, inst := range m.Instances {\r\n\t\t_, err := d.Dial(\"tcp\", inst.Host)\r\n\t\tif err != nil {\r\n\t\t\tif !m.isBlacklisted(i) {\r\n\t\t\t\tm.blackList(i, true)\r\n\t\t\t\tlogInfo(\"Instance: \" + inst.Host + \" is now marked as DOWN\")\r\n\t\t\t\tchange = true\r\n\t\t\t}\r\n\t\t} else {\r\n\t\t\tif m.isBlacklisted(i) {\r\n\t\t\t\tm.blackList(i, false)\r\n\t\t\t\tlogInfo(\"Instance: \" + inst.Host + \" is now marked as UP\")\r\n\t\t\t\tchange = true\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\treturn change\r\n}", "func IPIsDoHOnlyServer(ip netip.Addr) bool {\n\treturn nextDNSv6RangeA.Contains(ip) || nextDNSv6RangeB.Contains(ip) ||\n\t\tnextDNSv4RangeA.Contains(ip) || nextDNSv4RangeB.Contains(ip)\n}", "func (c *HetznerFloatingIPConfigurer) QueryAddress() bool {\n\tif (time.Since(c.lastAPICheck) / time.Hour) > 1 {\n\t\t/**We need to recheck the status!\n\t\t * Don't check too often because of stupid API rate limits\n\t\t */\n\t\tlog.Println(\"Cached state was too old.\")\n\t\tc.cachedState = UNKNOWN\n\t} else {\n\t\t/** no need to check, we can use \"cached\" state if set.\n\t\t * if it is set to UNKOWN, a check will be done.\n\t\t */\n\t\tif c.cachedState == CONFIGURED {\n\t\t\treturn true\n\t\t} else if c.cachedState == RELEASED {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tstr, err := c.curlQueryFloatingIP(false)\n\tif err != nil {\n\t\t//TODO\n\t\tc.cachedState = UNKNOWN\n\t} else {\n\t\tc.lastAPICheck = time.Now()\n\t}\n\n\tcurrentFailoverDestinationServerID, err := c.getActiveServerIDFromJson(str)\n\tif err != nil 
{\n\t\t//TODO\n\t\tc.cachedState = UNKNOWN\n\t}\n\n\tif c.serverID != 0 &&\n\t currentFailoverDestinationServerID == c.serverID {\n\t\t//We \"are\" the current failover destination.\n\t\tc.cachedState = CONFIGURED\n\t\treturn true\n\t} else {\n\t\tc.cachedState = RELEASED\n\t}\n\n\treturn false\n}", "func CheckHost(ip *netip.Addr, cfg *pb.Config) (*pb.ServerStatus, *ntp.Response, error) {\n\n\tlog := logger.Setup()\n\n\tif cfg.Samples == 0 {\n\t\tcfg.Samples = 3\n\t}\n\n\topts := ntp.QueryOptions{\n\t\tTimeout: 3 * time.Second,\n\t}\n\n\tconfigIP := cfg.GetIP()\n\tif configIP != nil && configIP.IsValid() {\n\t\topts.LocalAddress = configIP.String()\n\t\tif natIP := cfg.GetNatIP(); natIP != nil && natIP.IsValid() {\n\t\t\topts.LocalAddress = natIP.String()\n\t\t}\n\t} else {\n\t\tlog.Error(\"Did not get valid local configuration IP\", \"configIP\", configIP)\n\t}\n\n\tif ip.IsLoopback() {\n\t\treturn nil, nil, fmt.Errorf(\"loopback address\")\n\t}\n\tif ip.IsPrivate() {\n\t\treturn nil, nil, fmt.Errorf(\"private address\")\n\t}\n\tif ip.IsMulticast() {\n\t\treturn nil, nil, fmt.Errorf(\"multicast address\")\n\t}\n\tif !ip.IsValid() {\n\t\treturn nil, nil, fmt.Errorf(\"invalid IP\")\n\t}\n\n\tresponses := []*response{}\n\n\tfor i := int32(0); i < cfg.Samples; i++ {\n\n\t\tif i > 0 {\n\t\t\t// minimum headway time is 2 seconds, https://www.eecis.udel.edu/~mills/ntp/html/rate.html\n\t\t\ttime.Sleep(2 * time.Second)\n\t\t}\n\n\t\tipStr := ip.String()\n\t\tif ip.Is6() {\n\t\t\tipStr = \"[\" + ipStr + \"]:123\"\n\t\t}\n\n\t\tresp, err := ntp.QueryWithOptions(ipStr, opts)\n\t\tif err != nil {\n\t\t\tr := &response{\n\t\t\t\tStatus: &pb.ServerStatus{},\n\t\t\t}\n\t\t\tr.Status.SetIP(ip)\n\t\t\tif resp != nil {\n\t\t\t\tr.Response = resp\n\t\t\t\tr.Status = ntpResponseToStatus(ip, resp)\n\t\t\t}\n\t\t\tr.Error = err\n\t\t\tresponses = append(responses, r)\n\n\t\t\tlog.Debug(\"ntp query error\", \"host\", ip.String(), \"iteration\", i, \"error\", err)\n\n\t\t\tcontinue\n\t\t}\n\n\t\tstatus := ntpResponseToStatus(ip, resp)\n\n\t\tlog.Debug(\"ntp query\", \"host\", ip.String(), \"iteration\", i, \"rtt\", resp.RTT.String(), \"offset\", resp.ClockOffset, \"error\", err)\n\n\t\t// if we get an explicit bad response in any of the samples, we error out\n\t\tif resp.Stratum == 0 || resp.Stratum == 16 {\n\t\t\tif len(resp.KissCode) > 0 {\n\t\t\t\tif resp.KissCode == \"RATE\" {\n\t\t\t\t\tstatus.Offset = nil\n\t\t\t\t}\n\t\t\t\treturn status, resp, fmt.Errorf(\"%s\", resp.KissCode)\n\t\t\t}\n\n\t\t\trefText := fmt.Sprintf(\"%#x\", resp.ReferenceID)\n\n\t\t\trefIDStr := referenceIDString(resp.ReferenceID)\n\t\t\tif utf8.Valid([]byte(refIDStr)) {\n\t\t\t\trefText = refText + \", \" + refIDStr\n\t\t\t}\n\n\t\t\treturn status, resp,\n\t\t\t\tfmt.Errorf(\"bad stratum %d (referenceID: %s)\",\n\t\t\t\t\tresp.Stratum, refText)\n\t\t}\n\n\t\tif resp.Stratum > 6 {\n\t\t\treturn status, resp, fmt.Errorf(\"bad stratum %d\", resp.Stratum)\n\t\t}\n\n\t\tresponses = append(responses, &response{\n\t\t\tStatus: status,\n\t\t\tResponse: resp,\n\t\t})\n\t}\n\n\tvar best *response\n\n\t// log.Printf(\"for %s we collected %d samples, now find the best result\", ip.String(), len(statuses))\n\n\t// todo: if there are more than 2 (3?) 
samples with an offset, throw\n\t// away the offset outlier(s)\n\n\tfor _, r := range responses {\n\n\t\t// log.Printf(\"status for %s / %d: offset: %s rtt: %s err: %q\", ip.String(), i, status.Offset.AsDuration(), status.RTT.AsDuration(), status.Error)\n\n\t\tif best == nil {\n\t\t\tbest = r\n\t\t\tcontinue\n\t\t}\n\n\t\t// todo: ... and it's otherwise a valid response?\n\t\tif (r.Error == nil && best.Error != nil) || (r.Status.RTT.AsDuration() < best.Status.RTT.AsDuration()) {\n\t\t\tbest = r\n\t\t}\n\t}\n\n\t// errLog := \"\"\n\t// if len(best.Error) > 0 {\n\t// \terrLog = fmt.Sprintf(\" err: %q\", best.Error)\n\t// }\n\t// log.Printf(\"best result for %s - offset: %s rtt: %s%s\",\n\t// \tip.String(), best.Offset.AsDuration(), best.RTT.AsDuration(), errLog)\n\n\tif best.Error != nil {\n\t\treturn best.Status, best.Response, fmt.Errorf(\"%s\", best.Error)\n\t}\n\n\treturn best.Status, best.Response, nil\n}", "func checkDHCPPacketInfo(bnNum int, packet gopacket.Packet, ctx *zedrouterContext) {\n\tvar isReplyAck, needUpdate, foundDstMac, isBroadcast bool\n\tvar vifInfo []types.VifNameMac\n\tvar netstatus types.NetworkInstanceStatus\n\tvar vifTrig types.VifIPTrig\n\n\t// use the IPAssigments of the NetworkInstanceStatus, since this is switched net\n\t// and the field will not be assigned or modified by others\n\tpub := ctx.pubNetworkInstanceStatus\n\titems := pub.GetAll()\n\tfor _, st := range items {\n\t\tnetstatus = st.(types.NetworkInstanceStatus)\n\t\tif netstatus.Type != types.NetworkInstanceTypeSwitch || netstatus.BridgeNum != bnNum {\n\t\t\tcontinue\n\t\t}\n\t\tvifInfo = netstatus.Vifs\n\t\tbreak\n\t}\n\tif len(vifInfo) == 0 { // there is no Mac on the bridge\n\t\tlog.Tracef(\"checkDHCPPacketInfo: no mac on the bridge\")\n\t\treturn\n\t}\n\n\tetherLayer := packet.Layer(layers.LayerTypeEthernet)\n\tif etherLayer != nil {\n\t\tetherPkt, _ := etherLayer.(*layers.Ethernet)\n\t\tif bytes.Compare(etherPkt.DstMAC, broadcastMAC) == 0 {\n\t\t\t// some DHCP servers send replies with broadcast MAC address,\n\t\t\t// need to check those in payload to see if it's for-us\n\t\t\tisBroadcast = true\n\t\t} else {\n\t\t\tfor _, vif := range vifInfo {\n\t\t\t\tif strings.Compare(etherPkt.DstMAC.String(), vif.MacAddr) == 0 {\n\t\t\t\t\tfoundDstMac = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif !foundDstMac && !isBroadcast { // dhcp packet not for this bridge App ports\n\t\tlog.Tracef(\"checkDHCPPacketInfo: pkt no dst mac for us\\n\")\n\t\treturn\n\t}\n\n\tipLayer := packet.Layer(layers.LayerTypeIPv4)\n\tisIPv4 := (ipLayer != nil)\n\tif isIPv4 {\n\t\t// dhcp client will send discovery or request, server will send offer and Ack\n\t\t// in the code we wait for the Reply from server with Ack to confirm the client's IP address\n\t\tdhcpLayer := packet.Layer(layers.LayerTypeDHCPv4)\n\t\tif dhcpLayer != nil {\n\t\t\tdhcpv4, _ := dhcpLayer.(*layers.DHCPv4)\n\t\t\tif dhcpv4 != nil && dhcpv4.Operation == layers.DHCPOpReply {\n\t\t\t\topts := dhcpv4.Options\n\t\t\t\tfor _, opt := range opts {\n\t\t\t\t\tif opt.Type == layers.DHCPOptMessageType && int(opt.Data[0]) == int(layers.DHCPMsgTypeAck) {\n\t\t\t\t\t\tisReplyAck = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif isReplyAck {\n\t\t\t\tlog.Tracef(\"checkDHCPPacketInfo: bn%d, Xid %d, clientip %s, yourclientip %s, clienthw %v, options %v\\n\",\n\t\t\t\t\tbnNum, dhcpv4.Xid, dhcpv4.ClientIP.String(), dhcpv4.YourClientIP.String(), dhcpv4.ClientHWAddr, dhcpv4.Options)\n\t\t\t\tfor _, vif := range vifInfo {\n\t\t\t\t\tif 
strings.Compare(vif.MacAddr, dhcpv4.ClientHWAddr.String()) == 0 {\n\t\t\t\t\t\tif _, ok := netstatus.IPAssignments[vif.MacAddr]; !ok {\n\t\t\t\t\t\t\tlog.Functionf(\"checkDHCPPacketInfo: mac %v assign new IP %v\\n\", vif.MacAddr, dhcpv4.YourClientIP)\n\t\t\t\t\t\t\tnetstatus.IPAssignments[vif.MacAddr] = dhcpv4.YourClientIP\n\t\t\t\t\t\t\tneedUpdate = true\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tif netstatus.IPAssignments[vif.MacAddr].Equal(dhcpv4.YourClientIP) == false {\n\t\t\t\t\t\t\t\tlog.Functionf(\"checkDHCPPacketInfo: update mac %v, prev %v, now %v\\n\",\n\t\t\t\t\t\t\t\t\tvif.MacAddr, netstatus.IPAssignments[vif.MacAddr], dhcpv4.YourClientIP)\n\t\t\t\t\t\t\t\tnetstatus.IPAssignments[vif.MacAddr] = dhcpv4.YourClientIP\n\t\t\t\t\t\t\t\tneedUpdate = true\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tvifTrig.MacAddr = vif.MacAddr\n\t\t\t\t\t\tvifTrig.IPAddr = dhcpv4.YourClientIP\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Tracef(\"checkDHCPPacketInfo: no dhcp layer\\n\")\n\t\t}\n\t} else {\n\t\t// XXX need to come back to handle ipv6 properly, including:\n\t\t// each MAC can have both ipv4 and ipv6 addresses\n\t\t// ipv6 can be stateful with DHCPv6 or stateless with autoconfig with RS/RA/etc\n\t\t// ipv6 can be link-local, global scope and rfc 4941 with many temporary addresses\n\t\t// which we don't know which one it will use and timeout\n\t\tdhcpLayer := packet.Layer(layers.LayerTypeDHCPv6)\n\t\tif dhcpLayer != nil {\n\t\t\tdhcpv6, _ := dhcpLayer.(*layers.DHCPv6)\n\t\t\tlog.Tracef(\"DHCPv6: Msgtype %v, LinkAddr %s, PeerAddr %s, Options %v\\n\",\n\t\t\t\tdhcpv6.MsgType, dhcpv6.LinkAddr.String(), dhcpv6.PeerAddr.String(), dhcpv6.Options)\n\t\t}\n\t}\n\n\tif needUpdate {\n\t\tlog.Functionf(\"checkDHCPPacketInfo: need update %v, %v\\n\", vifInfo, netstatus.IPAssignments)\n\t\tpub := ctx.pubNetworkInstanceStatus\n\t\tpub.Publish(netstatus.Key(), netstatus)\n\t\tctx.pubAppVifIPTrig.Publish(vifTrig.MacAddr, vifTrig)\n\t\tcheckAndPublishDhcpLeases(ctx)\n\t}\n}", "func queryDelegatorProxyCheck(dlgAddr sdk.AccAddress, expIsProxy bool, expHasProxy bool,\n\texpTotalDlgTokens *sdk.Dec, expBoundToProxy *sdk.AccAddress, expBoundDelegators []sdk.AccAddress) actResChecker {\n\treturn func(t *testing.T, beforeStatus, afterStatus IValidatorStatus, resultCtx *ActionResultCtx) bool {\n\n\t\tctx := getNewContext(resultCtx.tc.mockKeeper.MountedStore, resultCtx.tc.currentHeight)\n\n\t\t//query delegator from keeper directly\n\t\tdlg, found := resultCtx.tc.mockKeeper.Keeper.GetDelegator(ctx, dlgAddr)\n\t\trequire.True(t, found)\n\n\t\tb1 := assert.Equal(t, expIsProxy, dlg.IsProxy)\n\t\tb2 := assert.Equal(t, expHasProxy, dlg.HasProxy())\n\t\tb3 := true\n\t\tif expTotalDlgTokens != nil {\n\t\t\tb3 = assert.Equal(t, expTotalDlgTokens.String(), dlg.TotalDelegatedTokens.String(), dlg)\n\t\t}\n\n\t\tvar b4 bool\n\t\tif expBoundToProxy != nil {\n\t\t\tb4 = assert.Equal(t, *expBoundToProxy, dlg.ProxyAddress)\n\t\t} else {\n\t\t\tb4 = dlg.ProxyAddress == nil\n\t\t}\n\n\t\tb5 := true\n\t\tif expBoundDelegators != nil && len(expBoundDelegators) > 0 {\n\t\t\tq := NewQuerier(resultCtx.tc.mockKeeper.Keeper)\n\t\t\tpara := types.NewQueryDelegatorParams(dlgAddr)\n\t\t\tbz, _ := types.ModuleCdc.MarshalJSON(para)\n\t\t\tdata, err := q(ctx, []string{types.QueryProxy}, abci.RequestQuery{Data: bz})\n\t\t\trequire.NoError(t, err)\n\n\t\t\trealProxiedDelegators := []sdk.AccAddress{}\n\t\t\trequire.NoError(t, ModuleCdc.UnmarshalJSON(data, &realProxiedDelegators))\n\n\t\t\tb5 = assert.Equal(t, 
len(expBoundDelegators), len(realProxiedDelegators))\n\t\t\tif b5 {\n\t\t\t\tcnt := 0\n\t\t\t\tfor _, e := range expBoundDelegators {\n\t\t\t\t\tfor _, r := range realProxiedDelegators {\n\t\t\t\t\t\tif r.Equals(e) {\n\t\t\t\t\t\t\tcnt++\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tb5 = assert.Equal(t, len(expBoundDelegators), cnt)\n\t\t\t}\n\t\t}\n\n\t\t// check if the shares correct\n\t\tb6 := true\n\t\tif len(dlg.GetShareAddedValidatorAddresses()) > 0 {\n\t\t\texpectDlgShares, err := keeper.SimulateWeight(getGlobalContext().BlockTime().Unix(), (dlg.TotalDelegatedTokens.Add(dlg.Tokens)))\n\t\t\tb6 = err == nil\n\t\t\tb6 = b6 && assert.Equal(t, expectDlgShares.String(), dlg.Shares.String(), dlg)\n\t\t} else {\n\t\t\texpectDlgShares := sdk.ZeroDec()\n\t\t\tb6 = assert.Equal(t, expectDlgShares.String(), dlg.Shares.String(), dlg)\n\t\t}\n\n\t\tconstraintCheckRes := delegatorConstraintCheck(dlg)(t, beforeStatus, afterStatus, resultCtx)\n\n\t\tr := b1 && b2 && b3 && b4 && b5 && b6 && constraintCheckRes\n\t\tif !r {\n\t\t\tresultCtx.tc.printParticipantSnapshot(resultCtx.t)\n\t\t}\n\n\t\treturn r\n\t}\n}", "func verifyServeHostnameServiceUp(c *client.Client, ns, host string, expectedPods []string, serviceIP string, servicePort int) error {\n\texecPodName := createExecPodOrFail(c, ns, \"execpod-\")\n\tdefer func() {\n\t\tdeletePodOrFail(c, ns, execPodName)\n\t}()\n\n\t// Loop a bunch of times - the proxy is randomized, so we want a good\n\t// chance of hitting each backend at least once.\n\tbuildCommand := func(wget string) string {\n\t\treturn fmt.Sprintf(\"for i in $(seq 1 %d); do %s http://%s:%d 2>&1 || true; echo; done\",\n\t\t\t50*len(expectedPods), wget, serviceIP, servicePort)\n\t}\n\tcommands := []func() string{\n\t\t// verify service from node\n\t\tfunc() string {\n\t\t\tcmd := \"set -e; \" + buildCommand(\"wget -q --timeout=0.2 --tries=1 -O -\")\n\t\t\tframework.Logf(\"Executing cmd %q on host %v\", cmd, host)\n\t\t\tresult, err := framework.SSH(cmd, host, framework.TestContext.Provider)\n\t\t\tif err != nil || result.Code != 0 {\n\t\t\t\tframework.LogSSHResult(result)\n\t\t\t\tframework.Logf(\"error while SSH-ing to node: %v\", err)\n\t\t\t}\n\t\t\treturn result.Stdout\n\t\t},\n\t\t// verify service from pod\n\t\tfunc() string {\n\t\t\tcmd := buildCommand(\"wget -q -T 1 -O -\")\n\t\t\tframework.Logf(\"Executing cmd %q in pod %v/%v\", cmd, ns, execPodName)\n\t\t\t// TODO: Use exec-over-http via the netexec pod instead of kubectl exec.\n\t\t\toutput, err := framework.RunHostCmd(ns, execPodName, cmd)\n\t\t\tif err != nil {\n\t\t\t\tframework.Logf(\"error while kubectl execing %q in pod %v/%v: %v\\nOutput: %v\", cmd, ns, execPodName, err, output)\n\t\t\t}\n\t\t\treturn output\n\t\t},\n\t}\n\n\texpectedEndpoints := sets.NewString(expectedPods...)\n\tBy(fmt.Sprintf(\"verifying service has %d reachable backends\", len(expectedPods)))\n\tfor _, cmdFunc := range commands {\n\t\tpassed := false\n\t\tgotEndpoints := sets.NewString()\n\n\t\t// Retry cmdFunc for a while\n\t\tfor start := time.Now(); time.Since(start) < kubeProxyLagTimeout; time.Sleep(5 * time.Second) {\n\t\t\tfor _, endpoint := range strings.Split(cmdFunc(), \"\\n\") {\n\t\t\t\ttrimmedEp := strings.TrimSpace(endpoint)\n\t\t\t\tif trimmedEp != \"\" {\n\t\t\t\t\tgotEndpoints.Insert(trimmedEp)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// TODO: simply checking that the retrieved endpoints is a superset\n\t\t\t// of the expected allows us to ignore intermitten network flakes that\n\t\t\t// result in output like \"wget timed 
out\", but these should be rare\n\t\t\t// and we need a better way to track how often it occurs.\n\t\t\tif gotEndpoints.IsSuperset(expectedEndpoints) {\n\t\t\t\tif !gotEndpoints.Equal(expectedEndpoints) {\n\t\t\t\t\tframework.Logf(\"Ignoring unexpected output wgetting endpoints of service %s: %v\", serviceIP, gotEndpoints.Difference(expectedEndpoints))\n\t\t\t\t}\n\t\t\t\tpassed = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tframework.Logf(\"Unable to reach the following endpoints of service %s: %v\", serviceIP, expectedEndpoints.Difference(gotEndpoints))\n\t\t}\n\t\tif !passed {\n\t\t\t// Sort the lists so they're easier to visually diff.\n\t\t\texp := expectedEndpoints.List()\n\t\t\tgot := gotEndpoints.List()\n\t\t\tsort.StringSlice(exp).Sort()\n\t\t\tsort.StringSlice(got).Sort()\n\t\t\treturn fmt.Errorf(\"service verification failed for: %s\\nexpected %v\\nreceived %v\", serviceIP, exp, got)\n\t\t}\n\t}\n\treturn nil\n}", "func checkHost(ipAddress string, suffix int, c chan string) {\n\t_, err := net.DialTimeout(\"tcp\", ipAddress+\":80\", time.Second*3)\n\tif err == nil {\n\t\tc <- strconv.Itoa(suffix) + \" is up\"\n\t} else {\n\t\tc <- strconv.Itoa(suffix) + \" is down\"\n\t}\n}", "func (suite *TestDbNqmSuite) TestGetPingTaskState(c *C) {\n\ttestedCases := []struct {\n\t\tagentId int\n\t\texpectedStatus int\n\t} {\n\t\t{2001, NO_PING_TASK}, // The agent has no ping task\n\t\t{2002, NO_PING_TASK}, // The agent has ping task, which are disabled\n\t\t{2003, HAS_PING_TASK}, // The agent has ping task(enabled, with ISP filter)\n\t\t{2004, HAS_PING_TASK}, // The agent has ping task(enabled, with province filter)\n\t\t{2005, HAS_PING_TASK}, // The agent has ping task(enabled, with city filter)\n\t\t{2006, HAS_PING_TASK}, // The agent has ping task(enabled, with name tag filter)\n\t\t{2007, HAS_PING_TASK}, // The agent has ping task(enabled, with group tag filter)\n\t\t{2010, HAS_PING_TASK_MATCH_ANY_TARGET}, // The agent has ping task(enabled, without filters)\n\t}\n\n\tfor _, v := range testedCases {\n\t\ttestedResult, err := getPingTaskState(v.agentId)\n\n\t\tc.Assert(err, IsNil)\n\t\tc.Assert(testedResult, Equals, v.expectedStatus)\n\t}\n}", "func ( ipMeta * pState ) verifyScanningIP( pRecv *packet_metadata ) bool {\n\n\tpRecvKey := constructKey(pRecv)\n\t//first check that IP itself is being scanned\n\tps, ok := ipMeta.Get(pRecvKey)\n\tif !ok {\n\t\treturn false\n\t}\n\tpMap := ps.Packet\n\n\t//second check that 4-tuple matches with default packet\n\tif (( pMap.Saddr == pRecv.Saddr ) && (pMap.Dport == pRecv.Dport) &&\n\t\t(pMap.Sport == pRecv.Sport) ) {\n\n\t\tif verifySA( pMap, pRecv) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\t/*//lets re-query for the ACKtive packets\n\tpRecv.HyperACKtive = true\n\tpRecvKey = constructKey(pRecv)\n\tps, ok = ipMeta.Get( pRecvKey )\n\tif !ok {\n\t\tpRecv.HyperACKtive = false\n\t\treturn false\n\t}\n\tpMap = ps.Packet\n\n\tif verifySA( pMap, pRecv) {\n\t\treturn true\n\t}\n\tpRecv.HyperACKtive = false\n\t*/\n\tif DebugOn() {\n\t\tfmt.Println(pMap.Saddr, \"====\")\n\t\tfmt.Println(\"recv seq num:\", pRecv.Seqnum)\n\t\tfmt.Println(\"stored seqnum: \", pMap.Seqnum)\n\t\tfmt.Println(\"recv ack num:\", pRecv.Acknum)\n\t\tfmt.Println(\"stored acknum: \", pMap.Acknum)\n\t\tfmt.Println(\"received response length: \",len(pRecv.Data))\n\t\tfmt.Println(\"stored response length: \",pMap.LZRResponseL)\n\t\tfmt.Println(pMap.Saddr ,\"====\")\n\t}\n\treturn false\n\n}", "func DetectAttacks(callback func(), addr string){\n\t// create pinger and set the IP address if it is a real IP 
address\n\t// (otherwise throw a fit)\n\tp := fastping.NewPinger()\n\tra, err := net.ResolveIPAddr(\"ip4:icmp\", addr)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tp.AddIPAddr(ra)\n\n\t// received is used to tell when a ping has been successfully received\n\t// done is for when it hasn't\n\t// create timer here so that it can be used in different scopes and can just be reset later\n\treceived := false\n\tdone := make(chan bool)\n\ttimer := time.NewTimer(100 * time.Second)\n\n\t// called when a ping response is received\n\tp.OnRecv = func(addr *net.IPAddr, rtt time.Duration) {\n\t\tlog.Println(\"Got ping response from\", addr, \"in\", rtt)\n\t\treceived = true\n\t}\n\n\t// called at the end of a loop of p.RunLoop()\n\tp.OnIdle = func() {\n\t\t// if no ping has been received, exit and go back to the main function\n\t\tif !received {\n\t\t\tcallback()\n\t\t\tdone <- true\n\t\t}\n\n\t\t// reset the timer for the next run loop\n\t\ttimer.Reset(time.Second)\n\t\treceived = false\n\t}\n\n\t// GOOOOOOOOOOOOOOOOOOOOOO!!!!!!!!!!!!!!!!!!!!\n\tp.RunLoop()\n\n\t// when either there has been an error or a timeout (DDOS ATTACK!!!), leave\n\tselect {\n\tcase <-p.Done():\n\tcase <-done:\n\t}\n\n\t// if something goes wrong, panic\n\tif p.Err() != nil {\n\t\tlog.Fatal(p.Err())\n\t}\n}", "func (h *MysqlHealthCheck) DoHealthCheck() (resHealthCheck *view.ResHealthCheck, err error) {\n\tif h.DSN == \"\" {\n\t\terr = errors.New(\"mysql dsn is nil\")\n\t\treturn\n\t}\n\t_, err = ParseDSN(h.DSN)\n\tif err != nil {\n\t\treturn\n\t}\n\tif !strings.Contains(h.DSN, \"timeout\") {\n\t\th.DSN += \"&timeout=\" + DefaultTimeOut\n\t}\n\tdb, err := Open(\"mysql\", h)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer db.Close()\n\tif db == nil {\n\t\terr = errors.New(\"can not get mysql connection\")\n\t\treturn\n\t}\n\tif err := db.DB().Ping(); err != nil {\n\t\treturn nil, err\n\t}\n\tresHealthCheck = view.HealthCheckResult(\"mysql\", true, \"success\")\n\treturn\n}", "func (suite *TestDbNqmSuite) TestTriggersOfFiltersForPingTask(c *C) {\n\ttestedCases := []struct {\n\t\tsqls []string\n\t\texpectedNumberOfIspFilters int\n\t\texpectedNumberOfProvinceFilters int\n\t\texpectedNumberOfCityFilters int\n\t\texpectedNumberOfNameTagFilters int\n\t\texpectedNumberOfGroupTagFilters int\n\t} {\n\t\t{ // Tests the trigger of insertion for filters\n\t\t\t[]string {\n\t\t\t\t`INSERT INTO nqm_pt_target_filter_name_tag(tfnt_pt_id, tfnt_nt_id) VALUES(9201, 3071), (9201, 3072)`,\n\t\t\t\t`INSERT INTO nqm_pt_target_filter_isp(tfisp_pt_id, tfisp_isp_id) VALUES(9201, 2), (9201, 3)`,\n\t\t\t\t`INSERT INTO nqm_pt_target_filter_province(tfpv_pt_id, tfpv_pv_id) VALUES(9201, 6), (9201, 7)`,\n\t\t\t\t`INSERT INTO nqm_pt_target_filter_city(tfct_pt_id, tfct_ct_id) VALUES(9201, 16), (9201, 17)`,\n\t\t\t\t`INSERT INTO nqm_pt_target_filter_group_tag(tfgt_pt_id, tfgt_gt_id) VALUES(9201, 70021), (9201, 70022)`,\n\t\t\t},\n\t\t\t2, 2, 2, 2, 2,\n\t\t},\n\t\t{ // Tests the trigger of deletion for filters\n\t\t\t[]string {\n\t\t\t\t`DELETE FROM nqm_pt_target_filter_name_tag WHERE tfnt_pt_id = 9201`,\n\t\t\t\t`DELETE FROM nqm_pt_target_filter_isp WHERE tfisp_pt_id = 9201`,\n\t\t\t\t`DELETE FROM nqm_pt_target_filter_province WHERE tfpv_pt_id = 9201`,\n\t\t\t\t`DELETE FROM nqm_pt_target_filter_city WHERE tfct_pt_id = 9201`,\n\t\t\t\t`DELETE FROM nqm_pt_target_filter_group_tag WHERE tfgt_pt_id = 9201`,\n\t\t\t},\n\t\t\t0, 0, 0, 0, 0,\n\t\t},\n\t}\n\n\tfor _, testCase := range testedCases {\n\t\t/**\n\t\t * Executes INSERT/DELETE statements\n\t\t 
*/\n\t\tDbFacade.SqlDbCtrl.InTx(commonDb.BuildTxForSqls(testCase.sqls...))\n\t\t// :~)\n\n\t\tnumberOfRows := 0\n\t\tDbFacade.SqlDbCtrl.QueryForRow(\n\t\t\tcommonDb.RowCallbackFunc(func(row *sql.Row) {\n\t\t\t\tnumberOfRows++\n\n\t\t\t\tvar numberOfIspFilters int\n\t\t\t\tvar numberOfProvinceFilters int\n\t\t\t\tvar numberOfCityFilters int\n\t\t\t\tvar numberOfNameTagFilters int\n\t\t\t\tvar numberOfGroupTagFilters int\n\n\t\t\t\trow.Scan(\n\t\t\t\t\t&numberOfIspFilters,\n\t\t\t\t\t&numberOfProvinceFilters,\n\t\t\t\t\t&numberOfCityFilters,\n\t\t\t\t\t&numberOfNameTagFilters,\n\t\t\t\t\t&numberOfGroupTagFilters,\n\t\t\t\t)\n\n\t\t\t\t/**\n\t\t\t\t * Asserts the cached value for number of filters\n\t\t\t\t */\n\t\t\t\tc.Assert(numberOfIspFilters, Equals, testCase.expectedNumberOfIspFilters);\n\t\t\t\tc.Assert(numberOfProvinceFilters, Equals, testCase.expectedNumberOfProvinceFilters);\n\t\t\t\tc.Assert(numberOfCityFilters, Equals, testCase.expectedNumberOfCityFilters);\n\t\t\t\tc.Assert(numberOfNameTagFilters, Equals, testCase.expectedNumberOfNameTagFilters);\n\t\t\t\tc.Assert(numberOfGroupTagFilters, Equals, testCase.expectedNumberOfGroupTagFilters);\n\t\t\t\t// :~)\n\t\t\t}),\n\t\t\t`\n\t\t\tSELECT\n\t\t\t\tpt_number_of_isp_filters,\n\t\t\t\tpt_number_of_province_filters,\n\t\t\t\tpt_number_of_city_filters,\n\t\t\t\tpt_number_of_name_tag_filters,\n\t\t\t\tpt_number_of_group_tag_filters\n\t\t\tFROM nqm_ping_task WHERE pt_id = 9201\n\t\t\t`,\n\t\t)\n\n\t\t// Ensures that the row is effective\n\t\tc.Assert(numberOfRows, Equals, 1)\n\t}\n}", "func Check(collection *collection.Collection, dbg, suggest bool) Results {\n\n\tresults := Results{}\n\n\t// Start tests\n\n\t/* Check to make sure that the public DNS server NS records match\n\t Check to make sure the one of the public and the private NS record servers match\n\t Check to make sure there are at least 1 NS server\n\t*/\n\n\tcollection.PublicMatchNS = reflect.DeepEqual(collection.DNS1NS, collection.DNS2NS)\n\tcollection.LocalMatchNS = reflect.DeepEqual(collection.DNS1NS, collection.LocalNS)\n\tif collection.PublicMatchNS && collection.LocalMatchNS && len(collection.LocalNS) > 0 {\n\t\tresults.ResultNS = true\n\t} else {\n\t\tresults.ResultNS = false\n\t}\n\n\t/* Check to make sure the public DNS server Glue records match\n\t Check to make sure the one of the public and the private Glue record servers match\n\t Check to make sure there the Glue record length matches the ns record length\n\t*/\n\n\tcollection.PublicMatchGlue = reflect.DeepEqual(collection.DNS1Glue, collection.DNS2Glue)\n\tcollection.LocalMatchGlue = reflect.DeepEqual(collection.DNS1Glue, collection.LocalGlue)\n\n\tif collection.PublicMatchGlue && collection.LocalMatchGlue && (len(collection.LocalNS) == len(collection.LocalGlue)) && len(collection.LocalNS) > 0 {\n\t\tresults.ResultGlue = true\n\t} else {\n\t\tresults.ResultGlue = false\n\t}\n\n\t/* Check to make sure that we can access all of the name servers and the numbers match */\n\n\tresults.ResultAccess = true\n\tfor _, a := range collection.EndpointStatus {\n\t\tif a && results.ResultAccess {\n\t\t} else {\n\t\t\tresults.ResultAccess = false\n\t\t}\n\t}\n\tif len(collection.EndpointStatus) != len(collection.LocalNS) || len(collection.EndpointStatus) < 1 {\n\t\tresults.ResultAccess = false\n\t}\n\n\t/* Check to make sure both public DNS server results match\n\t Check that the LocalDNS and one of the remotes match\n\t Check that there is more than 1 A record\n\t*/\n\n\tcollection.PublicMatchA = 
reflect.DeepEqual(collection.DNS1A, collection.DNS2A)\n\tcollection.LocalMatchA = reflect.DeepEqual(collection.DNS1A, collection.LocalA)\n\n\tif collection.PublicMatchA && collection.LocalMatchA && len(collection.LocalA) > 0 && (len(collection.LocalA) == len(collection.DNS1A)) {\n\t\tresults.ResultA = true\n\t} else {\n\t\tresults.ResultA = false\n\t}\n\n\t// check to make sure the SOA records match the domain name we expect\n\tresults.ResultSOAMatch = collection.SOAMatch\n\n\t// Show test results if suggest or debug\n\tif dbg || suggest {\n\t\tfmt.Printf(\"--------------------------------\\n\")\n\t\tdebugPrint(\"NS Record Test\", results.ResultNS)\n\t\tdebugPrint(\"Glue Record Test\", results.ResultGlue)\n\t\tdebugPrint(\"NS Access Test\", results.ResultAccess)\n\t\tdebugPrint(\"SOA Match Test\", results.ResultSOAMatch)\n\t\tdebugPrint(\"A Record Test\", results.ResultA)\n\t\tfmt.Printf(\"--------------------------------\\n\")\n\t}\n\n\t// only print datastructure if debug is on\n\tif dbg {\n\t\tcolor.Cyan.Printf(\"Results Debug:\\n%+v\\n\", results)\n\t}\n\n\treturn (results)\n}", "func TestConnectednessCorrect(t *testing.T) {\n\tnets := make([]network.Network, 4)\n\tfor i := 0; i < 4; i++ {\n\t\tnets[i] = GenSwarm(t)\n\t}\n\n\t// connect 0-1, 0-2, 0-3, 1-2, 2-3\n\n\tdial := func(a, b network.Network) {\n\t\tDivulgeAddresses(b, a)\n\t\tif _, err := a.DialPeer(context.Background(), b.LocalPeer()); err != nil {\n\t\t\tt.Fatalf(\"Failed to dial: %s\", err)\n\t\t}\n\t}\n\n\tdial(nets[0], nets[1])\n\tdial(nets[0], nets[3])\n\tdial(nets[1], nets[2])\n\tdial(nets[3], nets[2])\n\n\t// The notifications for new connections get sent out asynchronously.\n\t// There is the potential for a race condition here, so we sleep to ensure\n\t// that they have been received.\n\ttime.Sleep(time.Millisecond * 100)\n\n\t// test those connected show up correctly\n\n\t// test connected\n\texpectConnectedness(t, nets[0], nets[1], network.Connected)\n\texpectConnectedness(t, nets[0], nets[3], network.Connected)\n\texpectConnectedness(t, nets[1], nets[2], network.Connected)\n\texpectConnectedness(t, nets[3], nets[2], network.Connected)\n\n\t// test not connected\n\texpectConnectedness(t, nets[0], nets[2], network.NotConnected)\n\texpectConnectedness(t, nets[1], nets[3], network.NotConnected)\n\n\trequire.Len(t, nets[0].Peers(), 2, \"expected net 0 to have two peers\")\n\trequire.Len(t, nets[2].Peers(), 2, \"expected net 2 to have two peers\")\n\trequire.NotZerof(t, nets[1].ConnsToPeer(nets[3].LocalPeer()), \"net 1 should have no connections to net 3\")\n\trequire.NoError(t, nets[2].ClosePeer(nets[1].LocalPeer()))\n\n\ttime.Sleep(time.Millisecond * 50)\n\texpectConnectedness(t, nets[2], nets[1], network.NotConnected)\n\n\tfor _, n := range nets {\n\t\tn.Close()\n\t}\n}", "func testMdns(t *testing.T) {\n\tservice := \"_liqo._tcp\"\n\tdomain := \"local.\"\n\n\tgo clientCluster.discoveryCtrl.Register()\n\n\ttime.Sleep(1 * time.Second)\n\n\ttxts := []*discovery.TxtData{}\n\tclientCluster.discoveryCtrl.Resolve(service, domain, 3, &txts)\n\n\ttime.Sleep(1 * time.Second)\n\n\t// TODO: find better way to test mDNS, local IP is not always detected\n\tassert.Assert(t, len(txts) >= 0, \"If this line is reached test would be successful, no foreign packet can reach our testing environment at the moment\")\n}", "func testConns(t *testing.T) (c1, c2 *Conn) {\n\tt.Helper()\n\n\tvar veths [2]*net.Interface\n\tfor i, v := range []string{\"vethospf0\", \"vethospf1\"} {\n\t\tifi, err := net.InterfaceByName(v)\n\t\tif err != nil 
{\n\t\t\tvar nerr *net.OpError\n\t\t\tif errors.As(err, &nerr) && nerr.Err.Error() == \"no such network interface\" {\n\t\t\t\tt.Skipf(\"skipping, interface %q does not exist\", v)\n\t\t\t}\n\n\t\t\tt.Fatalf(\"failed to get interface %q: %v\", v, err)\n\t\t}\n\n\t\tveths[i] = ifi\n\t}\n\n\t// Now that we have the veths, make sure they're usable.\n\twaitInterfacesReady(t, veths[0], veths[1])\n\n\tvar conns [2]*Conn\n\tfor i, v := range veths {\n\t\tc, err := Listen(v)\n\t\tif err != nil {\n\t\t\tif errors.Is(err, os.ErrPermission) {\n\t\t\t\tt.Skipf(\"skipping, permission denied while trying to listen OSPFv3 on %q\", v.Name)\n\t\t\t}\n\n\t\t\tt.Fatalf(\"failed to listen OSPFv3 on %q: %v\", v.Name, err)\n\t\t}\n\n\t\tconns[i] = c\n\t\tt.Cleanup(func() { c.Close() })\n\t}\n\n\treturn conns[0], conns[1]\n}", "func TestDirectConnectionFlags(t *testing.T) {\n // Test with the equivalent of a single IP address in the -d arg: -d 1.2.3.4\n gDirects = \"1.2.3.4\"\n dirFuncs := buildDirectors(gDirects)\n director = getDirector(dirFuncs)\n \n ipv4 := \"1.2.3.4\"\n wentDirect,_ := director(ipv4)\n if wentDirect == false {\n t.Errorf(\"The IP address %s should have been sent direct, but instead was proxied\", ipv4)\n }\n\n // now make sure an address that should be proxied still works\n ipv4 = \"4.5.6.7\"\n wentDirect,_ = director(ipv4)\n if wentDirect == true {\n t.Errorf(\"The IP address %s should have been sent to an upstream proxy, but instead was sent directly\", ipv4)\n }\n\n\n // Test with the equivalent of a multiple IP addresses in the -d arg: -d 1.2.3.4,2.3.4.5\n gDirects = \"1.2.3.4,2.3.4.5\"\n dirFuncs = buildDirectors(gDirects)\n director = getDirector(dirFuncs)\n \n addrsToTest := []string{\"1.2.3.4\", \"2.3.4.5\"}\n for _,ipv4 = range addrsToTest {\n wentDirect,_ := director(ipv4)\n if wentDirect == false {\n t.Errorf(\"The IP address %s should have been sent direct, but instead was proxied\", ipv4)\n }\n }\n\n // now make sure an address that should be proxied still works\n ipv4 = \"4.5.6.7\"\n wentDirect,_ = director(ipv4)\n if wentDirect == true {\n t.Errorf(\"The IP address %s should have been sent to an upstream proxy, but instead was sent directly\", ipv4)\n }\n\n\n // Test with the equivalent of multiple IP address specs in the -d arg: -d 1.2.3.0/24,2.3.4.0/25,4.4.4.4\"\n gDirects = \"1.2.3.0/24,2.3.4.0/25,4.4.4.4\"\n dirFuncs = buildDirectors(gDirects)\n director = getDirector(dirFuncs)\n \n addrsToTest = []string{\"1.2.3.4\", \"1.2.3.254\", \"2.3.4.5\", \"4.4.4.4\"}\n for _,ipv4 = range addrsToTest {\n wentDirect,_ := director(ipv4)\n if wentDirect == false {\n t.Errorf(\"The IP address %s should have been sent direct, but instead was proxied\", ipv4)\n }\n }\n\n // now make sure an address that should be proxied still works\n addrsToTest = []string{\"4.5.6.7\", \"2.3.4.254\"}\n for _,ipv4 = range addrsToTest {\n wentDirect,_ = director(ipv4)\n if wentDirect == true {\n t.Errorf(\"The IP address %s should have been sent to an upstream proxy, but instead was sent directly\", ipv4)\n }\n }\n}", "func shouldIPing() bool {\n\treturn !initialElection &&\n\t\t!bullyImpl.EnCours() &&\n\t\t!bullyImpl.IsCoordinator()\n}", "func checkHostGenerator(expectedUsername string, expectedID string, err error) func(string, string) (bool, error) {\n\treturn func(username string, eventID string) (bool, error) {\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tif username != expectedUsername {\n\t\t\treturn false, nil\n\t\t} else if eventID != expectedID {\n\t\t\treturn false, 
nil\n\t\t}\n\t\treturn true, nil\n\t}\n}", "func checkDialWorkerLoopScheduling(t *testing.T, s1, s2 *Swarm, tc schedulingTestCase) error {\n\tt.Helper()\n\t// failDials is used to track dials which should fail in the future\n\t// at appropriate moment a message is sent to dialState.ch to trigger\n\t// failure\n\tfailDials := make(map[ma.Multiaddr]dialState)\n\t// recvCh is used to receive dial notifications for dials that will fail\n\trecvCh := make(chan struct{}, 100)\n\t// allDials tracks all pending dials\n\tallDials := make(map[ma.Multiaddr]dialState)\n\t// addrs are the peer addresses the swarm will use for dialing\n\taddrs := make([]ma.Multiaddr, 0)\n\t// create pending dials\n\t// we add success cases as a listen address on swarm\n\t// failed cases are created using makeTCPListener\n\tfor _, inp := range tc.input {\n\t\tvar failCh chan struct{}\n\t\tif inp.success {\n\t\t\t// add the address as a listen address if this dial should succeed\n\t\t\terr := s2.AddListenAddr(inp.addr)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to listen on addr: %s: err: %w\", inp.addr, err)\n\t\t\t}\n\t\t} else {\n\t\t\t// make a listener which will fail on sending a message to ch\n\t\t\tl, ch := makeTCPListener(t, inp.addr, recvCh)\n\t\t\tfailCh = ch\n\t\t\tf := func() {\n\t\t\t\terr := l.Close()\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Error(err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tdefer f()\n\t\t}\n\t\taddrs = append(addrs, inp.addr)\n\t\t// add to pending dials\n\t\tallDials[inp.addr] = dialState{\n\t\t\tch: failCh,\n\t\t\taddr: inp.addr,\n\t\t\tdelay: inp.delay,\n\t\t\tsuccess: inp.success,\n\t\t\tfailAfter: inp.failAfter,\n\t\t}\n\t}\n\t// setup the peers addresses\n\ts1.Peerstore().AddAddrs(s2.LocalPeer(), addrs, peerstore.PermanentAddrTTL)\n\n\t// create worker\n\treqch := make(chan dialRequest)\n\tresch := make(chan dialResponse)\n\tcl := newMockClock()\n\tst := cl.Now()\n\tworker1 := newDialWorker(s1, s2.LocalPeer(), reqch, cl)\n\tgo worker1.loop()\n\tdefer worker1.wg.Wait()\n\tdefer close(reqch)\n\n\t// trigger the request\n\treqch <- dialRequest{ctx: context.Background(), resch: resch}\n\n\tconnected := false\n\n\t// Advance the clock by 10 ms every iteration\n\t// At every iteration:\n\t// Check if any dial should fail. 
if it should, trigger the failure by sending a message on the\n\t// listener failCh\n\t// If there are no dials in flight check the most urgent dials have been triggered\n\t// If there are dials in flight check that the relevant dials have been triggered\n\t// Before next iteration ensure that no unexpected dials are received\nloop:\n\tfor {\n\t\t// fail any dials that should fail at this instant\n\t\tfor a, p := range failDials {\n\t\t\tif p.failAt.Before(cl.Now()) || p.failAt == cl.Now() {\n\t\t\t\tp.ch <- struct{}{}\n\t\t\t\tdelete(failDials, a)\n\t\t\t}\n\t\t}\n\t\t// if there are no pending dials, next dial should have been triggered\n\t\ttrigger := len(failDials) == 0\n\n\t\t// mi is the minimum delay of pending dials\n\t\t// if trigger is true, all dials with miDelay should have been triggered\n\t\tmi := time.Duration(math.MaxInt64)\n\t\tfor _, ds := range allDials {\n\t\t\tif ds.delay < mi {\n\t\t\t\tmi = ds.delay\n\t\t\t}\n\t\t}\n\t\tfor a, ds := range allDials {\n\t\t\tif (trigger && mi == ds.delay) ||\n\t\t\t\tcl.Now().After(st.Add(ds.delay)) ||\n\t\t\t\tcl.Now() == st.Add(ds.delay) {\n\t\t\t\tif ds.success {\n\t\t\t\t\t// check for success and exit\n\t\t\t\t\tselect {\n\t\t\t\t\tcase r := <-resch:\n\t\t\t\t\t\tif r.conn == nil {\n\t\t\t\t\t\t\treturn errors.New(\"expected connection to succeed\")\n\t\t\t\t\t\t}\n\t\t\t\t\t// High timeout here is okay. We will exit whenever the other branch\n\t\t\t\t\t// is triggered\n\t\t\t\t\tcase <-time.After(10 * time.Second):\n\t\t\t\t\t\treturn errors.New(\"expected to receive a response\")\n\t\t\t\t\t}\n\t\t\t\t\tconnected = true\n\t\t\t\t\tbreak loop\n\t\t\t\t} else {\n\t\t\t\t\t// ensure that a failing dial attempt happened but didn't succeed\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-recvCh:\n\t\t\t\t\tcase <-resch:\n\t\t\t\t\t\treturn errors.New(\"didn't expect a response\")\n\t\t\t\t\t// High timeout here is okay. We will exit whenever the other branch\n\t\t\t\t\t// is triggered\n\t\t\t\t\tcase <-time.After(10 * time.Second):\n\t\t\t\t\t\treturn errors.New(\"didn't receive a dial attempt notification\")\n\t\t\t\t\t}\n\t\t\t\t\tfailDials[a] = dialState{\n\t\t\t\t\t\tch: ds.ch,\n\t\t\t\t\t\tfailAt: cl.Now().Add(ds.failAfter),\n\t\t\t\t\t\taddr: a,\n\t\t\t\t\t\tdelay: ds.delay,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tdelete(allDials, a)\n\t\t\t}\n\t\t}\n\t\t// check for unexpected dials\n\t\tselect {\n\t\tcase <-recvCh:\n\t\t\treturn errors.New(\"no dial should have succeeded at this instant\")\n\t\tdefault:\n\t\t}\n\n\t\t// advance the clock\n\t\tcl.AdvanceBy(10 * time.Millisecond)\n\t\t// nothing more to do. 
exit\n\t\tif len(failDials) == 0 && len(allDials) == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif connected {\n\t\t// ensure we don't receive any extra connections\n\t\tselect {\n\t\tcase <-recvCh:\n\t\t\treturn errors.New(\"didn't expect a dial attempt\")\n\t\tcase <-time.After(100 * time.Millisecond):\n\t\t}\n\t} else {\n\t\t// ensure that we do receive the final error response\n\t\tselect {\n\t\tcase r := <-resch:\n\t\t\trequire.Error(t, r.err)\n\t\tcase <-time.After(100 * time.Millisecond):\n\t\t\treturn errors.New(\"expected to receive response\")\n\t\t}\n\t}\n\t// check if this test didn't take too much time\n\tif cl.Now().Sub(st) > tc.maxDuration {\n\t\treturn fmt.Errorf(\"expected test to finish early: expected %d, took: %d\", tc.maxDuration, cl.Now().Sub(st))\n\t}\n\treturn nil\n}", "func waitForIPAM() bool {\n\tfor {\n\t\tcmd := exec.Command(\"./grpc-health-probe\", \"-addr\", \"127.0.0.1:50051\", \">\", \"/dev/null\", \"2>&1\")\n\t\tif err := cmd.Run(); err == nil {\n\t\t\treturn true\n\t\t}\n\t}\n}", "func okHealthCheck(proxy *Proxy) error {\n\treturn nil\n}", "func TestFacts(t *testing.T) {\n\t// Test with query\n\tquery := `[\"=\", \"certname\", \"foobar.puppetlabs.net\"]`\n\tsetupGetResponder(t, facts, \"query=\"+query, \"facts-response.json\")\n\tactual, err := pdbClient.Facts(query, nil, nil)\n\trequire.Nil(t, err)\n\trequire.Equal(t, expectedFacts, actual)\n}", "func (cd *Coredns) testDNS() {\n\tvar successCount int\n\n\t//1. readEtcResolvConf -> compare nameserver with ClusterIP\n\t//nameserver either should be coredns clusterIP or nodeLocalcache DNS IP\n\trc := &ResolvConf{}\n\tdnstest := &Dnstest{}\n\n\terr := rc.readResolvConf()\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to read /etc/resolv.conf file: %s\", err)\n\t\tdnstest.DnsResolution = \"Failed\"\n\t\t//cd.Dnstest = false\n\t\tcd.Dnstest = *dnstest\n\t\treturn\n\t}\n\tcd.ResolvConf = *rc\n\tlog.Infof(\"resolvconf values are: %+v\", rc)\n\n\t//2. Match nameserver in /etc/resolv.conf with ClusterIP ->it should match\n\t//from the nameserver IP -> check its coredns or nodeLocalDNSCache\n\tdnstest.Description = \"tests the internal and external DNS queries against ClusterIP and two Coredns Pod IPs\"\n\n\tif rc.Nameserver[0] == cd.ClusterIP {\n\t\tlog.Infof(\"Pod's nameserver is matching to ClusterIP: %s\", rc.Nameserver[0])\n\t} else if rc.Nameserver[0] == \"169.254.20.10\" {\n\t\tcd.HasNodeLocalCache = true\n\t\tlog.Infof(\"Pod's nameserver is matching to NodeLocal DNS Cache: %s\", rc.Nameserver[0])\n\t} else {\n\t\tlog.Warnf(\"Pod's Nameserver is not set to Coredns clusterIP or NodeLocal Cache IP...Review the --cluster-dns parameter of kubelet or check dnsPolicy field of Pod\")\n\t}\n\n\t//3. Test the DNS queries against multiple domains and host\n\t//As per miekg/dns library, domain names MUST be fully qualified before sending them, unqualified names in a message will result in a packing failure.\n\t//Fqdn() just adds . at the end of the query\n\t//If you make query for \"kuberenetes\" then query will be sent to COREDNS as \"kubernetes.\"\n\t//Due to that used FQDN for kubernetes like kubernetes.default.svc.cluster.local\n\tdomains := []string{\"amazon.com\", \"kubernetes.default.svc.cluster.local\"}\n\tdnstest.DomainsTested = domains\n\n\tnameservers := make([]string, 0, 3)\n\tnameservers = append(nameservers, rc.Nameserver...)\n\tnameservers = append(nameservers, cd.EndpointsIP[:2]...) //select only 2 endpoints\n\n\t//tests each DOMAIN against 3 NAMESERVERS (i.e. 
1 ClusterIP and 2 COREDNS ENDPOINTS)\n\tdnstest.DnsTestResultForDomains = make([]DnsTestResultForDomain, 0)\n\n\tfor _, dom := range domains {\n\t\tfor _, ns := range nameservers {\n\t\t\tresult := lookupIP(dom, []string{ns})\n\t\t\tdnstest.DnsTestResultForDomains = append(dnstest.DnsTestResultForDomains, *result)\n\t\t}\n\t}\n\n\tfor _, res := range dnstest.DnsTestResultForDomains {\n\t\tif res.Result == \"success\" {\n\t\t\tsuccessCount++\n\t\t}\n\t}\n\tif successCount != len(dnstest.DnsTestResultForDomains) {\n\t\tdnstest.DnsResolution = \"failed\"\n\t}\n\tdnstest.DnsResolution = \"success\"\n\n\tcd.Dnstest = *dnstest\n\t//cd.Dnstest = success\n\tlog.Debugf(\"DNS test completed: %v *dnstest: %v\", cd.Dnstest, *dnstest)\n\n}", "func (w *worker) extractHostsFailingCheck(ctx context.Context, hosts []resources.Host) ([]resources.Host, fail.Error) {\n\tvar concernedHosts []resources.Host\n\tdones := map[resources.Host]chan fail.Error{}\n\tres := map[resources.Host]chan resources.Results{}\n\n\tsettings := w.settings\n\tif w.cluster != nil {\n\t\tsettings.IgnoreSuitability = true\n\t}\n\n\tfor _, h := range hosts {\n\t\td := make(chan fail.Error)\n\t\tr := make(chan resources.Results)\n\t\tdones[h] = d\n\t\tres[h] = r\n\t\tgo func(host resources.Host, res chan resources.Results, done chan fail.Error) {\n\t\t\tr2, innerXErr := w.feature.Check(ctx, host, w.variables, settings)\n\t\t\tif innerXErr != nil {\n\t\t\t\tres <- nil\n\t\t\t\tdone <- innerXErr\n\t\t\t\treturn\n\t\t\t}\n\t\t\tres <- r2\n\t\t\tdone <- nil\n\t\t}(h, r, d)\n\t}\n\tfor h := range dones {\n\t\tr := <-res[h]\n\t\td := <-dones[h]\n\t\tif d != nil {\n\t\t\treturn nil, d\n\t\t}\n\t\tif !r.Successful() {\n\t\t\tconcernedHosts = append(concernedHosts, h)\n\t\t}\n\t}\n\treturn concernedHosts, nil\n}", "func TestNDPNeighborAdvert(t *testing.T) {\n\tb := []byte{\n\t\t160, 0, 0, 0,\n\t\t1, 2, 3, 4,\n\t\t5, 6, 7, 8,\n\t\t9, 10, 11, 12,\n\t\t13, 14, 15, 16,\n\t}\n\n\t// Test getting the Target Address.\n\tna := NDPNeighborAdvert(b)\n\taddr := testutil.MustParse6(\"102:304:506:708:90a:b0c:d0e:f10\")\n\tif got := na.TargetAddress(); got != addr {\n\t\tt.Errorf(\"got TargetAddress = %s, want %s\", got, addr)\n\t}\n\n\t// Test getting the Router Flag.\n\tif got := na.RouterFlag(); !got {\n\t\tt.Errorf(\"got RouterFlag = false, want = true\")\n\t}\n\n\t// Test getting the Solicited Flag.\n\tif got := na.SolicitedFlag(); got {\n\t\tt.Errorf(\"got SolicitedFlag = true, want = false\")\n\t}\n\n\t// Test getting the Override Flag.\n\tif got := na.OverrideFlag(); !got {\n\t\tt.Errorf(\"got OverrideFlag = false, want = true\")\n\t}\n\n\t// Test updating the Target Address.\n\taddr2 := testutil.MustParse6(\"1112:1314:1516:1718:191a:1b1c:1d1e:1f11\")\n\tna.SetTargetAddress(addr2)\n\tif got := na.TargetAddress(); got != addr2 {\n\t\tt.Errorf(\"got TargetAddress = %s, want %s\", got, addr2)\n\t}\n\t// Make sure the address got updated in the backing buffer.\n\tif got := tcpip.AddrFrom16Slice(b[ndpNATargetAddressOffset:][:IPv6AddressSize]); got != addr2 {\n\t\tt.Errorf(\"got targetaddress buffer = %s, want %s\", got, addr2)\n\t}\n\n\t// Test updating the Router Flag.\n\tna.SetRouterFlag(false)\n\tif got := na.RouterFlag(); got {\n\t\tt.Errorf(\"got RouterFlag = true, want = false\")\n\t}\n\n\t// Test updating the Solicited Flag.\n\tna.SetSolicitedFlag(true)\n\tif got := na.SolicitedFlag(); !got {\n\t\tt.Errorf(\"got SolicitedFlag = false, want = true\")\n\t}\n\n\t// Test updating the Override Flag.\n\tna.SetOverrideFlag(false)\n\tif got := 
na.OverrideFlag(); got {\n\t\tt.Errorf(\"got OverrideFlag = true, want = false\")\n\t}\n\n\t// Make sure flags got updated in the backing buffer.\n\tif got := b[ndpNAFlagsOffset]; got != 64 {\n\t\tt.Errorf(\"got flags byte = %d, want = 64\", got)\n\t}\n}", "func CheckPing(addr string) bool {\n\tgotit := false\n\tfor {\n\t\tconn, err := net.Dial(\"tcp\", addr+\":22\")\n\t\tif err == nil {\n\t\t\t// log.Fatal(err.Error())\n\t\t\tfmt.Print(\"got conncetion\")\n\t\t\tgotit = true\n\t\t\tconn.Close()\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(10 * time.Second)\n\t}\n\treturn gotit\n}", "func (q *quitTest) checkNoLeases(ctx context.Context, nodeID int) {\n\t// We need to use SQL against a node that's not the one we're\n\t// shutting down.\n\totherNodeID := 1 + nodeID%q.c.spec.NodeCount\n\n\t// Now we're going to check two things:\n\t//\n\t// 1) *immediately*, that every range in the cluster has a lease\n\t// some other place than nodeID.\n\t//\n\t// Note that for with this condition, it is possible that _some_\n\t// replica of any given range think that the leaseholder is\n\t// nodeID, even though _another_ replica has become leaseholder\n\t// already. That's because followers can lag behind and\n\t// drain does not wait for followers to catch up.\n\t// https://github.com/cockroachdb/cockroach/issues/47100\n\t//\n\t// 2) *eventually* that every other node than nodeID has no range\n\t// replica whose lease refers to nodeID, i.e. the followers\n\t// have all caught up.\n\t// Note: when issue #47100 is fixed, this 2nd condition\n\t// must be true immediately -- drain is then able to wait\n\t// for all followers to learn who the new leaseholder is.\n\n\tif err := testutils.SucceedsSoonError(func() error {\n\t\t// To achieve that, we ask first each range in turn for its range\n\t\t// report.\n\t\t//\n\t\t// For condition (1) we accumulate all the known ranges in\n\t\t// knownRanges, and assign them the node ID of their leaseholder\n\t\t// whenever it is not nodeID. Then at the end we check that every\n\t\t// entry in the map has a non-zero value.\n\t\tknownRanges := map[string]int{}\n\t\t//\n\t\t// For condition (2) we accumulate the unwanted leases in\n\t\t// invLeaseMap, then check at the end that the map is empty.\n\t\tinvLeaseMap := map[int][]string{}\n\t\tfor i := 1; i <= q.c.spec.NodeCount; i++ {\n\t\t\tif i == nodeID {\n\t\t\t\t// Can't request this node. Ignore.\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tq.t.l.Printf(\"retrieving ranges for node %d\\n\", i)\n\t\t\t// Get the report via HTTP.\n\t\t\t// Flag -s is to remove progress on stderr, so that the buffer\n\t\t\t// contains the JSON of the response and nothing else.\n\t\t\tbuf, err := q.c.RunWithBuffer(ctx, q.t.l, q.c.Node(otherNodeID),\n\t\t\t\t\"curl\", \"-s\", fmt.Sprintf(\"http://%s/_status/ranges/%d\",\n\t\t\t\t\tq.c.InternalAdminUIAddr(ctx, q.c.Node(otherNodeID))[0], i))\n\t\t\tif err != nil {\n\t\t\t\tq.Fatal(err)\n\t\t\t}\n\t\t\t// We need just a subset of the response. 
Make an ad-hoc\n\t\t\t// struct with just the bits of interest.\n\t\t\ttype jsonOutput struct {\n\t\t\t\tRanges []struct {\n\t\t\t\t\tState struct {\n\t\t\t\t\t\tState struct {\n\t\t\t\t\t\t\tDesc struct {\n\t\t\t\t\t\t\t\tRangeID string `json:\"rangeId\"`\n\t\t\t\t\t\t\t} `json:\"desc\"`\n\t\t\t\t\t\t\tLease struct {\n\t\t\t\t\t\t\t\tReplica struct {\n\t\t\t\t\t\t\t\t\tNodeID int `json:\"nodeId\"`\n\t\t\t\t\t\t\t\t} `json:\"replica\"`\n\t\t\t\t\t\t\t} `json:\"lease\"`\n\t\t\t\t\t\t} `json:\"state\"`\n\t\t\t\t\t} `json:\"state\"`\n\t\t\t\t} `json:\"ranges\"`\n\t\t\t}\n\t\t\tvar details jsonOutput\n\t\t\tif err := json.Unmarshal(buf, &details); err != nil {\n\t\t\t\tq.Fatal(err)\n\t\t\t}\n\t\t\t// Some sanity check.\n\t\t\tif len(details.Ranges) == 0 {\n\t\t\t\tq.Fatal(\"expected some ranges from RPC, got none\")\n\t\t\t}\n\t\t\t// Is there any range whose lease refers to nodeID?\n\t\t\tvar invalidLeases []string\n\t\t\tfor _, r := range details.Ranges {\n\t\t\t\t// Some more sanity check.\n\t\t\t\tif r.State.State.Lease.Replica.NodeID == 0 {\n\t\t\t\t\tq.Fatalf(\"expected a valid lease state, got %# v\", pretty.Formatter(r))\n\t\t\t\t}\n\t\t\t\tcurLeaseHolder := knownRanges[r.State.State.Desc.RangeID]\n\t\t\t\tif r.State.State.Lease.Replica.NodeID == nodeID {\n\t\t\t\t\t// As per condition (2) above we want to know which ranges\n\t\t\t\t\t// have an unexpected left over lease on nodeID.\n\t\t\t\t\tinvalidLeases = append(invalidLeases, r.State.State.Desc.RangeID)\n\t\t\t\t} else {\n\t\t\t\t\t// As per condition (1) above we track in knownRanges if there\n\t\t\t\t\t// is at least one known other than nodeID that thinks that\n\t\t\t\t\t// the lease has been transferred.\n\t\t\t\t\tcurLeaseHolder = r.State.State.Lease.Replica.NodeID\n\t\t\t\t}\n\t\t\t\tknownRanges[r.State.State.Desc.RangeID] = curLeaseHolder\n\t\t\t}\n\t\t\tif len(invalidLeases) > 0 {\n\t\t\t\tinvLeaseMap[i] = invalidLeases\n\t\t\t}\n\t\t}\n\t\t// (1): is there a range with no replica outside of nodeID?\n\t\tvar leftOver []string\n\t\tfor r, n := range knownRanges {\n\t\t\tif n == 0 {\n\t\t\t\tleftOver = append(leftOver, r)\n\t\t\t}\n\t\t}\n\t\tif len(leftOver) > 0 {\n\t\t\tq.Fatalf(\"(1) ranges with no lease outside of node %d: %# v\", nodeID, pretty.Formatter(leftOver))\n\t\t}\n\t\t// (2): is there a range with left over replicas on nodeID?\n\t\t//\n\t\t// TODO(knz): Eventually we want this condition to be always\n\t\t// true, i.e. fail the test immediately if found to be false\n\t\t// instead of waiting. (#47100)\n\t\tif len(invLeaseMap) > 0 {\n\t\t\terr := errors.Newf(\n\t\t\t\t\"(2) ranges with remaining leases on node %d, per node: %# v\",\n\t\t\t\tnodeID, pretty.Formatter(invLeaseMap))\n\t\t\tq.t.l.Printf(\"condition failed: %v\\n\", err)\n\t\t\tq.t.l.Printf(\"retrying until SucceedsSoon has enough...\\n\")\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}); err != nil {\n\t\tq.Fatal(err)\n\t}\n\n\tdb := q.c.Conn(ctx, otherNodeID)\n\tdefer db.Close()\n\t// For good measure, also write to the table. 
This ensures it\n\t// remains available.\n\tif _, err := db.ExecContext(ctx, `UPDATE t SET y = y + 1`); err != nil {\n\t\tq.Fatal(err)\n\t}\n}", "func checkAndResend(s *server, ConnID int) {\n\t// check the waitingForAckMessage to see if there is pending data to resend\n\tfor k, v := range s.clientMap[ConnID].waitingForAckMessage {\n\t\tif v.epochToSent == 0 {\n\t\t\tresendDataToClient(s, *s.clientMap[ConnID].waitingForAckMessage[k].message)\n\t\t\ts.clientMap[ConnID].currentEpochAction = true\n\t\t\t// reset the interval\n\t\t\tif s.params.MaxBackOffInterval == 0 {\n\t\t\t\ts.clientMap[ConnID].waitingForAckMessage[k].epochToSent = 0\n\t\t\t} else {\n\t\t\t\tcur := s.clientMap[ConnID].waitingForAckMessage[k].curInterval\n\t\t\t\tif cur == 0 {\n\t\t\t\t\tcur++\n\t\t\t\t} else {\n\t\t\t\t\tcur = cur * 2\n\t\t\t\t}\n\t\t\t\tif cur > s.params.MaxBackOffInterval {\n\t\t\t\t\tcur = s.params.MaxBackOffInterval\n\t\t\t\t}\n\t\t\t\ts.clientMap[ConnID].waitingForAckMessage[k].curInterval = cur\n\t\t\t}\n\t\t} else {\n\t\t\ts.clientMap[ConnID].waitingForAckMessage[k].epochToSent = s.clientMap[ConnID].waitingForAckMessage[k].epochToSent - 1\n\t\t}\n\t}\n\t// if currentEpochAction is false, then send ack(0) as required\n\tif s.clientMap[ConnID].currentEpochAction == false {\n\t\tif verbose {\n\t\t\tfmt.Printf(\" ACK0 to client:%v\\n\", ConnID)\n\t\t}\n\t\tsendAckToClient(s, ConnID, 0, s.clientMap[ConnID].clientAddressMap)\n\t}\n}", "func waitForConfirmation(algodClient algod.Client, txID string) {\n\tfor {\n\t\tpt, err := algodClient.PendingTransactionInformation(txID)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"waiting for confirmation... (pool error, if any): %s\\n\", err)\n\t\t\tcontinue\n\t\t}\n\t\tif pt.ConfirmedRound > 0 {\n\t\t\tfmt.Printf(\"Transaction \"+pt.TxID+\" confirmed in round %d\\n\", pt.ConfirmedRound)\n\t\t\tbreak\n\t\t}\n\t\tnodeStatus, err := algodClient.Status()\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"error getting algod status: %s\\n\", err)\n\t\t\treturn\n\t\t}\n\t\talgodClient.StatusAfterBlock(nodeStatus.LastRound + 1)\n\t}\n}", "func (ss *SNSServer) DnsReady() (e error) {\n\n\t// if an SOA provider isn't given, we're done\n\tif ss.SOAProvider == \"\" {\n\t\treturn nil\n\t}\n\n\tvar (\n\t\tctx context.Context\n\t\tcancel context.CancelFunc\n\t)\n\n\tif ss.waitForDns > 0 {\n\t\tctx, cancel = context.WithTimeout(context.Background(), ss.waitForDns)\n\t} else {\n\t\tctx, cancel = context.WithCancel(context.Background())\n\t}\n\tdefer cancel()\n\n\t// Creating the dns client for our query\n\tclient := dns.Client{\n\t\tNet: \"tcp\", // tcp to connect to the SOA provider? 
or udp (default)?\n\t\tDialer: &net.Dialer{\n\t\t\tTimeout: ss.waitForDns,\n\t\t},\n\t}\n\t// the message contains what we are looking for - the SOA record of the host\n\tmsg := dns.Msg{}\n\tmsg.SetQuestion(strings.SplitN(ss.SelfUrl.Host, \":\", 2)[0]+\".\", dns.TypeANY)\n\n\tdefer cancel()\n\n\tvar check = func() <-chan struct{} {\n\t\tvar channel = make(chan struct{})\n\n\t\tgo func(c chan struct{}) {\n\t\t\tvar (\n\t\t\t\terr error\n\t\t\t\tresponse *dns.Msg\n\t\t\t)\n\n\t\t\tfor {\n\t\t\t\t// sending the dns query to the soa provider\n\t\t\t\tresponse, _, err = client.Exchange(&msg, ss.SOAProvider)\n\t\t\t\t// if we found a record, then we are done\n\t\t\t\tif err == nil && response != nil && response.Rcode == dns.RcodeSuccess && len(response.Answer) > 0 {\n\t\t\t\t\tc <- struct{}{}\n\t\t\t\t\tss.metrics.DnsReady.Add(1.0)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\t// otherwise, we keep trying\n\t\t\t\tss.metrics.DnsReadyQueryCount.Add(1.0)\n\t\t\t\tss.logger.Info(\"checking if server's DNS is ready\",\n\t\t\t\t\tzap.String(\"endpoint\", strings.SplitN(ss.SelfUrl.Host, \":\", 2)[0]+\".\"), zap.Error(err), zap.Any(\"response\", response))\n\t\t\t\ttime.Sleep(time.Second)\n\t\t\t}\n\t\t}(channel)\n\n\t\treturn channel\n\t}\n\n\tselect {\n\tcase <-check():\n\tcase <-ctx.Done():\n\t\te = ctx.Err()\n\t}\n\n\treturn\n}", "func expectPeersEventually(t *testing.T, s *libp2p.Service, addrs ...boson.Address) {\n\tt.Helper()\n\n\tvar peers []p2p.Peer\n\tfor i := 0; i < 100; i++ {\n\t\tpeers = s.Peers()\n\t\tif len(peers) == len(addrs) {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(10 * time.Millisecond)\n\t}\n\n\tif len(peers) != len(addrs) {\n\t\tt.Fatalf(\"got peers %v, want %v\", len(peers), len(addrs))\n\t}\n\n\tsort.Slice(addrs, func(i, j int) bool {\n\t\treturn bytes.Compare(addrs[i].Bytes(), addrs[j].Bytes()) == -1\n\t})\n\tsort.Slice(peers, func(i, j int) bool {\n\t\treturn bytes.Compare(peers[i].Address.Bytes(), peers[j].Address.Bytes()) == -1\n\t})\n\n\tfor i, got := range peers {\n\t\twant := addrs[i]\n\t\tif !got.Address.Equal(want) {\n\t\t\tt.Errorf(\"got %v peer %s, want %s\", i, got.Address, want)\n\t\t}\n\t}\n}", "func testQuiescentTunnels(t *testing.T) {\n\tcases := []struct {\n\t\tname string\n\t\tcfg TunnelConfig\n\t\texpectFail bool\n\t}{\n\t\t{\n\t\t\tname: \"reject L2TPv2 IP encap\",\n\t\t\tcfg: TunnelConfig{\n\t\t\t\tLocal: \"127.0.0.1:6000\",\n\t\t\t\tPeer: \"localhost:5000\",\n\t\t\t\tVersion: ProtocolVersion2,\n\t\t\t\tTunnelID: 1,\n\t\t\t\tPeerTunnelID: 1001,\n\t\t\t\tEncap: EncapTypeIP,\n\t\t\t},\n\t\t\t// L2TPv2 doesn't support IP encap\n\t\t\texpectFail: true,\n\t\t},\n\t\t{\n\t\t\tname: \"reject L2TPv2 config with no tunnel IDs\",\n\t\t\tcfg: TunnelConfig{\n\t\t\t\tLocal: \"127.0.0.1:6000\",\n\t\t\t\tPeer: \"localhost:5000\",\n\t\t\t\tVersion: ProtocolVersion2,\n\t\t\t\tEncap: EncapTypeUDP,\n\t\t\t},\n\t\t\t// Must call out tunnel IDs\n\t\t\texpectFail: true,\n\t\t},\n\t\t{\n\t\t\tname: \"reject L2TPv3 config with no tunnel IDs\",\n\t\t\tcfg: TunnelConfig{\n\t\t\t\tLocal: \"127.0.0.1:6000\",\n\t\t\t\tPeer: \"localhost:5000\",\n\t\t\t\tVersion: ProtocolVersion3,\n\t\t\t\tEncap: EncapTypeUDP,\n\t\t\t},\n\t\t\t// Must call out control connection IDs\n\t\t\texpectFail: true,\n\t\t},\n\t\t{\n\t\t\tname: \"L2TPv2 UDP AF_INET\",\n\t\t\tcfg: TunnelConfig{\n\t\t\t\tLocal: \"127.0.0.1:6000\",\n\t\t\t\tPeer: \"localhost:5000\",\n\t\t\t\tVersion: ProtocolVersion2,\n\t\t\t\tTunnelID: 1,\n\t\t\t\tPeerTunnelID: 1001,\n\t\t\t\tEncap: EncapTypeUDP,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"L2TPv2 
UDP AF_INET6\",\n\t\t\tcfg: TunnelConfig{\n\t\t\t\tLocal: \"[::1]:6000\",\n\t\t\t\tPeer: \"[::1]:5000\",\n\t\t\t\tVersion: ProtocolVersion2,\n\t\t\t\tTunnelID: 2,\n\t\t\t\tPeerTunnelID: 1002,\n\t\t\t\tEncap: EncapTypeUDP,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"L2TPv3 UDP AF_INET\",\n\t\t\tcfg: TunnelConfig{\n\t\t\t\tLocal: \"127.0.0.1:6000\",\n\t\t\t\tPeer: \"localhost:5000\",\n\t\t\t\tVersion: ProtocolVersion3,\n\t\t\t\tTunnelID: 3,\n\t\t\t\tPeerTunnelID: 1003,\n\t\t\t\tEncap: EncapTypeUDP,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"L2TPv3 UDP AF_INET6\",\n\t\t\tcfg: TunnelConfig{\n\t\t\t\tLocal: \"[::1]:6000\",\n\t\t\t\tPeer: \"[::1]:5000\",\n\t\t\t\tVersion: ProtocolVersion3,\n\t\t\t\tTunnelID: 4,\n\t\t\t\tPeerTunnelID: 1004,\n\t\t\t\tEncap: EncapTypeUDP,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"L2TPv3 IP AF_INET\",\n\t\t\tcfg: TunnelConfig{\n\t\t\t\tLocal: \"127.0.0.1:6000\",\n\t\t\t\tPeer: \"localhost:5000\",\n\t\t\t\tVersion: ProtocolVersion3,\n\t\t\t\tTunnelID: 5,\n\t\t\t\tPeerTunnelID: 1005,\n\t\t\t\tEncap: EncapTypeIP,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"L2TPv3 IP AF_INET6\",\n\t\t\tcfg: TunnelConfig{\n\t\t\t\tLocal: \"[::1]:6000\",\n\t\t\t\tPeer: \"[::1]:5000\",\n\t\t\t\tVersion: ProtocolVersion3,\n\t\t\t\tTunnelID: 6,\n\t\t\t\tPeerTunnelID: 1006,\n\t\t\t\tEncap: EncapTypeIP,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, c := range cases {\n\t\tt.Run(c.name, func(t *testing.T) {\n\t\t\tctx, err := NewContext(\n\t\t\t\tLinuxNetlinkDataPlane,\n\t\t\t\tlevel.NewFilter(log.NewLogfmtLogger(os.Stderr),\n\t\t\t\t\tlevel.AllowDebug(), level.AllowInfo()))\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"NewContext(): %v\", err)\n\t\t\t}\n\t\t\tdefer ctx.Close()\n\n\t\t\t_, err = ctx.NewQuiescentTunnel(\"t1\", &c.cfg)\n\t\t\tif c.expectFail {\n\t\t\t\tif err == nil {\n\t\t\t\t\tt.Fatalf(\"Expected NewQuiescentTunnel(%v) to fail\", c.cfg)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"NewQuiescentTunnel(%v): %v\", c.cfg, err)\n\t\t\t\t}\n\n\t\t\t\terr = checkTunnel(&c.cfg)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"NewQuiescentTunnel(%v): failed to validate: %v\", c.cfg, err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}", "func checkIPConnection(host string, port string) bool {\n\tconn, err := net.Dial(\"tcp\", host+\":\"+port)\n\tif err != nil {\n\t\treturn false\n\t}\n\tdefer conn.Close()\n\n\treturn true\n}", "func isEsxRechable(esxIP string) bool {\n\tCommand := sshToLandingMachine + \"'ping -c 1 \" + esxIP + \" > /dev/null && echo true || echo false'\"\n\toutput, _ := exec.Command(\"/bin/sh\", \"-c\", Command).Output()\n\tcommandOutput := (string(output))\n\tif strings.Contains(string(commandOutput), \"false\") {\n\t\tfmt.Printf(getTime()+\" ESX-%s is not reachable\\n\", esxIP)\n\t\treturn false\n\t}\n\tfmt.Printf(getTime()+\" ESX-%s is reachable\\n\", esxIP)\n\treturn true\n\n}", "func (h *StandHystrix) doCheck() {\n\tif h.checkAliveFunc == nil || h.checkHystrixFunc == nil {\n\t\treturn\n\t}\n\tif h.IsHystrix() {\n\t\tisAlive := h.checkAliveFunc()\n\t\tif isAlive {\n\t\t\th.TriggerAlive()\n\t\t\th.GetCounter().Clear()\n\t\t\ttime.AfterFunc(time.Duration(h.checkHystrixInterval)*time.Second, h.doCheck)\n\t\t} else {\n\t\t\ttime.AfterFunc(time.Duration(h.checkAliveInterval)*time.Second, h.doCheck)\n\t\t}\n\t} else {\n\t\tisHystrix := h.checkHystrixFunc()\n\t\tif isHystrix {\n\t\t\th.TriggerHystrix()\n\t\t\ttime.AfterFunc(time.Duration(h.checkAliveInterval)*time.Second, h.doCheck)\n\t\t} else {\n\t\t\ttime.AfterFunc(time.Duration(h.checkHystrixInterval)*time.Second, 
h.doCheck)\n\t\t}\n\t}\n}", "func validateMultiIPForDonorIntf(d *db.DB, ifName *string) bool {\n\n\ttables := [2]string{\"INTERFACE\", \"PORTCHANNEL_INTERFACE\"}\n\tdonor_intf := false\n\tlog.Info(\"validateMultiIPForDonorIntf : intfName\", ifName)\n\tfor _, table := range tables {\n\t\tintfTble, err := d.GetTable(&db.TableSpec{Name:table})\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tintfKeys, err := intfTble.GetKeys()\n\t\tfor _, intfName := range intfKeys {\n\t\t\tintfEntry, err := d.GetEntry(&db.TableSpec{Name: table}, intfName)\n\t\t\tif(err != nil) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tunnumbered, ok := intfEntry.Field[\"unnumbered\"]\n\t\t\tif ok {\n\t\t\t\tif unnumbered == *ifName {\n\t\t\t\t\tdonor_intf = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif donor_intf {\n\t\tloIntfTble, err := d.GetTable(&db.TableSpec{Name:\"LOOPBACK_INTERFACE\"})\n\t\tif err != nil {\n\t\t\tlog.Info(\"Table read error : return false\")\n\t\t\treturn false\n\t\t}\n\n\t\tloIntfKeys, err := loIntfTble.GetKeys()\n\t\tfor _, loIntfName := range loIntfKeys {\n\t\t\tif len(loIntfName.Comp) > 1 && strings.Contains(loIntfName.Comp[0], *ifName){\n\t\t\t\tif strings.Contains(loIntfName.Comp[1], \".\") {\n\t\t\t\t\tlog.Info(\"Multi IP exists\")\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t\n\treturn false\n}", "func RespondingTCPChecks() mapval.Validator {\n\treturn mapval.MustCompile(mapval.Map{\"tcp.rtt.connect.us\": mapval.IsDuration})\n}", "func TestUnreachableMarks(t *testing.T) {\n\tseeds := []string {\"127.0.0.1:6000\",}\n\tmanager1 := CreatePeerManager(6000, 6001, nil, FullMode)\n\tmanager2 := CreatePeerManager(7000, 7001, seeds, FullMode)\n\tmanager3 := CreatePeerManager(8000, 8001, seeds, FullMode)\n\n\t// Change update period to lengthen the time between marking a peer unreachable \n\t// and the next status update\n\tmanager1.StatusUpdatePeriod=500*time.Millisecond\n\tmanager2.StatusUpdatePeriod=500*time.Millisecond\n\tmanager3.StatusUpdatePeriod=500*time.Millisecond\n\n\tmarkPeer := func(t *testing.T) {\n\t\tmanager1.MarkPeerUnreachable(\"127.0.0.1:8001\")\n\t\tmanager1.MarkPeerUnreachable(\"127.0.0.1:8001\")\n\t\tmanager1.MarkPeerUnreachable(\"127.0.0.1:8001\")\n\t\tavailable := GetPeerManagerAvailablePeers(manager1)\n\t\texpected := []string {\"127.0.0.1:6001\", \"127.0.0.1:7001\"}\n\t\tif !MapOnlyContains(available, expected) {\n\t\t\tt.Errorf(\"Peer 127.0.0.1:8001 wasn't marked unreachable %v\\n\", available)\n\t\t}\n\t}\n\n\t// After some time has passed all the peers should be available again\n\tallPeers := []string {\"127.0.0.1:6001\", \"127.0.0.1:7001\", \"127.0.0.1:8001\"}\n\tPeerManagerPropagationHelper(t, manager1, manager2, manager3,\n\t\tallPeers, allPeers, allPeers, markPeer, 3200*time.Millisecond, 8*time.Second)\n}", "func TestShortQuery(t *testing.T) {\n\tvar qerr *queryError\n\tdoh, _ := NewTransport(testURL, ips, nil, nil, nil)\n\t_, err := doh.Query([]byte{})\n\tif err == nil {\n\t\tt.Error(\"Empty query should fail\")\n\t} else if !errors.As(err, &qerr) {\n\t\tt.Errorf(\"Wrong error type: %v\", err)\n\t} else if qerr.status != BadQuery {\n\t\tt.Errorf(\"Wrong error status: %d\", qerr.status)\n\t}\n\n\t_, err = doh.Query([]byte{1})\n\tif err == nil {\n\t\tt.Error(\"One byte query should fail\")\n\t} else if !errors.As(err, &qerr) {\n\t\tt.Errorf(\"Wrong error type: %v\", err)\n\t} else if qerr.status != BadQuery {\n\t\tt.Errorf(\"Wrong error status: %d\", qerr.status)\n\t}\n}", "func (s *Server) addrAvailable(target net.IP) bool {\n\n\tif 
s.ICMPTimeout == 0 {\n\t\treturn true\n\t}\n\n\tpinger, err := ping.NewPinger(target.String())\n\tif err != nil {\n\t\tlog.Error(\"ping.NewPinger(): %v\", err)\n\t\treturn true\n\t}\n\n\tpinger.SetPrivileged(true)\n\tpinger.Timeout = time.Duration(s.ICMPTimeout) * time.Millisecond\n\tpinger.Count = 1\n\treply := false\n\tpinger.OnRecv = func(pkt *ping.Packet) {\n\t\t// log.Tracef(\"Received ICMP Reply from %v\", target)\n\t\treply = true\n\t}\n\tlog.Tracef(\"Sending ICMP Echo to %v\", target)\n\tpinger.Run()\n\n\tif reply {\n\t\tlog.Info(\"DHCP: IP conflict: %v is already used by another device\", target)\n\t\treturn false\n\t}\n\n\tlog.Tracef(\"ICMP procedure is complete: %v\", target)\n\treturn true\n}", "func (suite *TestAgentSuite) TestListAgentsWithPingTask(c *C) {\n\ttestCases := []*struct {\n\t\tquery *nqmModel.AgentQuery\n\t\tpageSize int32\n\t\tpagePosition int32\n\t\texpectedCountOfCurrentPage int\n\t\texpectedCountOfAll int32\n\t}{\n\t\t{ // All data\n\t\t\t&nqmModel.AgentQuery{IspId: -2, HasStatusParam: false},\n\t\t\t10, 1, 3, 3,\n\t\t},\n\t\t{ // All data(not-match ping task)\n\t\t\t&nqmModel.AgentQuery{IspId: -2, HasStatusParam: false},\n\t\t\t10, 1, 3, 3,\n\t\t},\n\t\t{ // 2nd page\n\t\t\t&nqmModel.AgentQuery{IspId: -2, HasStatusParam: false},\n\t\t\t2, 2, 1, 3,\n\t\t},\n\t\t{ // Match nothing for further page\n\t\t\t&nqmModel.AgentQuery{IspId: -2, HasStatusParam: false},\n\t\t\t10, 10, 0, 3,\n\t\t},\n\t\t{ // Match 1 row by all of the conditions\n\t\t\t&nqmModel.AgentQuery{\n\t\t\t\tName: \"ag-name-1\",\n\t\t\t\tConnectionId: \"ag-list-1\",\n\t\t\t\tHostname: \"hn-list-1\",\n\t\t\t\tIspId: 3,\n\t\t\t\tIpAddress: \"123.52\",\n\t\t\t\tHasStatusParam: true,\n\t\t\t\tStatus: true,\n\t\t\t},\n\t\t\t10, 1, 1, 1,\n\t\t},\n\t\t{ // Match 1 row(by special IP address)\n\t\t\t&nqmModel.AgentQuery{\n\t\t\t\tIspId: -2,\n\t\t\t\tHasStatusParam: false,\n\t\t\t\tIpAddress: \"12.37\",\n\t\t\t}, 10, 1, 1, 1,\n\t\t},\n\t\t{ // Match nothing\n\t\t\t&nqmModel.AgentQuery{\n\t\t\t\tIspId: -2,\n\t\t\t\tHasStatusParam: false,\n\t\t\t\tConnectionId: \"ag-list-1\",\n\t\t\t\tHostname: \"hn-list-2\",\n\t\t\t}, 10, 1, 0, 0,\n\t\t},\n\t}\n\ttestCasesForPingTask := []*struct {\n\t\tpingTaskId int32\n\t\tappliedInt string\n\t\texpectedApplying bool\n\t}{\n\t\t{38201, \"!N!\", true},\n\t\t{38202, \"!N!\", false},\n\t\t{38201, \"1\", true},\n\t\t{38202, \"0\", false},\n\t}\n\n\tfor _, testCase := range testCases {\n\t\tocheck.LogTestCase(c, testCase)\n\t\tfor j, testCaseForPingTask := range testCasesForPingTask {\n\t\t\tcommentPingTask := ocheck.TestCaseComment(j)\n\t\t\tocheck.LogTestCase(c, testCaseForPingTask)\n\n\t\t\tpaging := commonModel.Paging{\n\t\t\t\tSize: testCase.pageSize,\n\t\t\t\tPosition: testCase.pagePosition,\n\t\t\t\tOrderBy: []*commonModel.OrderByEntity{\n\t\t\t\t\t{\"id\", commonModel.Descending},\n\t\t\t\t\t{\"applied\", commonModel.Descending},\n\t\t\t\t\t{\"status\", commonModel.Ascending},\n\t\t\t\t\t{\"name\", commonModel.Ascending},\n\t\t\t\t\t{\"connection_id\", commonModel.Ascending},\n\t\t\t\t\t{\"comment\", commonModel.Ascending},\n\t\t\t\t\t{\"province\", commonModel.Ascending},\n\t\t\t\t\t{\"city\", commonModel.Ascending},\n\t\t\t\t\t{\"last_heartbeat_time\", commonModel.Ascending},\n\t\t\t\t\t{\"name_tag\", commonModel.Ascending},\n\t\t\t\t\t{\"group_tag\", commonModel.Descending},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tfinalQuery := &nqmModel.AgentQueryWithPingTask{\n\t\t\t\tAgentQuery: *testCase.query,\n\t\t\t\tPingTaskId: testCaseForPingTask.pingTaskId,\n\t\t\t\tHasApplied: 
testCaseForPingTask.appliedInt,\n\t\t\t\tApplied: testCaseForPingTask.appliedInt != \"0\",\n\t\t\t}\n\t\t\ttestedResult, newPaging := ListAgentsWithPingTask(\n\t\t\t\tfinalQuery, paging,\n\t\t\t)\n\n\t\t\tc.Logf(\"[List] Query: %#v. Number of agents: %d\", testCase.query, len(testedResult))\n\n\t\t\tfor _, agent := range testedResult {\n\t\t\t\tc.Logf(\"\\t[List] Matched Agent: %#v.\", agent)\n\t\t\t\tc.Assert(agent.ApplyingPingTask, Equals, testCaseForPingTask.expectedApplying, commentPingTask)\n\t\t\t}\n\t\t\tc.Assert(testedResult, HasLen, testCase.expectedCountOfCurrentPage)\n\t\t\tc.Assert(newPaging.TotalCount, Equals, testCase.expectedCountOfAll)\n\t\t}\n\t}\n}", "func TestContractPresenceLeak(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tt.Parallel()\n\t// create testing trio\n\th, c, _, err := newTestingTrio(t.Name())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer h.Close()\n\tdefer c.Close()\n\n\t// get the host's entry from the db\n\thostEntry, ok := c.hdb.Host(h.PublicKey())\n\tif !ok {\n\t\tt.Fatal(\"no entry for host in db\")\n\t}\n\n\t// set an allowance but don't use SetAllowance to avoid automatic contract\n\t// formation.\n\tc.mu.Lock()\n\tc.allowance = modules.DefaultAllowance\n\tc.mu.Unlock()\n\n\t// form a contract with the host\n\t_, contract, err := c.managedNewContract(hostEntry, types.SiacoinPrecision.Mul64(10), c.blockHeight+100)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Connect with bad challenge response. Try correct\n\t// and incorrect contract IDs. Compare errors.\n\twrongID := contract.ID\n\twrongID[0] ^= 0x01\n\tfcids := []types.FileContractID{contract.ID, wrongID}\n\tvar errors []error\n\n\tfor _, fcid := range fcids {\n\t\tvar challenge crypto.Hash\n\t\tvar signature crypto.Signature\n\t\tconn, err := net.Dial(\"tcp\", string(hostEntry.NetAddress))\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Couldn't dial tpc connection with host @ %v: %v.\", string(hostEntry.NetAddress), err)\n\t\t}\n\t\tif err := encoding.WriteObject(conn, modules.RPCDownload); err != nil {\n\t\t\tt.Fatalf(\"Couldn't initiate RPC: %v.\", err)\n\t\t}\n\t\tif err := encoding.WriteObject(conn, fcid); err != nil {\n\t\t\tt.Fatalf(\"Couldn't send fcid: %v.\", err)\n\t\t}\n\t\tif err := encoding.ReadObject(conn, &challenge, 32); err != nil {\n\t\t\tt.Fatalf(\"Couldn't read challenge: %v.\", err)\n\t\t}\n\t\tif err := encoding.WriteObject(conn, signature); err != nil {\n\t\t\tt.Fatalf(\"Couldn't send signature: %v.\", err)\n\t\t}\n\t\terr = modules.ReadNegotiationAcceptance(conn)\n\t\tif err == nil {\n\t\t\tt.Fatal(\"Expected an error, got success.\")\n\t\t}\n\t\terrors = append(errors, err)\n\t}\n\tif errors[0].Error() != errors[1].Error() {\n\t\tt.Fatalf(\"Expected to get equal errors, got %q and %q.\", errors[0], errors[1])\n\t}\n}", "func Test_Onu_DiscoverIndication_retry_on_discovery(t *testing.T) {\n\tonu := createTestOnu()\n\tstream := &mockStream{\n\t\tCallCount: 0,\n\t\tCalls: make(map[int]*openolt.Indication),\n\t\tfail: false,\n\t\tchannel: make(chan int, 10),\n\t}\n\tctx, cancel := context.WithCancel(context.TODO())\n\tgo onu.ProcessOnuMessages(ctx, stream, nil)\n\tonu.InternalState.SetState(OnuStateInitialized)\n\t_ = onu.InternalState.Event(OnuTxDiscover)\n\n\tselect {\n\tdefault:\n\tcase <-time.After(400 * time.Millisecond):\n\t\tassert.Equal(t, stream.CallCount, 4)\n\t}\n\tcancel()\n}", "func resolveDOH(hostname string) []net.IP {\n\n\t// TODO: parallelise the below so the v6 lookup does not wait for the v4 lookup to return and process\n\tip4 := 
resolveDOHipv4(hostname)\n\tip6 := resolveDOHipv6(hostname)\n\n\tips := append(ip4, ip6...)\n\n\treturn ips\n\n}", "func addressQuery(q *query.Address, sender connection.Info, oldToken token.Token) {\n\tlog.Warn(\"Address Queries not yet supported\")\n\t//FIXME CFE make it compatible with the new caches\n\t/*log.Debug(\"Start processing address query\", \"addressQuery\", q)\n\tassertion, ok := getAddressCache(q.SubjectAddr, q.Context).Get(q.SubjectAddr, q.Types)\n\t//TODO CFE add heuristic which assertion to return\n\tif ok {\n\t\tif assertion != nil {\n\t\t\tsendSection(assertion, oldToken, sender)\n\t\t\tlog.Debug(\"Finished handling query by sending address assertion from cache\", \"q\", q)\n\t\t\treturn\n\t\t}\n\t}\n\tlog.Debug(\"No entry found in address cache matching the query\")\n\n\tif q.ContainsOption(query.QOCachedAnswersOnly) {\n\t\tlog.Debug(\"Send a notification message back to the sender due to query option: 'Cached Answers only'\")\n\t\tsendNotificationMsg(oldToken, sender, section.NTNoAssertionAvail, \"\")\n\t\tlog.Debug(\"Finished handling query (unsuccessful) \", \"query\", q)\n\t\treturn\n\t}\n\n\tdelegate := getRootAddr()\n\tif delegate.Equal(serverConnInfo) {\n\t\tsendNotificationMsg(oldToken, sender, section.NTNoAssertionAvail, \"\")\n\t\tlog.Error(\"Stop processing query. I am authoritative and have no answer in cache\")\n\t\treturn\n\t}\n\t//we have a valid delegation\n\ttok := oldToken\n\tif !q.ContainsOption(query.QOTokenTracing) {\n\t\ttok = token.New()\n\t}\n\tnewQuery := *q\n\t//Upper bound for forwarded query expiration time\n\tif newQuery.Expiration > time.Now().Add(Config.AddressQueryValidity).Unix() {\n\t\tnewQuery.Expiration = time.Now().Add(Config.AddressQueryValidity).Unix()\n\t}\n\t//FIXME CFE allow multiple connection\n\t//FIXME CFE only send query if not already in cache.\n\tpendingQueries.Add(msgSectionSender{Section: q, Sender: sender, Token: oldToken})\n\tlog.Debug(\"Added query into to pending query cache\", \"query\", q)\n\tsendSection(&newQuery, tok, delegate)*/\n}", "func TestConnectRejectsInvalidAddrs(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tt.Parallel()\n\tg := newNamedTestingGateway(t, \"1\")\n\tdefer g.Close()\n\n\tg2 := newNamedTestingGateway(t, \"2\")\n\tdefer g2.Close()\n\n\t_, g2Port, err := net.SplitHostPort(string(g2.Address()))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttests := []struct {\n\t\taddr modules.NetAddress\n\t\twantErr bool\n\t\tmsg string\n\t}{\n\t\t{\n\t\t\taddr: \"127.0.0.1:123\",\n\t\t\twantErr: true,\n\t\t\tmsg: \"Connect should reject unreachable addresses\",\n\t\t},\n\t\t{\n\t\t\taddr: \"111.111.111.111:0\",\n\t\t\twantErr: true,\n\t\t\tmsg: \"Connect should reject invalid NetAddresses\",\n\t\t},\n\t\t{\n\t\t\taddr: modules.NetAddress(net.JoinHostPort(\"localhost\", g2Port)),\n\t\t\twantErr: true,\n\t\t\tmsg: \"Connect should reject non-IP addresses\",\n\t\t},\n\t\t{\n\t\t\taddr: g2.Address(),\n\t\t\tmsg: \"Connect failed to connect to another gateway\",\n\t\t},\n\t\t{\n\t\t\taddr: g2.Address(),\n\t\t\twantErr: true,\n\t\t\tmsg: \"Connect should reject an address it's already connected to\",\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\terr := g.Connect(tt.addr)\n\t\tif tt.wantErr != (err != nil) {\n\t\t\tt.Errorf(\"%v, wantErr: %v, err: %v\", tt.msg, tt.wantErr, err)\n\t\t}\n\t}\n}", "func main() {\n\tsuccess := 0\n\tban := NewBan()\n\tfor i := 0; i < 1000; i++ {\n\t\tfor j := 0; j < 100; j++ {\n\t\t\tgo func() {\n\t\t\t\tip := fmt.Sprintf(\"192.168.1.%d\", j)\n\t\t\t\tif 
!ban.visit(ip) {\n\t\t\t\t\tsuccess++\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\n\t}\n\tfmt.Println(\"success:\", success)\n}", "func performUpgradeVerification(c client.Client, cfg *osdUpgradeConfig, metricsClient metrics.Metrics, logger logr.Logger) (bool, error) {\n\n\tnamespacePrefixesToCheck := cfg.Verification.NamespacePrefixesToCheck\n\tnamespaceToIgnore := cfg.Verification.IgnoredNamespaces\n\n\t// Verify all ReplicaSets in the default, kube* and openshfit* namespaces are satisfied\n\treplicaSetList := &appsv1.ReplicaSetList{}\n\terr := c.List(context.TODO(), replicaSetList)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treadyRs := 0\n\ttotalRs := 0\n\tfor _, replicaSet := range replicaSetList.Items {\n\t\tfor _, namespacePrefix := range namespacePrefixesToCheck {\n\t\t\tfor _, ingoredNS := range namespaceToIgnore {\n\t\t\t\tif strings.HasPrefix(replicaSet.Namespace, namespacePrefix) && replicaSet.Namespace != ingoredNS {\n\t\t\t\t\ttotalRs = totalRs + 1\n\t\t\t\t\tif replicaSet.Status.ReadyReplicas == replicaSet.Status.Replicas {\n\t\t\t\t\t\treadyRs = readyRs + 1\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif totalRs != readyRs {\n\t\tlogger.Info(fmt.Sprintf(\"not all replicaset are ready:expected number :%v , ready number %v\", totalRs, readyRs))\n\t\treturn false, nil\n\t}\n\n\t// Verify all Daemonsets in the default, kube* and openshift* namespaces are satisfied\n\tdaemonSetList := &appsv1.DaemonSetList{}\n\terr = c.List(context.TODO(), daemonSetList)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treadyDS := 0\n\ttotalDS := 0\n\tfor _, daemonSet := range daemonSetList.Items {\n\t\tfor _, namespacePrefix := range namespacePrefixesToCheck {\n\t\t\tfor _, ignoredNS := range namespaceToIgnore {\n\t\t\t\tif strings.HasPrefix(daemonSet.Namespace, namespacePrefix) && daemonSet.Namespace != ignoredNS {\n\t\t\t\t\ttotalDS = totalDS + 1\n\t\t\t\t\tif daemonSet.Status.DesiredNumberScheduled == daemonSet.Status.NumberReady {\n\t\t\t\t\t\treadyDS = readyDS + 1\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif totalDS != readyDS {\n\t\tlogger.Info(fmt.Sprintf(\"not all daemonset are ready:expected number :%v , ready number %v\", totalDS, readyDS))\n\t\treturn false, nil\n\t}\n\n\t// If daemonsets and replicasets are satisfied, any active TargetDown alerts will eventually go away.\n\t// Wait for that to occur before declaring the verification complete.\n\tnamespacePrefixesAsRegex := make([]string, 0)\n\tnamespaceIgnoreAlert := make([]string, 0)\n\tfor _, namespacePrefix := range namespacePrefixesToCheck {\n\t\tnamespacePrefixesAsRegex = append(namespacePrefixesAsRegex, fmt.Sprintf(\"^%s-.*\", namespacePrefix))\n\t}\n\tnamespaceIgnoreAlert = append(namespaceIgnoreAlert, namespaceToIgnore...)\n\tisTargetDownFiring, err := metricsClient.IsAlertFiring(\"TargetDown\", namespacePrefixesAsRegex, namespaceIgnoreAlert)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"can't query for alerts: %v\", err)\n\t}\n\tif isTargetDownFiring {\n\t\tlogger.Info(fmt.Sprintf(\"TargetDown alerts are still firing in namespaces %v\", namespacePrefixesAsRegex))\n\t\treturn false, nil\n\t}\n\n\treturn true, nil\n}", "func test_confirmTransactionStatus(t *testing.T) {\n\tt.Skip(nil)\n\t//services.RunOnTestNet()\n\ttxHash := lastTransaction.Hash()\n\tif len(txHash) <= 0 {\n\t\t// set an existing tx hash\n\t\ttxHash = lastTransactionHash\n\t}\n\t// check pending\n\tisPending := eth_gateway.EthWrapper.PendingConfirmation(txHash)\n\tif isPending {\n\t\t// check confirmation\n\t\ttxStatus := 
eth_gateway.EthWrapper.WaitForConfirmation(txHash, 3)\n\t\tif txStatus == 0 {\n\t\t\tt.Logf(\"transaction failure\")\n\t\t} else if txStatus == 1 {\n\t\t\tt.Logf(\"confirmation completed\")\n\n\t\t\tbal := eth_gateway.EthWrapper.CheckETHBalance(ethAddress02)\n\t\t\tt.Logf(\"balance updated : %v\", bal)\n\t\t}\n\t}\n}", "func TestICMP(t *testing.T) {\n\tif os.Getuid() != 0 {\n\t\tt.Logf(\"test disabled; must be root\")\n\t\treturn\n\t}\n\n\tvar (\n\t\tladdr *IPAddr\n\t\terr os.Error\n\t)\n\tif *srchost != \"\" {\n\t\tladdr, err = ResolveIPAddr(\"ip4\", *srchost)\n\t\tif err != nil {\n\t\t\tt.Fatalf(`net.ResolveIPAddr(\"ip4\", %v\") = %v, %v`, *srchost, laddr, err)\n\t\t}\n\t}\n\n\traddr, err := ResolveIPAddr(\"ip4\", *dsthost)\n\tif err != nil {\n\t\tt.Fatalf(`net.ResolveIPAddr(\"ip4\", %v\") = %v, %v`, *dsthost, raddr, err)\n\t}\n\n\tc, err := ListenIP(\"ip4:icmp\", laddr)\n\tif err != nil {\n\t\tt.Fatalf(`net.ListenIP(\"ip4:icmp\", %v) = %v, %v`, *srchost, c, err)\n\t}\n\n\tsendid := os.Getpid() & 0xffff\n\tconst sendseq = 61455\n\tconst pingpktlen = 128\n\tsendpkt := makePingRequest(sendid, sendseq, pingpktlen, []byte(\"Go Go Gadget Ping!!!\"))\n\n\tn, err := c.WriteToIP(sendpkt, raddr)\n\tif err != nil || n != pingpktlen {\n\t\tt.Fatalf(`net.WriteToIP(..., %v) = %v, %v`, raddr, n, err)\n\t}\n\n\tc.SetTimeout(100e6)\n\tresp := make([]byte, 1024)\n\tfor {\n\t\tn, from, err := c.ReadFrom(resp)\n\t\tif err != nil {\n\t\t\tt.Fatalf(`ReadFrom(...) = %v, %v, %v`, n, from, err)\n\t\t}\n\t\tif resp[0] != ICMP_ECHO_REPLY {\n\t\t\tcontinue\n\t\t}\n\t\trcvid, rcvseq := parsePingReply(resp)\n\t\tif rcvid != sendid || rcvseq != sendseq {\n\t\t\tt.Fatalf(`Ping reply saw id,seq=0x%x,0x%x (expected 0x%x, 0x%x)`, rcvid, rcvseq, sendid, sendseq)\n\t\t}\n\t\treturn\n\t}\n\tt.Fatalf(\"saw no ping return\")\n}", "func (s) TestResourceResolverOneDNSResource(t *testing.T) {\n\tfor _, test := range []struct {\n\t\tname string\n\t\ttarget string\n\t\twantTarget resolver.Target\n\t\taddrs []resolver.Address\n\t\twant []priorityConfig\n\t}{\n\t\t{\n\t\t\tname: \"watch DNS\",\n\t\t\ttarget: testDNSTarget,\n\t\t\twantTarget: resolver.Target{Scheme: \"dns\", Endpoint: testDNSTarget},\n\t\t\taddrs: []resolver.Address{{Addr: \"1.1.1.1\"}, {Addr: \"2.2.2.2\"}},\n\t\t\twant: []priorityConfig{{\n\t\t\t\tmechanism: DiscoveryMechanism{\n\t\t\t\t\tType: DiscoveryMechanismTypeLogicalDNS,\n\t\t\t\t\tDNSHostname: testDNSTarget,\n\t\t\t\t},\n\t\t\t\taddresses: []string{\"1.1.1.1\", \"2.2.2.2\"},\n\t\t\t\tchildNameGen: newNameGenerator(0),\n\t\t\t}},\n\t\t},\n\t} {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tdnsTargetCh, dnsCloseCh, _, dnsR, cleanup := setupDNS()\n\t\t\tdefer cleanup()\n\t\t\tfakeClient := fakeclient.NewClient()\n\t\t\trr := newResourceResolver(&clusterResolverBalancer{xdsClient: fakeClient})\n\t\t\trr.updateMechanisms([]DiscoveryMechanism{{\n\t\t\t\tType: DiscoveryMechanismTypeLogicalDNS,\n\t\t\t\tDNSHostname: test.target,\n\t\t\t}})\n\t\t\tctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\t\t\tdefer ctxCancel()\n\t\t\tselect {\n\t\t\tcase target := <-dnsTargetCh:\n\t\t\t\tif diff := cmp.Diff(target, test.wantTarget); diff != \"\" {\n\t\t\t\t\tt.Fatalf(\"got unexpected DNS target to watch, diff (-got, +want): %v\", diff)\n\t\t\t\t}\n\t\t\tcase <-ctx.Done():\n\t\t\t\tt.Fatal(\"Timed out waiting for building DNS resolver\")\n\t\t\t}\n\n\t\t\t// Invoke callback, should get an update.\n\t\t\tdnsR.UpdateState(resolver.State{Addresses: test.addrs})\n\t\t\tselect {\n\t\t\tcase u := 
<-rr.updateChannel:\n\t\t\t\tif diff := cmp.Diff(u.priorities, test.want, cmp.AllowUnexported(priorityConfig{}, nameGenerator{})); diff != \"\" {\n\t\t\t\t\tt.Fatalf(\"got unexpected resource update, diff (-got, +want): %v\", diff)\n\t\t\t\t}\n\t\t\tcase <-ctx.Done():\n\t\t\t\tt.Fatal(\"Timed out waiting for update from update channel.\")\n\t\t\t}\n\t\t\t// Close the resource resolver. Should close the underlying resolver.\n\t\t\trr.stop()\n\t\t\tselect {\n\t\t\tcase <-dnsCloseCh:\n\t\t\tcase <-ctx.Done():\n\t\t\t\tt.Fatal(\"Timed out waiting for closing DNS resolver\")\n\t\t\t}\n\t\t})\n\t}\n}", "func TestQueryServerAddr(t *testing.T) {\n\tr1, _ := http.NewRequest(\"GET\",\n\t\tformatURL(\"query?hosts=127.0.0.1:65534\"), nil)\n\tw1 := newRecorder()\n\tqueryServerAddrs(w1, r1)\n\t// 200 - default server list\n\tif w1.Code != http.StatusOK {\n\t\tt.Errorf(\"Expected status code %v for queryServerAddr handler; got: %v\",\n\t\t\thttp.StatusOK, w1.Code)\n\t}\n\tif len(w1.Body.Bytes()) == 0 {\n\t\tt.Errorf(\"queryServerAddr handler body should not be empty\")\n\t}\n\t// body 1 json test\n\tm1 := &models.APIServerList{}\n\t_, modelMatches := w1.ExpectJSON(m1, m1)\n\tif !modelMatches {\n\t\tt.Errorf(\"queryServerAddr: expected and actual models do not match.\")\n\t}\n\t// no address specified\n\tr2, _ := http.NewRequest(\"GET\", formatURL(\"query?hosts=\"), nil)\n\tw2 := newRecorder()\n\tqueryServerAddrs(w2, r2)\n\t// body 2 json test\n\tm2 := &models.APIServerList{}\n\t_, modelMatches2 := w2.ExpectJSON(m2, m2)\n\tif !modelMatches2 {\n\t\tt.Errorf(\"queryServerAddr: expected and actual models do not match.\")\n\t}\n\tif w2.Code != http.StatusOK {\n\t\tt.Errorf(\"Expected status code %v for queryServerAddr handler; got: %v\",\n\t\t\thttp.StatusOK, w2.Code)\n\t}\n\tif len(w2.Body.Bytes()) == 0 {\n\t\tt.Errorf(\"queryServerAddr handler body should not be empty\")\n\t}\n}", "func (a *Api) checkFoundConfirmations(ctx context.Context, res http.ResponseWriter, results []*models.Confirmation, err error) []*models.Confirmation {\n\tif err != nil {\n\t\tlog.Println(\"Error finding confirmations \", err)\n\t\tstatusErr := &status.StatusError{status.NewStatus(http.StatusInternalServerError, STATUS_ERR_FINDING_CONFIRMATION)}\n\t\ta.sendModelAsResWithStatus(res, statusErr, http.StatusInternalServerError)\n\t\treturn nil\n\t} else if results == nil || len(results) == 0 {\n\t\tstatusErr := &status.StatusError{status.NewStatus(http.StatusNotFound, STATUS_NOT_FOUND)}\n\t\t//log.Println(\"No confirmations were found \", statusErr.Error())\n\t\ta.sendModelAsResWithStatus(res, statusErr, http.StatusNotFound)\n\t\treturn nil\n\t} else {\n\t\tfor i := range results {\n\t\t\tif err = a.addProfile(ctx, results[i]); err != nil {\n\t\t\t\t//report and move on\n\t\t\t\tlog.Println(\"Error getting profile\", err.Error())\n\t\t\t}\n\t\t}\n\t\treturn results\n\t}\n}", "func resendPendingQuery(query section.Section, oldToken token.Token, name, ipAddr string,\n\texpiration int64, s *Server) bool {\n\t//TODO CFE which port to choose?\n\tif tcpAddr, err := net.ResolveTCPAddr(\"tcp\", fmt.Sprintf(\"%v:%d\", ipAddr, 5022)); err != nil {\n\t\tconnInfo := connection.Info{Type: connection.TCP, TCPAddr: tcpAddr}\n\t\tif s.caches.RedirectCache.AddConnInfo(name, connInfo, expiration) {\n\t\t\ttok := token.New()\n\t\t\tif s.caches.PendingQueries.UpdateToken(oldToken, tok) {\n\t\t\t\tsendSection(query, tok, connInfo, s)\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\t//No redirect/delegation for connInfo in cache, send notification back to 
senders.\n\t}\n\treturn false\n}", "func Test_Onu_DiscoverIndication_send_on_discovery(t *testing.T) {\n\tonu := createTestOnu()\n\tstream := &mockStream{\n\t\tCallCount: 0,\n\t\tCalls: make(map[int]*openolt.Indication),\n\t\tfail: false,\n\t\tchannel: make(chan int, 10),\n\t}\n\tctx, cancel := context.WithCancel(context.TODO())\n\tgo onu.ProcessOnuMessages(ctx, stream, nil)\n\tonu.InternalState.SetState(OnuTxInitialize)\n\t_ = onu.InternalState.Event(OnuTxDiscover)\n\n\tselect {\n\tdefault:\n\tcase <-time.After(90 * time.Millisecond):\n\t\tcall := stream.Calls[1].GetOnuDiscInd()\n\t\tassert.Equal(t, stream.CallCount, 1)\n\t\tassert.Equal(t, call.IntfId, onu.PonPortID)\n\t\tassert.Equal(t, call.SerialNumber, onu.SerialNumber)\n\t}\n\tcancel()\n}", "func (r *Route) ConfirmReachable() {\n\tif entry := r.getCachedNeighborEntry(); entry != nil {\n\t\tentry.handleUpperLevelConfirmation()\n\t}\n}", "func TestConnectivityOnStartup(t *testing.T) {\n\treport := test.CheckRoutines(t)\n\tdefer report()\n\n\tlim := test.TimeOut(time.Second * 30)\n\tdefer lim.Stop()\n\n\t// Create a network with two interfaces\n\twan, err := vnet.NewRouter(&vnet.RouterConfig{\n\t\tCIDR: \"0.0.0.0/0\",\n\t\tLoggerFactory: logging.NewDefaultLoggerFactory(),\n\t})\n\tassert.NoError(t, err)\n\n\tnet0, err := vnet.NewNet(&vnet.NetConfig{\n\t\tStaticIPs: []string{\"192.168.0.1\"},\n\t})\n\tassert.NoError(t, err)\n\tassert.NoError(t, wan.AddNet(net0))\n\n\tnet1, err := vnet.NewNet(&vnet.NetConfig{\n\t\tStaticIPs: []string{\"192.168.0.2\"},\n\t})\n\tassert.NoError(t, err)\n\tassert.NoError(t, wan.AddNet(net1))\n\n\tassert.NoError(t, wan.Start())\n\n\taNotifier, aConnected := onConnected()\n\tbNotifier, bConnected := onConnected()\n\n\tKeepaliveInterval := time.Hour\n\tcfg0 := &AgentConfig{\n\t\tNetworkTypes: supportedNetworkTypes(),\n\t\tMulticastDNSMode: MulticastDNSModeDisabled,\n\t\tNet: net0,\n\t\tKeepaliveInterval: &KeepaliveInterval,\n\t\tCheckInterval: &KeepaliveInterval,\n\t}\n\n\taAgent, err := NewAgent(cfg0)\n\trequire.NoError(t, err)\n\trequire.NoError(t, aAgent.OnConnectionStateChange(aNotifier))\n\n\tcfg1 := &AgentConfig{\n\t\tNetworkTypes: supportedNetworkTypes(),\n\t\tMulticastDNSMode: MulticastDNSModeDisabled,\n\t\tNet: net1,\n\t\tKeepaliveInterval: &KeepaliveInterval,\n\t\tCheckInterval: &KeepaliveInterval,\n\t}\n\n\tbAgent, err := NewAgent(cfg1)\n\trequire.NoError(t, err)\n\trequire.NoError(t, bAgent.OnConnectionStateChange(bNotifier))\n\n\taConn, bConn := func(aAgent, bAgent *Agent) (*Conn, *Conn) {\n\t\t// Manual signaling\n\t\taUfrag, aPwd, err := aAgent.GetLocalUserCredentials()\n\t\tassert.NoError(t, err)\n\n\t\tbUfrag, bPwd, err := bAgent.GetLocalUserCredentials()\n\t\tassert.NoError(t, err)\n\n\t\tgatherAndExchangeCandidates(aAgent, bAgent)\n\n\t\taccepted := make(chan struct{})\n\t\taccepting := make(chan struct{})\n\t\tvar aConn *Conn\n\n\t\torigHdlr := aAgent.onConnectionStateChangeHdlr.Load()\n\t\tif origHdlr != nil {\n\t\t\tdefer check(aAgent.OnConnectionStateChange(origHdlr.(func(ConnectionState)))) //nolint:forcetypeassert\n\t\t}\n\t\tcheck(aAgent.OnConnectionStateChange(func(s ConnectionState) {\n\t\t\tif s == ConnectionStateChecking {\n\t\t\t\tclose(accepting)\n\t\t\t}\n\t\t\tif origHdlr != nil {\n\t\t\t\torigHdlr.(func(ConnectionState))(s) //nolint:forcetypeassert\n\t\t\t}\n\t\t}))\n\n\t\tgo func() {\n\t\t\tvar acceptErr error\n\t\t\taConn, acceptErr = aAgent.Accept(context.TODO(), bUfrag, bPwd)\n\t\t\tcheck(acceptErr)\n\t\t\tclose(accepted)\n\t\t}()\n\n\t\t<-accepting\n\n\t\tbConn, err := 
bAgent.Dial(context.TODO(), aUfrag, aPwd)\n\t\tcheck(err)\n\n\t\t// Ensure accepted\n\t\t<-accepted\n\t\treturn aConn, bConn\n\t}(aAgent, bAgent)\n\n\t// Ensure pair selected\n\t// Note: this assumes ConnectionStateConnected is thrown after selecting the final pair\n\t<-aConnected\n\t<-bConnected\n\n\tassert.NoError(t, wan.Stop())\n\tif !closePipe(t, aConn, bConn) {\n\t\treturn\n\t}\n}", "func (t *Fetcher) fetchAndVerify(ctx context.Context, cancelF context.CancelFunc,\n\treq *sciond.PathReq, earlyTrigger *util.Trigger) {\n\n\tdefer cancelF()\n\treply, err := t.getSegmentsFromNetwork(ctx, req)\n\tif err != nil {\n\t\tlog.Warn(\"Unable to retrieve paths from network\", \"err\", err)\n\t\treturn\n\t}\n\ttimer := earlyTrigger.Arm()\n\t// Cleanup early reply goroutine if function exits early\n\tif timer != nil {\n\t\tdefer timer.Stop()\n\t}\n\t// Build verification units\n\tunits := segverifier.BuildUnits(reply.Recs.Recs, reply.Recs.SRevInfos)\n\tunitResultsC := make(chan segverifier.UnitResult, len(units))\n\tfor _, unit := range units {\n\t\tgo unit.Verify(ctx, unitResultsC)\n\t}\nLoop:\n\tfor numResults := 0; numResults < len(units); numResults++ {\n\t\tselect {\n\t\tcase result := <-unitResultsC:\n\t\t\tif err, ok := result.Errors[-1]; ok {\n\t\t\t\tlog.Info(\"Segment verification failed\",\n\t\t\t\t\t\"segment\", result.Unit.SegMeta.Segment, \"err\", err)\n\t\t\t} else {\n\t\t\t\t// Verification succeeded\n\t\t\t\tn, err := t.pathDB.Insert(ctx, &result.Unit.SegMeta.Segment,\n\t\t\t\t\t[]proto.PathSegType{result.Unit.SegMeta.Type})\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Warn(\"Unable to insert segment into path database\",\n\t\t\t\t\t\t\"segment\", result.Unit.SegMeta.Segment, \"err\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif n > 0 {\n\t\t\t\t\tlog.Debug(\"Inserted segment into path database\",\n\t\t\t\t\t\t\"segment\", result.Unit.SegMeta.Segment)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Insert successfully verified revocations into the revcache\n\t\t\tfor index, revocation := range result.Unit.SRevInfos {\n\t\t\t\tif err, ok := result.Errors[index]; ok {\n\t\t\t\t\tlog.Info(\"Revocation verification failed\",\n\t\t\t\t\t\t\"revocation\", revocation, \"err\", err)\n\t\t\t\t} else {\n\t\t\t\t\t// Verification succeeded for this revocation, so we can add it to the cache\n\t\t\t\t\tinfo, err := revocation.RevInfo()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t// This should be caught during network message sanitization\n\t\t\t\t\t\tpanic(err)\n\t\t\t\t\t}\n\t\t\t\t\tt.revocationCache.Set(\n\t\t\t\t\t\trevcache.NewKey(info.IA(), common.IFIDType(info.IfID)),\n\t\t\t\t\t\trevocation,\n\t\t\t\t\t\tinfo.RelativeTTL(time.Now()),\n\t\t\t\t\t)\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-ctx.Done():\n\t\t\tbreak Loop\n\t\t}\n\t}\n}", "func hasToBeUpdated(home, foreign []corev1.EndpointSubset) bool {\n\tif len(home) != len(foreign) {\n\t\tklog.V(6).Info(\"the ep has to be updated because home and foreign subsets lengths are different\")\n\t\treturn true\n\t}\n\tfor i := 0; i < len(home); i++ {\n\t\tif len(home[i].Addresses) != len(foreign[i].Addresses) {\n\t\t\tklog.V(6).Info(\"the ep has to be updated because home and foreign addresses lengths are different\")\n\t\t\treturn true\n\t\t}\n\t\tfor j := 0; j < len(home[i].Addresses); j++ {\n\t\t\tif home[i].Addresses[j].IP != foreign[i].Addresses[j].IP {\n\t\t\t\tklog.V(6).Info(\"the ep has to be updated because home and foreign IPs are different\")\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}", "func verifyConfiguration() {\n\n // check connection to 
docker\n if err := pingDocker(); err == nil {\n log.Printf(\"Connected to docker socket at: unix:///%s\\n\", config.DockerSock)\n } else {\n log.Println(err)\n os.Exit(1)\n }\n\n // check status of vault server\n if err := checkVaultHealth(); err == nil {\n log.Printf(\"Connected to vault server at: %s\\n\", config.VaultAddr)\n } else {\n log.Println(err)\n os.Exit(1)\n }\n}", "func testAddrIndexOperations(t *testing.T, db database.Db, newestBlock *dcrutil.Block, newestSha *chainhash.Hash, newestBlockIdx int64) {\n\t// Metadata about the current addr index state should be unset.\n\tsha, height, err := db.FetchAddrIndexTip()\n\tif err != database.ErrAddrIndexDoesNotExist {\n\t\tt.Fatalf(\"Address index metadata shouldn't be in db, hasn't been built up yet.\")\n\t}\n\n\tvar zeroHash chainhash.Hash\n\tif !sha.IsEqual(&zeroHash) {\n\t\tt.Fatalf(\"AddrIndexTip wrong hash got: %s, want %s\", sha, &zeroHash)\n\n\t}\n\n\tif height != -1 {\n\t\tt.Fatalf(\"Addrindex not built up, yet a block index tip has been set to: %d.\", height)\n\t}\n\n\t// Test enforcement of constraints for \"limit\" and \"skip\"\n\tvar fakeAddr dcrutil.Address\n\t_, _, err = db.FetchTxsForAddr(fakeAddr, -1, 0, false)\n\tif err == nil {\n\t\tt.Fatalf(\"Negative value for skip passed, should return an error\")\n\t}\n\n\t_, _, err = db.FetchTxsForAddr(fakeAddr, 0, -1, false)\n\tif err == nil {\n\t\tt.Fatalf(\"Negative value for limit passed, should return an error\")\n\t}\n\n\t// Simple test to index outputs(s) of the first tx.\n\ttestIndex := make(database.BlockAddrIndex, database.AddrIndexKeySize)\n\ttestTx, err := newestBlock.Tx(0)\n\tif err != nil {\n\t\tt.Fatalf(\"Block has no transactions, unable to test addr \"+\n\t\t\t\"indexing, err %v\", err)\n\t}\n\n\t// Extract the dest addr from the tx.\n\t_, testAddrs, _, err := txscript.ExtractPkScriptAddrs(testTx.MsgTx().TxOut[0].Version, testTx.MsgTx().TxOut[0].PkScript, &chaincfg.MainNetParams)\n\tif err != nil {\n\t\tt.Fatalf(\"Unable to decode tx output, err %v\", err)\n\t}\n\n\t// Extract the hash160 from the output script.\n\tvar hash160Bytes [ripemd160.Size]byte\n\ttestHash160 := testAddrs[0].(*dcrutil.AddressScriptHash).Hash160()\n\tcopy(hash160Bytes[:], testHash160[:])\n\n\t// Create a fake index.\n\tblktxLoc, _, _ := newestBlock.TxLoc()\n\ttestIndex = []*database.TxAddrIndex{\n\t\t&database.TxAddrIndex{\n\t\t\tHash160: hash160Bytes,\n\t\t\tHeight: uint32(newestBlockIdx),\n\t\t\tTxOffset: uint32(blktxLoc[0].TxStart),\n\t\t\tTxLen: uint32(blktxLoc[0].TxLen),\n\t\t},\n\t}\n\n\t// Insert our test addr index into the DB.\n\terr = db.UpdateAddrIndexForBlock(newestSha, newestBlockIdx, testIndex)\n\tif err != nil {\n\t\tt.Fatalf(\"UpdateAddrIndexForBlock: failed to index\"+\n\t\t\t\" addrs for block #%d (%s) \"+\n\t\t\t\"err %v\", newestBlockIdx, newestSha, err)\n\t}\n\n\t// Chain Tip of address should've been updated.\n\tassertAddrIndexTipIsUpdated(db, t, newestSha, newestBlockIdx)\n\n\t// Check index retrieval.\n\ttxReplies, _, err := db.FetchTxsForAddr(testAddrs[0], 0, 1000, false)\n\tif err != nil {\n\t\tt.Fatalf(\"FetchTxsForAddr failed to correctly fetch txs for an \"+\n\t\t\t\"address, err %v\", err)\n\t}\n\t// Should have one reply.\n\tif len(txReplies) != 1 {\n\t\tt.Fatalf(\"Failed to properly index tx by address.\")\n\t}\n\n\t// Our test tx and indexed tx should have the same sha.\n\tindexedTx := txReplies[0]\n\tif !bytes.Equal(indexedTx.Sha.Bytes(), testTx.Sha().Bytes()) {\n\t\tt.Fatalf(\"Failed to fetch proper indexed tx. 
Expected sha %v, \"+\n\t\t\t\"fetched %v\", testTx.Sha(), indexedTx.Sha)\n\t}\n\n\t// Shut down DB.\n\tdb.Sync()\n\tdb.Close()\n\n\t// Re-Open, tip still should be updated to current height and sha.\n\tdb, err = database.OpenDB(\"leveldb\", \"tstdbopmode\")\n\tif err != nil {\n\t\tt.Fatalf(\"Unable to re-open created db, err %v\", err)\n\t}\n\tassertAddrIndexTipIsUpdated(db, t, newestSha, newestBlockIdx)\n\n\t// Delete the entire index.\n\terr = db.PurgeAddrIndex()\n\tif err != nil {\n\t\tt.Fatalf(\"Couldn't delete address index, err %v\", err)\n\t}\n\n\t// Former index should no longer exist.\n\ttxReplies, _, err = db.FetchTxsForAddr(testAddrs[0], 0, 1000, false)\n\tif err != nil {\n\t\tt.Fatalf(\"Unable to fetch transactions for address: %v\", err)\n\t}\n\tif len(txReplies) != 0 {\n\t\tt.Fatalf(\"Address index was not successfully deleted. \"+\n\t\t\t\"Should have 0 tx's indexed, %v were returned.\",\n\t\t\tlen(txReplies))\n\t}\n\n\t// Tip should be blanked out.\n\tif _, _, err := db.FetchAddrIndexTip(); err != database.ErrAddrIndexDoesNotExist {\n\t\tt.Fatalf(\"Address index was not fully deleted.\")\n\t}\n\n}", "func TestNeighbors(t *testing.T) {\n\ttest := &Test{\n\t\tsetupCmds: []Cmd{\n\t\t\t{\"ip netns add nb-vm1\", true},\n\t\t\t{\"ip link add nb-vm1-eth0 type veth peer name eth0 netns nb-vm1\", true},\n\t\t\t{\"ip link set nb-vm1-eth0 up\", true},\n\t\t\t{\"ip netns exec nb-vm1 ip link set eth0 up\", true},\n\t\t\t{\"ip netns exec nb-vm1 ip addr add 192.168.33.33/24 dev eth0\", true},\n\t\t\t{\"sleep 10\", true},\n\t\t\t{\"sudo ip netns exec nb-vm1 ip neighbour add 192.168.33.252 dev eth0 lladdr a6:d1:a0:51:03:49\", true},\n\t\t},\n\n\t\ttearDownCmds: []Cmd{\n\t\t\t{\"ip link del nb-vm1-eth0\", true},\n\t\t\t{\"ip netns del nb-vm1\", true},\n\t\t},\n\n\t\tmode: OneShot,\n\n\t\tchecks: []CheckFunction{\n\t\t\tfunc(c *CheckContext) error {\n\t\t\t\tprefix := c.gremlin\n\n\t\t\t\tnode, err := c.gh.GetNode(prefix.V().Has(\"IPV4\", \"192.168.33.33/24\"))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"Failed to find a node with IP 192.168.33.33/24\")\n\t\t\t\t}\n\n\t\t\t\tneighbors, ok := node.Metadata[\"Neighbors\"].(*topology.Neighbors)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn fmt.Errorf(\"Wrong metadata type for Neighbors: %+v\", node.Metadata[\"Neighbors\"])\n\t\t\t\t}\n\n\t\t\t\tvar found bool\n\t\t\t\tfor _, nb := range *neighbors {\n\t\t\t\t\tif nb.MAC == \"a6:d1:a0:51:03:49\" {\n\t\t\t\t\t\tfound = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif !found {\n\t\t\t\t\treturn errors.New(\"unable to find neighbor entry with MAC: a6:d1:a0:51:03:49\")\n\t\t\t\t}\n\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t}\n\tRunTest(t, test)\n}", "func (a API) PingChk() (isNew bool) {\n\tselect {\n\tcase o := <-a.Ch.(chan PingRes):\n\t\tif o.Err != nil {\n\t\t\ta.Result = o.Err\n\t\t} else {\n\t\t\ta.Result = o.Res\n\t\t}\n\t\tisNew = true\n\tdefault:\n\t}\n\treturn\n}", "func setupTestDNSServersRetry(t *testing.T) (s1, s2 *dnsTestServer) {\n\ts1 = runTestDNSServer(t, \"0\")\n\ts2 = runTestDNSServer(t, \"0\")\n\n\tq := DNSQuery{\"NS\", testDomain}\n\ta := DNSAnswers{{\"NS\", s1.Address() + \".\"}, {\"NS\", s2.Address() + \".\"}}\n\ts1.AddEntryToDNSDatabase(q, a)\n\ts2.AddEntryToDNSDatabase(q, a)\n\ts1.AddEntryToDNSDatabaseRetry(q, a)\n\ts2.AddEntryToDNSDatabaseRetry(q, a)\n\n\ts1.Server.Handler.(*dns.ServeMux).HandleFunc(testDomain+\".\", func(w dns.ResponseWriter, r *dns.Msg) {\n\t\tretryDNSHandler(t, w, r, s1, 
false)\n\t})\n\ts2.Server.Handler.(*dns.ServeMux).HandleFunc(testDomain+\".\", func(w dns.ResponseWriter, r *dns.Msg) {\n\t\tretryDNSHandler(t, w, r, s2, true)\n\t})\n\n\treturn s1, s2\n}", "func TestAllowedHostsEvent(t *testing.T) {\n\tvar err error\n\terr = testcert.GenerateCert(\"mail2.guerrillamail.com\", \"\", 365*24*time.Hour, false, 2048, \"P256\", \"../../tests/\")\n\tif err != nil {\n\t\tt.Error(\"failed to generate a test certificate\", err)\n\t\tt.FailNow()\n\t}\n\tdefer cleanTestArtifacts(t)\n\tmainlog, err = getTestLog()\n\tif err != nil {\n\t\tt.Error(\"could not get logger,\", err)\n\t\tt.FailNow()\n\t}\n\tif err := ioutil.WriteFile(\"configJsonD.json\", []byte(configJsonD), 0644); err != nil {\n\t\tt.Error(err)\n\t\tt.FailNow()\n\t}\n\t// start the server by emulating the serve command\n\n\tconf := &guerrilla.AppConfig{} // blank one\n\tif err = conf.Load([]byte(configJsonD)); err != nil { // load configJsonD\n\t\tt.Error(err)\n\t}\n\tcmd := &cobra.Command{}\n\tconfigPath = \"configJsonD.json\"\n\n\tgo func() {\n\t\tserve(cmd, []string{})\n\t}()\n\t// wait for start\n\tif _, err := grepTestlog(\"Listening on TCP 127.0.0.1:2552\", 0); err != nil {\n\t\tt.Error(\"server didn't start\")\n\t}\n\n\t// now connect and try RCPT TO with an invalid host\n\tif conn, buffin, err := test.Connect(conf.Servers[1], 20); err != nil {\n\t\tt.Error(\"Could not connect to new server\", conf.Servers[1].ListenInterface, err)\n\t} else {\n\t\tif result, err := test.Command(conn, buffin, \"HELO example.com\"); err == nil {\n\t\t\texpect := \"250 secure.test.com Hello\"\n\t\t\tif strings.Index(result, expect) != 0 {\n\t\t\t\tt.Error(\"Expected\", expect, \"but got\", result)\n\t\t\t} else {\n\t\t\t\tif result, err = test.Command(conn, buffin, \"RCPT TO:<[email protected]>\"); err == nil {\n\t\t\t\t\texpect := \"454 4.1.1 Error: Relay access denied: grr.la\"\n\t\t\t\t\tif strings.Index(result, expect) != 0 {\n\t\t\t\t\t\tt.Error(\"Expected:\", expect, \"but got:\", result)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t_ = conn.Close()\n\t}\n\n\t// now change the config by adding a host to allowed hosts\n\n\tnewConf := conf\n\tnewConf.AllowedHosts = append(newConf.AllowedHosts, \"grr.la\")\n\tif jsonbytes, err := json.Marshal(newConf); err == nil {\n\t\tif err = ioutil.WriteFile(\"configJsonD.json\", jsonbytes, 0644); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t} else {\n\t\tt.Error(err)\n\t}\n\t// send a sighup signal to the server to reload config\n\tsigHup()\n\n\tif _, err := grepTestlog(\"allowed_hosts config changed\", 0); err != nil {\n\t\tt.Error(\"allowed_hosts config not changed\")\n\t\tt.FailNow()\n\t}\n\n\t// now repeat the same conversion, RCPT TO should be accepted\n\tif conn, buffin, err := test.Connect(conf.Servers[1], 20); err != nil {\n\t\tt.Error(\"Could not connect to new server\", conf.Servers[1].ListenInterface, err)\n\t} else {\n\t\tif result, err := test.Command(conn, buffin, \"HELO example.com\"); err == nil {\n\t\t\texpect := \"250 secure.test.com Hello\"\n\t\t\tif strings.Index(result, expect) != 0 {\n\t\t\t\tt.Error(\"Expected\", expect, \"but got\", result)\n\t\t\t} else {\n\t\t\t\tif result, err = test.Command(conn, buffin, \"RCPT TO:<[email protected]>\"); err == nil {\n\t\t\t\t\texpect := \"250 2.1.5 OK\"\n\t\t\t\t\tif strings.Index(result, expect) != 0 {\n\t\t\t\t\t\tt.Error(\"Expected:\", expect, \"but got:\", result)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t_ = conn.Close()\n\t}\n\n\t// shutdown wait for exit\n\td.Shutdown()\n\n\t// wait for shutdown\n\tif _, err 
:= grepTestlog(\"Backend shutdown completed\", 0); err != nil {\n\t\tt.Error(\"server didn't stop\")\n\t}\n\n}", "func (tk *TestKeys) analysisNullNullDetectNoAddrs(logger model.Logger) bool {\n\tif tk.Control == nil {\n\t\t// we need control data to say we're in this case\n\t\treturn false\n\t}\n\tfor _, query := range tk.Queries {\n\t\tif len(query.Answers) > 0 {\n\t\t\t// when a query has answers, we're not in the NoAddresses case\n\t\t\treturn false\n\t\t}\n\t}\n\tif len(tk.TCPConnect) > 0 {\n\t\t// if we attempted TCP connect, we're not in the NoAddresses case\n\t\treturn false\n\t}\n\tif len(tk.TLSHandshakes) > 0 {\n\t\t// if we attempted TLS handshakes, we're not in the NoAddresses case\n\t\treturn false\n\t}\n\tif len(tk.Control.DNS.Addrs) > 0 {\n\t\t// when the TH resolved addresses, we're not in the NoAddresses case\n\t\treturn false\n\t}\n\tif len(tk.Control.TCPConnect) > 0 {\n\t\t// when the TH used addresses, we're not in the NoAddresses case\n\t\treturn false\n\t}\n\tlogger.Infof(\"website likely down: all DNS lookups failed for both probe and TH\")\n\ttk.NullNullFlags |= analysisFlagNullNullNoAddrs\n\treturn true\n}", "func (s *Server) checkTimeouts() {\n\ts.Debug(\"checking for timeouts\")\n\tfor _, peer := range s.peers {\n\t\t// if the echoTimeout flag is set, it means we didn't receive a response to our last request\n\t\tif peer.echoTimeout {\n\t\t\ts.WithFields(logrus.Fields{\n\t\t\t\t\"peer_name\": peer.name,\n\t\t\t\t\"peer_addr\": peer.addr,\n\t\t\t\t\"id\": peer.echoCounter,\n\t\t\t}).Debug(\"echo timeout\")\n\t\t\ts.promPeerTimeout.WithLabelValues(s.config.NodeName, peer.name).Inc()\n\t\t\ts.updatePeerStatus(peer, Timeout)\n\t\t}\n\t}\n}", "func (f FakeContainerImpl) DetectNetworkDestinations(pid int) ([]containers.NetworkDestination, error) {\n\tpanic(\"implement me\")\n}", "func ping(addrs ...string) ([]string, error) {\n\tres := make([]string, 0)\n\tmu := sync.Mutex{}\n\n\tadditionRes := func(addr string) {\n\t\tmu.Lock()\n\t\tdefer mu.Unlock()\n\t\tres = append(res, addr)\n\t}\n\n\tdiag := func(addr string) bool {\n\t\tconn, err := net.Dial(\"tcp\", addr)\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\tdefer conn.Close()\n\t\treturn true\n\t}\n\n\tping := func(ctx context.Context, addr string, f func(addr string)) {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tdefault:\n\t\t\tif diag(addr) {\n\t\t\t\tf(addr)\n\t\t\t}\n\t\t}\n\t}\n\n\twg := sync.WaitGroup{}\n\tfor _, addr := range addrs {\n\t\twg.Add(1)\n\t\tgo func(addr string) {\n\t\t\tdefer wg.Done()\n\t\t\tctx, cancel := context.WithTimeout(context.Background(), time.Second*1)\n\t\t\tdefer cancel()\n\t\t\tping(ctx, addr, additionRes)\n\t\t}(addr)\n\t}\n\twg.Wait()\n\n\treturn res, nil\n}", "func TestConnectionStateFailedDeleteAllCandidates(t *testing.T) {\n\treport := test.CheckRoutines(t)\n\tdefer report()\n\n\tlim := test.TimeOut(time.Second * 5)\n\tdefer lim.Stop()\n\n\toneSecond := time.Second\n\tKeepaliveInterval := time.Duration(0)\n\n\tcfg := &AgentConfig{\n\t\tNetworkTypes: supportedNetworkTypes(),\n\t\tDisconnectedTimeout: &oneSecond,\n\t\tFailedTimeout: &oneSecond,\n\t\tKeepaliveInterval: &KeepaliveInterval,\n\t}\n\n\taAgent, err := NewAgent(cfg)\n\tassert.NoError(t, err)\n\n\tbAgent, err := NewAgent(cfg)\n\tassert.NoError(t, err)\n\n\tisFailed := make(chan interface{})\n\tassert.NoError(t, aAgent.OnConnectionStateChange(func(c ConnectionState) {\n\t\tif c == ConnectionStateFailed {\n\t\t\tclose(isFailed)\n\t\t}\n\t}))\n\n\tconnect(aAgent, bAgent)\n\t<-isFailed\n\n\tdone := 
make(chan struct{})\n\tassert.NoError(t, aAgent.run(context.Background(), func(ctx context.Context, agent *Agent) {\n\t\tassert.Equal(t, len(aAgent.remoteCandidates), 0)\n\t\tassert.Equal(t, len(aAgent.localCandidates), 0)\n\t\tclose(done)\n\t}))\n\t<-done\n\n\tassert.NoError(t, aAgent.Close())\n\tassert.NoError(t, bAgent.Close())\n}", "func checkDNSConfig(c internalapi.RuntimeService, containerID string, expectedContent []string) {\n\tBy(\"get the content of /etc/resolv.conf via execSync\")\n\tcmd := []string{\"cat\", resolvConfigPath}\n\tstdout, stderr, err := c.ExecSync(containerID, cmd, time.Duration(defaultExecSyncTimeout)*time.Second)\n\tframework.ExpectNoError(err, \"failed to execSync in container %q\", containerID)\n\tfor _, content := range expectedContent {\n\t\tExpect(string(stdout)).To(ContainSubstring(content), \"The stdout output of execSync should contain %q\", content)\n\t}\n\tExpect(stderr).To(BeNil(), \"The stderr should be nil.\")\n\tframework.Logf(\"check DNS config succeed\")\n}", "func edgeDnsOpenApiHealthCheck(clientSecret string, host string, accessToken string, clientToken string) (string, backend.HealthStatus) {\n\tto := time.Now() // now\n\tfrom := to.Add(-5 * time.Minute) // five minutes ago\n\tinterval := Interval(FIVE_MINUTES)\n\n\tfromRounded := roundupTimeForInterval(from, interval)\n\ttoRounded := roundupTimeForInterval(to, interval)\n\topenurl := createOpenUrl(fromRounded, toRounded, interval) // The URL\n\tlog.DefaultLogger.Info(\"edgeDnsOpenApiHealthCheck\", \"openurl\", openurl)\n\n\tconfig := NewEdgegridConfig(clientSecret, host, accessToken, clientToken)\n\n\t// Send HEAD request to the OPEN API\n\tapireq, err := client.NewRequest(*config, \"HEAD\", openurl, nil)\n\tif err != nil {\n\t\tlog.DefaultLogger.Error(\"Error creating HEAD request\", \"err\", err)\n\t\treturn err.Error(), backend.HealthStatusError\n\t}\n\tapiresp, err := client.Do(*config, apireq)\n\tif err != nil {\n\t\tlog.DefaultLogger.Error(\"OPEN API communication error\", \"err\", err)\n\t\treturn err.Error(), backend.HealthStatusError\n\t}\n\n\tlog.DefaultLogger.Info(\"edgeDnsOpenApiHead\", \"Status\", apiresp.Status)\n\n\t// Error response. The datasource cannot reach the OPEN API.\n\tif apiresp.StatusCode != 200 {\n\t\tvar rspDto OpenApiErrorRspDto\n\t\terr := json.NewDecoder(apiresp.Body).Decode(&rspDto)\n\t\tmsg := \"Datasource failed: \"\n\t\tif err != nil { // A JSON decode error. Not the expected body. Use the response status for the error message.\n\t\t\tmsg += apiresp.Status\n\t\t} else {\n\t\t\tmsg += rspDto.Errors[0].Title // E.g. 
\"Some of the requested objects are unauthorized: [foo.bar.com]\"\n\t\t}\n\t\tlog.DefaultLogger.Error(\"edgeDnsOpenApiTest\", \"msg\", msg)\n\t\treturn msg, backend.HealthStatusError\n\t}\n\n\t// Success response\n\treturn \"Data source is working\", backend.HealthStatusOk\n}", "func ping(addr string) bool {\n\tconn, err := net.Dial(\"tcp\", addr)\n\tif err != nil {\n\t\treturn false\n\t}\n\tdefer conn.Close()\n\treturn true\n}", "func Test_Onu_DiscoverIndication_retry_on_discovery_stops(t *testing.T) {\n\tonu := createTestOnu()\n\tonu.DiscoveryRetryDelay = 500 * time.Millisecond\n\tstream := &mockStream{\n\t\tCallCount: 0,\n\t\tCalls: make(map[int]*openolt.Indication),\n\t\tfail: false,\n\t\tchannel: make(chan int, 10),\n\t}\n\tctx, cancel := context.WithCancel(context.TODO())\n\tgo onu.ProcessOnuMessages(ctx, stream, nil)\n\tonu.InternalState.SetState(OnuStateInitialized)\n\t_ = onu.InternalState.Event(OnuTxDiscover)\n\n\tgo func() {\n\t\tfor calls := range stream.channel {\n\t\t\tif calls == 2 {\n\t\t\t\tonu.InternalState.SetState(\"enabled\")\n\t\t\t}\n\t\t}\n\t}()\n\n\tselect {\n\tdefault:\n\tcase <-time.After(1 * time.Second):\n\t\tassert.Equal(t, stream.CallCount, 2)\n\t}\n\tcancel()\n}", "func TestNDPNeighborSolicit(t *testing.T) {\n\tb := []byte{\n\t\t0, 0, 0, 0,\n\t\t1, 2, 3, 4,\n\t\t5, 6, 7, 8,\n\t\t9, 10, 11, 12,\n\t\t13, 14, 15, 16,\n\t}\n\n\t// Test getting the Target Address.\n\tns := NDPNeighborSolicit(b)\n\taddr := testutil.MustParse6(\"102:304:506:708:90a:b0c:d0e:f10\")\n\tif got := ns.TargetAddress(); got != addr {\n\t\tt.Errorf(\"got ns.TargetAddress = %s, want %s\", got, addr)\n\t}\n\n\t// Test updating the Target Address.\n\taddr2 := testutil.MustParse6(\"1112:1314:1516:1718:191a:1b1c:1d1e:1f11\")\n\tns.SetTargetAddress(addr2)\n\tif got := ns.TargetAddress(); got != addr2 {\n\t\tt.Errorf(\"got ns.TargetAddress = %s, want %s\", got, addr2)\n\t}\n\t// Make sure the address got updated in the backing buffer.\n\tif got := tcpip.AddrFrom16Slice(b[ndpNSTargetAddessOffset:][:IPv6AddressSize]); got != addr2 {\n\t\tt.Errorf(\"got targetaddress buffer = %s, want %s\", got, addr2)\n\t}\n}", "func CheckResult(t *testing.T, results <-chan beacon.BeaconOrErr, expected beacon.Beacon) {\n\tbeacons := make([]beacon.BeaconOrErr, 0, 1)\n\tfor b := range results {\n\t\tbeacons = append(beacons, b)\n\t}\n\tSoMsg(\"Expect one result\", len(beacons), ShouldEqual, 1)\n\tSoMsg(\"Contains beacon\", beacons[0].Err, ShouldBeNil)\n\t// Make sure the segment is properly initialized.\n\t_, err := beacons[0].Beacon.Segment.ID()\n\txtest.FailOnErr(t, err)\n\t_, err = beacons[0].Beacon.Segment.FullId()\n\txtest.FailOnErr(t, err)\n\tSoMsg(\"Beacon.Segment should match\", beacons[0].Beacon.Segment, ShouldResemble,\n\t\texpected.Segment)\n\tSoMsg(\"Beacon.InIfId should match\", beacons[0].Beacon.InIfId, ShouldEqual, expected.InIfId)\n}", "func (dht *FullRT) CheckPeers(ctx context.Context, peers ...peer.ID) (int, int) {\n\tctx, span := internal.StartSpan(ctx, \"FullRT.CheckPeers\", trace.WithAttributes(attribute.Int(\"NumPeers\", len(peers))))\n\tdefer span.End()\n\n\tvar peerAddrs chan interface{}\n\tvar total int\n\tif len(peers) == 0 {\n\t\tdht.peerAddrsLk.RLock()\n\t\ttotal = len(dht.peerAddrs)\n\t\tpeerAddrs = make(chan interface{}, total)\n\t\tfor k, v := range dht.peerAddrs {\n\t\t\tpeerAddrs <- peer.AddrInfo{\n\t\t\t\tID: k,\n\t\t\t\tAddrs: v,\n\t\t\t}\n\t\t}\n\t\tclose(peerAddrs)\n\t\tdht.peerAddrsLk.RUnlock()\n\t} else {\n\t\ttotal = len(peers)\n\t\tpeerAddrs = make(chan interface{}, 
total)\n\t\tdht.peerAddrsLk.RLock()\n\t\tfor _, p := range peers {\n\t\t\tpeerAddrs <- peer.AddrInfo{\n\t\t\t\tID: p,\n\t\t\t\tAddrs: dht.peerAddrs[p],\n\t\t\t}\n\t\t}\n\t\tclose(peerAddrs)\n\t\tdht.peerAddrsLk.RUnlock()\n\t}\n\n\tvar success uint64\n\n\tworkers(100, func(i interface{}) {\n\t\ta := i.(peer.AddrInfo)\n\t\tdialctx, dialcancel := context.WithTimeout(ctx, time.Second*3)\n\t\tif err := dht.h.Connect(dialctx, a); err == nil {\n\t\t\tatomic.AddUint64(&success, 1)\n\t\t}\n\t\tdialcancel()\n\t}, peerAddrs)\n\treturn int(success), total\n}", "func testPINs(ctx context.Context, resetUsers bool, r *hwsecremote.CmdRunnerRemote, helper *hwsecremote.CmdHelperRemote, s *testing.State) {\n\tcryptohomeClient := helper.CryptohomeClient()\n\n\tsupportsLE, err := cryptohomeClient.SupportsLECredentials(ctx)\n\tif err != nil {\n\t\ts.Fatal(\"Failed to get supported policies: \", err)\n\t} else if !supportsLE {\n\t\ts.Fatal(\"Device does not support PinWeaver\")\n\t}\n\n\tif resetUsers {\n\t\tif err := helper.DaemonController().Stop(ctx, hwsec.CryptohomeDaemon); err != nil {\n\t\t\ts.Fatal(\"Failed to stop cryptohomeClient\")\n\t\t}\n\t\t// These are to ensure the machine is in a proper state.\n\t\t// Error is not check from these calls because the machine could have no users or le creds yet.\n\t\tr.Run(ctx, \"rm -rf /home/.shadow/low_entropy_creds\")\n\t\tcryptohomeClient.RemoveVault(ctx, user1)\n\t\tcryptohomeClient.RemoveVault(ctx, user2)\n\n\t\tif err := helper.DaemonController().Start(ctx, hwsec.CryptohomeDaemon); err != nil {\n\t\t\ts.Fatal(\"Failed to start cryptohomeClient: \", err)\n\t\t}\n\n\t\tif err := cryptohomeClient.UnmountAll(ctx); err != nil {\n\t\t\ts.Fatal(\"Failed to unmountAll: \", err)\n\t\t}\n\n\t\tif err := cryptohomeClient.MountVault(ctx, \"default\", hwsec.NewPassAuthConfig(user1, testPassword), true, hwsec.NewVaultConfig()); err != nil {\n\t\t\ts.Fatal(\"Failed to create initial user: \", err)\n\t\t}\n\t\tif err := cryptohomeClient.AddVaultKey(ctx, user1, testPassword, \"default\", goodPIN, keyLabel1, true); err != nil {\n\t\t\ts.Fatal(\"Failed to add le credential: \", err)\n\t\t}\n\n\t\toutput, err := cryptohomeClient.GetKeyData(ctx, user1, keyLabel1)\n\t\tif err != nil {\n\t\t\ts.Fatal(\"Failed to get key data: \", err)\n\t\t}\n\t\tif strings.Contains(output, \"auth_locked: true\") {\n\t\t\ts.Fatal(\"Newly created credential is auth locked\")\n\t\t}\n\n\t\tif err := cryptohomeClient.UnmountAll(ctx); err != nil {\n\t\t\ts.Fatal(\"Failed to unmountAll: \", err)\n\t\t}\n\t}\n\n\tif err := testMountCheckViaPIN(ctx, user1, cryptohomeClient); err != nil {\n\t\ts.Fatal(\"PIN failed with freshly created cryptohome: \", err)\n\t}\n\n\t// Run this twice to make sure wrong attempts don't sum up past a good attempt.\n\tif err := almostLockOutPIN(ctx, user1, cryptohomeClient); err != nil {\n\t\ts.Fatal(\"Failed to almost lock out PIN: \", err)\n\t}\n\tif err := testMountCheckViaPIN(ctx, user1, cryptohomeClient); err != nil {\n\t\ts.Fatal(\"PIN failed after almost locking it out: \", err)\n\t}\n\tif err := almostLockOutPIN(ctx, user1, cryptohomeClient); err != nil {\n\t\ts.Fatal(\"Failed to almost lock out PIN for the second time: \", err)\n\t}\n\tif err := testMountCheckViaPIN(ctx, user1, cryptohomeClient); err != nil {\n\t\ts.Fatal(\"PIN failed after almost locking it out for the second time: \", err)\n\t}\n\n\tif err := lockOutPIN(ctx, user1, cryptohomeClient); err != nil {\n\t\ts.Fatal(\"Failed to lock out PIN: \", err)\n\t}\n\tif err := testPINLockedOut(ctx, user1, 
cryptohomeClient); err != nil {\n\t\ts.Fatal(\"Verification of locked out PIN failed: \", err)\n\t}\n\tif err := testMountViaPassword(ctx, user1, cryptohomeClient); err != nil {\n\t\ts.Fatal(\"Password failed after locking out PIN: \", err)\n\t}\n\tif err := testMountCheckViaPIN(ctx, user1, cryptohomeClient); err != nil {\n\t\ts.Fatal(\"PIN failed after locking it out and resetting via password: \", err)\n\t}\n\n\t// Create a new user to test removing.\n\tif err := cryptohomeClient.MountVault(ctx, \"default\", hwsec.NewPassAuthConfig(user2, testPassword), true, hwsec.NewVaultConfig()); err != nil {\n\t\ts.Fatal(\"Failed to create user2: \", err)\n\t}\n\n\tleCredsBeforeAdd, err := leCredsFromDisk(ctx, r)\n\tif err != nil {\n\t\ts.Fatal(\"Failed to get le creds from disk: \", err)\n\t}\n\n\tif err := cryptohomeClient.AddVaultKey(ctx, user2, testPassword, \"default\", goodPIN, keyLabel1, true); err != nil {\n\t\ts.Fatalf(\"Failed to add le credential %s: %v\", keyLabel1, err)\n\t}\n\tif err := cryptohomeClient.AddVaultKey(ctx, user2, testPassword, \"default\", goodPIN, keyLabel2, true); err != nil {\n\t\ts.Fatalf(\"Failed to add le credential %s: %v\", keyLabel2, err)\n\t}\n\tif err := cryptohomeClient.UnmountAll(ctx); err != nil {\n\t\ts.Fatal(\"Failed to unmountAll: \", err)\n\t}\n\n\tleCredsAfterAdd, err := leCredsFromDisk(ctx, r)\n\tif err != nil {\n\t\ts.Fatal(\"Failed to get le creds from disk: \", err)\n\t}\n\n\tif _, err := cryptohomeClient.RemoveVault(ctx, user2); err != nil {\n\t\ts.Fatal(\"Failed to remove vault: \", err)\n\t}\n\n\tleCredsAfterRemove, err := leCredsFromDisk(ctx, r)\n\tif err != nil {\n\t\ts.Fatal(\"Failed to get le creds from disk: \", err)\n\t}\n\n\tif diff := cmp.Diff(leCredsAfterAdd, leCredsBeforeAdd); diff == \"\" {\n\t\ts.Fatal(\"LE cred not added successfully\")\n\t}\n\tif diff := cmp.Diff(leCredsAfterRemove, leCredsBeforeAdd); diff != \"\" {\n\t\ts.Fatal(\"LE cred not cleaned up successfully (-got +want): \", diff)\n\t}\n}", "func validateNtpOnCluster(ntpObj ntpTest) {\n\tBy(fmt.Sprintf(\"ts:%s Validating Cluster\", time.Now().String()))\n\n\tBy(fmt.Sprintf(\"Validates NTP config file on Quorum Nodes\"))\n\tfor _, qnode := range ts.tu.QuorumNodes {\n\t\tip := ts.tu.NameToIPMap[qnode]\n\t\tif ip == ntpObj.oldLeaderIP {\n\t\t\tcontinue // skip validation as cmd is paused on that node\n\t\t}\n\t\tvar ntpServers []string\n\t\tif ip == ntpObj.ntpLeaderIP {\n\t\t\tntpServers = ntpObj.externalNtpServers\n\t\t} else {\n\t\t\tntpServers = []string{ntpObj.ntpLeaderIP}\n\t\t}\n\n\t\tEventually(func() bool {\n\t\t\tntpConf := ts.tu.CommandOutput(ip, \"bash -c 'if [ -f /etc/pensando/pen-ntp/chrony.conf ] ; then cat /etc/pensando/pen-ntp/chrony.conf; fi' \")\n\t\t\tif strings.Count(ntpConf, \"server \") == len(ntpServers) {\n\t\t\t\tfor _, ntpServer := range ntpServers {\n\t\t\t\t\tif strings.Index(ntpConf, \"server \"+ntpServer+\" iburst\") == -1 {\n\t\t\t\t\t\tBy(fmt.Sprintf(\"%v not present in config. 
found %v\", ntpServer, ntpConf))\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tBy(fmt.Sprintf(\"ntpserver: %v ntpconf: %v\", ntpServers, ntpConf))\n\t\t\treturn false\n\t\t}, 75, 5).Should(BeTrue(), \"NTP servers for %v quorum node should be %v\", qnode, ntpServers)\n\t}\n}", "func confirmOne(confirms <-chan amqp.Confirmation) {\n\tlog.Printf(\"waiting for confirmation of one publishing\")\n\n\tif confirmed := <-confirms; confirmed.Ack {\n\t\tlog.Printf(\"confirmed delivery with delivery tag: %d\", confirmed.DeliveryTag)\n\t} else {\n\t\tlog.Printf(\"failed delivery of delivery tag: %d\", confirmed.DeliveryTag)\n\t}\n}", "func testNSCAndICMP(t *testing.T, nodesCount int, nscPodFactory func(*v1.Node) *v1.Pod) {\n\tk8s, err := kube_testing.NewK8s()\n\tdefer k8s.Cleanup()\n\n\tExpect(err).To(BeNil())\n\n\ts1 := time.Now()\n\tk8s.Prepare(\"nsmd\", \"nsc\", \"nsmd-dataplane\", \"icmp-responder-nse\")\n\tlogrus.Printf(\"Cleanup done: %v\", time.Since(s1))\n\n\tnodes_setup := nsmd_test_utils.SetupNodes(k8s, nodesCount, defaultTimeout)\n\n\t// Run ICMP on latest node\n\t_ = nsmd_test_utils.DeployIcmp(k8s, nodes_setup[nodesCount-1].Node, \"icmp-responder-nse1\", defaultTimeout)\n\n\tnscPodNode := nsmd_test_utils.DeployNsc(k8s, nodes_setup[0].Node, \"nsc1\", defaultTimeout)\n\n\tvar nscInfo *nsmd_test_utils.NSCCheckInfo\n\n\tfailures := InterceptGomegaFailures(func() {\n\t\tnscInfo = nsmd_test_utils.CheckNSC(k8s, t, nscPodNode)\n\t})\n\t// Do dumping of container state to dig into what is happened.\n\tif len(failures) > 0 {\n\t\tlogrus.Errorf(\"Failues: %v\", failures)\n\t\tnsmd_test_utils.PrintLogs(k8s, nodes_setup)\n\t\tnscInfo.PrintLogs()\n\n\t\tt.Fail()\n\t}\n}", "func confirmOne(confirms <-chan amqp.Confirmation) {\n\tlog.Debug(\"waiting for confirmation of one publishing\")\n\n\tif confirmed := <-confirms; confirmed.Ack {\n\t\tlog.Debug(\"confirmed delivery with delivery tag: %d\", confirmed.DeliveryTag)\n\t} else {\n\t\tlog.Debug(\"failed delivery of delivery tag: %d\", confirmed.DeliveryTag)\n\t}\n}", "func confirmOne(confirms <-chan amqp.Confirmation) {\n\tlog.Info(fmt.Sprintf(\"waiting for confirmation of one publishing\"))\n\n\tif confirmed := <-confirms; confirmed.Ack {\n\t\tlog.Info(fmt.Sprintf(\"confirmed delivery with delivery tag: %d\", confirmed.DeliveryTag))\n\t} else {\n\t\tlog.Info(fmt.Sprintf(\"failed delivery of delivery tag: %d\", confirmed.DeliveryTag))\n\t}\n}", "func (q *ExecutionBroker) notConfirmedPending() bool {\n\treturn q.pending == insolar.InPending && !q.PendingConfirmed\n}", "func doDNSAnswer(t *testing.T, w dns.ResponseWriter, r *dns.Msg, d dnsDatabase, invertAnswers bool) {\n\tm := new(dns.Msg)\n\tm.SetReply(r)\n\tm.Authoritative = true\n\tq := m.Question[0]\n\tqtype := dns.TypeToString[q.Qtype]\n\tanswers := d[DNSQuery{qtype, strings.TrimSuffix(q.Name, \".\")}]\n\n\tvar seen = make(map[DNSAnswer]bool)\n\n\tfor _, r := range answers {\n\t\tif seen[r] {\n\t\t\tcontinue\n\t\t}\n\t\tseen[r] = true\n\n\t\trr, err := dns.NewRR(fmt.Sprintf(\"%s %s\", q.Name, r.String()))\n\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"err: %s\", err)\n\t\t}\n\n\t\tm.Answer = append(m.Answer, rr)\n\t}\n\n\tif invertAnswers {\n\t\tfor i, j := 0, len(m.Answer)-1; i < j; i, j = i+1, j-1 {\n\t\t\tm.Answer[i], m.Answer[j] = m.Answer[j], m.Answer[i]\n\t\t}\n\t}\n\n\tw.WriteMsg(m)\n}", "func Fping(ips []string) ([]string, error) {\n\tcommands := \"fping \" + strings.Join(ips, \" \")\n\tcmd := exec.Command(\"/bin/sh\", \"-c\", commands)\n\tip, err := 
cmd.Output()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\taliveIps := []string{}\n\tfor _, ip := range strings.Split(string(ip), \"\\n\") {\n\t\tif strings.HasSuffix(ip, \"is alive\") {\n\t\t\taliveIps = append(aliveIps, strings.Trim(ip, \" is alive\"))\n\t\t}\n\t}\n\treturn aliveIps, nil\n}", "func TestFreeStuckCandidateWithCheckQuorum(t *testing.T) {\n\ta := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tb := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tc := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage())\n\tdefer closeAndFreeRaft(a)\n\tdefer closeAndFreeRaft(b)\n\tdefer closeAndFreeRaft(c)\n\n\ta.checkQuorum = true\n\tb.checkQuorum = true\n\tc.checkQuorum = true\n\n\tnt := newNetwork(a, b, c)\n\tsetRandomizedElectionTimeout(b, b.electionTimeout+1)\n\n\tfor i := 0; i < b.electionTimeout; i++ {\n\t\tb.tick()\n\t}\n\tnt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})\n\n\tnt.isolate(1)\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\n\tif c.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateCandidate)\n\t}\n\n\tif c.Term != b.Term+1 {\n\t\tt.Errorf(\"term = %d, want %d\", c.Term, b.Term+1)\n\t}\n\n\t// Vote again for safety\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\tif b.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", b.state, StateFollower)\n\t}\n\n\tif c.state != StateCandidate {\n\t\tt.Errorf(\"state = %s, want %s\", c.state, StateCandidate)\n\t}\n\n\tif c.Term != b.Term+2 {\n\t\tt.Errorf(\"term = %d, want %d\", c.Term, b.Term+2)\n\t}\n\n\tnt.recover()\n\tnt.send(pb.Message{From: 1, To: 3, Type: pb.MsgHeartbeat, Term: a.Term})\n\n\t// Disrupt the leader so that the stuck peer is freed\n\tif a.state != StateFollower {\n\t\tt.Errorf(\"state = %s, want %s\", a.state, StateFollower)\n\t}\n\n\tif c.Term != a.Term {\n\t\tt.Errorf(\"term = %d, want %d\", c.Term, a.Term)\n\t}\n\n\t// Vote again, should become leader this time\n\tnt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup})\n\n\tif c.state != StateLeader {\n\t\tt.Errorf(\"peer 3 state: %s, want %s\", c.state, StateLeader)\n\t}\n\n}", "func TestConnectionStateConnectingToFailed(t *testing.T) {\n\treport := test.CheckRoutines(t)\n\tdefer report()\n\n\tlim := test.TimeOut(time.Second * 5)\n\tdefer lim.Stop()\n\n\toneSecond := time.Second\n\tKeepaliveInterval := time.Duration(0)\n\n\tcfg := &AgentConfig{\n\t\tDisconnectedTimeout: &oneSecond,\n\t\tFailedTimeout: &oneSecond,\n\t\tKeepaliveInterval: &KeepaliveInterval,\n\t}\n\n\taAgent, err := NewAgent(cfg)\n\tassert.NoError(t, err)\n\n\tbAgent, err := NewAgent(cfg)\n\tassert.NoError(t, err)\n\n\tvar isFailed sync.WaitGroup\n\tvar isChecking sync.WaitGroup\n\n\tisFailed.Add(2)\n\tisChecking.Add(2)\n\n\tconnectionStateCheck := func(c ConnectionState) {\n\t\tswitch c {\n\t\tcase ConnectionStateFailed:\n\t\t\tisFailed.Done()\n\t\tcase ConnectionStateChecking:\n\t\t\tisChecking.Done()\n\t\tcase ConnectionStateCompleted:\n\t\t\tt.Errorf(\"Unexpected ConnectionState: %v\", c)\n\t\tdefault:\n\t\t}\n\t}\n\n\tassert.NoError(t, aAgent.OnConnectionStateChange(connectionStateCheck))\n\tassert.NoError(t, bAgent.OnConnectionStateChange(connectionStateCheck))\n\n\tgo func() {\n\t\t_, err := aAgent.Accept(context.TODO(), \"InvalidFrag\", \"InvalidPwd\")\n\t\tassert.Error(t, err)\n\t}()\n\n\tgo func() {\n\t\t_, err := bAgent.Dial(context.TODO(), \"InvalidFrag\", 
\"InvalidPwd\")\n\t\tassert.Error(t, err)\n\t}()\n\n\tisChecking.Wait()\n\tisFailed.Wait()\n\n\tassert.NoError(t, aAgent.Close())\n\tassert.NoError(t, bAgent.Close())\n}", "func (rf *Raft) agreeWithServers(process func(server int) bool) (agree bool) {\n\tdoneChan := make(chan int)\n\tfor i, _ := range rf.peers {\n\t\tif i == rf.me {\n\t\t\tcontinue\n\t\t}\n\t\tgo func(server int) {\n\t\t\tok := process(server)\n\t\t\tif ok {\n\t\t\t\tdoneChan <- server\n\t\t\t}\n\t\t}(i)\n\t}\n\tdeadline := time.After(rf.electionTimeout)\n\tdoneCount := 1\n\tpeerCount := len(rf.peers)\n\tfor {\n\t\tselect {\n\t\tcase <-deadline:\n\t\t\tDPrintf(\"Peer-%d, agreement timeout!\\n\", rf.me)\n\t\t\treturn false\n\t\tcase server := <-doneChan:\n\t\t\tif server >= 0 && server < peerCount {\n\t\t\t\tdoneCount += 1\n\t\t\t\tif doneCount >= peerCount/2+1 {\n\t\t\t\t\tDPrintf(\"Peer-%d's agreement is successful.\", rf.me)\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tDPrintf(\"Peer-%d find an illegal server number=%d when do agreement.\", rf.me, server)\n\t\t\t}\n\t\t}\n\t}\n}", "func validateIpOverlap(d *db.DB, intf string, ipPref string, tblName string) (string, error) {\n log.Info(\"Checking for IP overlap ....\")\n\n ipA, ipNetA, err := net.ParseCIDR(ipPref)\n if err != nil {\n log.Info(\"Failed to parse IP address: \", ipPref)\n return \"\", err\n }\n\n var allIntfKeys []db.Key\n\n for key, _ := range IntfTypeTblMap {\n intTbl := IntfTypeTblMap[key]\n keys, err := d.GetKeys(&db.TableSpec{Name:intTbl.cfgDb.intfTN})\n if err != nil {\n log.Info(\"Failed to get keys; err=%v\", err)\n return \"\", err\n }\n allIntfKeys = append(allIntfKeys, keys...)\n }\n\n if len(allIntfKeys) > 0 {\n for _, key := range allIntfKeys {\n if len(key.Comp) < 2 {\n continue\n }\n ipB, ipNetB, perr := net.ParseCIDR(key.Get(1))\n //Check if key has IP, if not continue\n if ipB == nil || perr != nil {\n continue\n }\n if ipNetA.Contains(ipB) || ipNetB.Contains(ipA) {\n if log.V(3) {\n log.Info(\"IP: \", ipPref, \" overlaps with \", key.Get(1), \" of \", key.Get(0))\n }\n //Handle IP overlap across different interface, reject if in same VRF\n intfType, _, ierr := getIntfTypeByName(key.Get(0))\n if ierr != nil {\n log.Errorf(\"Extracting Interface type for Interface: %s failed!\", key.Get(0))\n return \"\", ierr\n }\n intTbl := IntfTypeTblMap[intfType]\n if intf != key.Get(0) {\n vrfNameA, _ := d.GetMap(&db.TableSpec{Name:tblName+\"|\"+intf}, \"vrf_name\")\n vrfNameB, _ := d.GetMap(&db.TableSpec{Name:intTbl.cfgDb.intfTN+\"|\"+key.Get(0)}, \"vrf_name\")\n if vrfNameA == vrfNameB {\n errStr := \"IP \" + ipPref + \" overlaps with IP \" + key.Get(1) + \" of Interface \" + key.Get(0)\n log.Error(errStr)\n return \"\", errors.New(errStr)\n }\n } else {\n //Handle IP overlap on same interface, replace\n log.Error(\"Entry \", key.Get(1), \" on \", intf, \" needs to be deleted\")\n errStr := \"IP overlap on same interface with IP \" + key.Get(1)\n return key.Get(1), errors.New(errStr)\n }\n }\n }\n }\n return \"\", nil\n}", "func verify(srvChan chan string, channel, nick, hostname string, args []string) {\n\tmessage := \"NOTICE \" + channel + \" :\"\n\tif len(args) != 2 {\n\t\tmessage = \"NOTICE \" + channel + \" :ERROR: Invalid number of arguments\"\n\t} else {\n\t\tuname := args[0]\n\t\tpin := args[1]\n\t\treply := cmdDb.Cmd(\"get\", uname+\"Pin\")\n\t\tpinDb, err := (reply.Bytes())\n\t\tif err != nil {\n\t\t\tlog.Println(err.Error())\n\t\t\treturn\n\t\t}\n\t\tif string(pinDb) == pin {\n\t\t\tmessage += \"You are now verified as \" + 
uname\n\t\t\tcmdDb.Cmd(\"set\", uname+\"Host\", hostname)\n\t\t\tcmdDb.Cmd(\"set\", uname+\"Pin\", fmt.Sprintf(\"%06d\", rand.Intn(1000000)))\n\t\t} else {\n\t\t\tmessage += \"PIN does not match that of \" + uname\n\t\t}\n\t}\n\tlog.Println(message)\n\tsrvChan <- message\n}", "func reachableNakedDestinations(from echo.Instance) match.Matcher {\n\tsrcNw := from.Config().Cluster.NetworkName()\n\texcluded := match.And(\n\t\t// Only exclude naked if all subsets are naked. If an echo instance contains a mix of\n\t\t// subsets with and without sidecars, we'll leave it up to the test to determine what\n\t\t// is reachable.\n\t\tmatch.AllNaked,\n\t\t// TODO we probably don't actually reach all external, but for now maintaining what the tests did\n\t\tmatch.NotExternal,\n\t\tmatch.Not(match.Network(srcNw)))\n\treturn match.Not(excluded)\n}", "func (pexR *PEXReactor) ensurePeers() {\n\tnumOutPeers, _, numDialing := pexR.Switch.NumPeers()\n\tnumToDial := minNumOutboundPeers - (numOutPeers + numDialing)\n\tlog.Info(\"Ensure peers\", \"numOutPeers\", numOutPeers, \"numDialing\", numDialing, \"numToDial\", numToDial)\n\tif numToDial <= 0 {\n\t\treturn\n\t}\n\ttoDial := NewCMap()\n\n\t// Try to pick numToDial addresses to dial.\n\t// TODO: improve logic.\n\tfor i := 0; i < numToDial; i++ {\n\t\tnewBias := MinInt(numOutPeers, 8)*10 + 10\n\t\tvar picked *NetAddress\n\t\t// Try to fetch a new peer 3 times.\n\t\t// This caps the maximum number of tries to 3 * numToDial.\n\t\tfor j := 0; j < 3; j++ {\n\t\t\ttry := pexR.book.PickAddress(newBias)\n\t\t\tif try == nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\talreadySelected := toDial.Has(try.IP.String())\n\t\t\talreadyDialing := pexR.Switch.IsDialing(try)\n\t\t\talreadyConnected := pexR.Switch.Peers().Has(try.IP.String())\n\t\t\tif alreadySelected || alreadyDialing || alreadyConnected {\n\t\t\t\t/*\n\t\t\t\t\tlog.Info(\"Cannot dial address\", \"addr\", try,\n\t\t\t\t\t\t\"alreadySelected\", alreadySelected,\n\t\t\t\t\t\t\"alreadyDialing\", alreadyDialing,\n\t\t\t\t\t\t\"alreadyConnected\", alreadyConnected)\n\t\t\t\t*/\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tlog.Info(\"Will dial address\", \"addr\", try)\n\t\t\t\tpicked = try\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif picked == nil {\n\t\t\tcontinue\n\t\t}\n\t\ttoDial.Set(picked.IP.String(), picked)\n\t}\n\n\t// Dial picked addresses\n\tfor _, item := range toDial.Values() {\n\t\tgo func(picked *NetAddress) {\n\t\t\t_, err := pexR.Switch.DialPeerWithAddress(picked)\n\t\t\tif err != nil {\n\t\t\t\tpexR.book.MarkAttempt(picked)\n\t\t\t}\n\t\t}(item.(*NetAddress))\n\t}\n\n\t// If we need more addresses, pick a random peer and ask for more.\n\tif pexR.book.NeedMoreAddrs() {\n\t\tif peers := pexR.Switch.Peers().List(); len(peers) > 0 {\n\t\t\ti := rand.Int() % len(peers)\n\t\t\tpeer := peers[i]\n\t\t\tlog.Info(\"No addresses to dial. Sending pexRequest to random peer\", \"peer\", peer)\n\t\t\tpexR.RequestPEX(peer)\n\t\t}\n\t}\n}" ]
[ "0.56950366", "0.5444178", "0.52720666", "0.52533114", "0.52149844", "0.520094", "0.51740646", "0.51245964", "0.5120735", "0.5113124", "0.5092491", "0.50820696", "0.50752467", "0.50582534", "0.5049603", "0.5046325", "0.50083274", "0.49557894", "0.49459672", "0.49330953", "0.49275738", "0.49191406", "0.49173397", "0.4886307", "0.48805988", "0.48779425", "0.48666522", "0.4860231", "0.48581913", "0.48500544", "0.484564", "0.48404646", "0.48332363", "0.48209262", "0.48177862", "0.48119703", "0.47943866", "0.47811127", "0.47749135", "0.47660702", "0.47557503", "0.47545767", "0.47470757", "0.474007", "0.4731683", "0.47292712", "0.47216955", "0.47139645", "0.47051215", "0.46996272", "0.46922004", "0.46867004", "0.46795726", "0.4679295", "0.46772897", "0.46670693", "0.46670282", "0.466611", "0.4664764", "0.46633938", "0.4660815", "0.46552253", "0.46510074", "0.46471775", "0.46448383", "0.46423453", "0.46288112", "0.462844", "0.46270874", "0.46172857", "0.4611953", "0.46063423", "0.45975977", "0.45966524", "0.4587473", "0.45851323", "0.45839867", "0.4581589", "0.45810378", "0.45755503", "0.45754343", "0.4569044", "0.45678592", "0.45613593", "0.45591843", "0.45545465", "0.45536536", "0.45506424", "0.45469213", "0.45457238", "0.45438546", "0.4538098", "0.45316136", "0.453049", "0.4528731", "0.4522453", "0.4521634", "0.4520833", "0.451375", "0.45126125" ]
0.71566
0
Check that the DNSListener is called with a correct summary.
func TestListener(t *testing.T) { listener := &fakeListener{} doh, _ := NewTransport(testURL, ips, nil, nil, listener) transport := doh.(*transport) rt := makeTestRoundTripper() transport.client.Transport = rt go func() { req := <-rt.req trace := httptrace.ContextClientTrace(req.Context()) trace.GotConn(httptrace.GotConnInfo{ Conn: &fakeConn{ remoteAddr: &net.TCPAddr{ IP: net.ParseIP("192.0.2.2"), Port: 443, }}}) r, w := io.Pipe() rt.resp <- &http.Response{ StatusCode: 200, Body: r, Request: &http.Request{URL: parsedURL}, } w.Write([]byte{0, 0, 8, 9, 10}) w.Close() }() doh.Query(simpleQueryBytes) s := listener.summary if s.Latency < 0 { t.Errorf("Negative latency: %f", s.Latency) } if !bytes.Equal(s.Query, simpleQueryBytes) { t.Errorf("Wrong query: %v", s.Query) } if !bytes.Equal(s.Response, []byte{0xbe, 0xef, 8, 9, 10}) { t.Errorf("Wrong response: %v", s.Response) } if s.Server != "192.0.2.2" { t.Errorf("Wrong server IP string: %s", s.Server) } if s.Status != Complete { t.Errorf("Wrong status: %d", s.Status) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (d *DNS) Check(ipaddr net.IP) error {\n\t// NOTE: We are ignoring error. It says: \"nodename nor servname\n\t// provided, or not known\" if there is no DNS name for the IP address.\n\tnames, _ := net.LookupAddr(ipaddr.String())\n\td.Names = names\n\treturn nil\n}", "func okHealthCheck(proxy *Proxy) error {\n\treturn nil\n}", "func Check(collection *collection.Collection, dbg, suggest bool) Results {\n\n\tresults := Results{}\n\n\t// Start tests\n\n\t/* Check to make sure that the public DNS server NS records match\n\t Check to make sure the one of the public and the private NS record servers match\n\t Check to make sure there are at least 1 NS server\n\t*/\n\n\tcollection.PublicMatchNS = reflect.DeepEqual(collection.DNS1NS, collection.DNS2NS)\n\tcollection.LocalMatchNS = reflect.DeepEqual(collection.DNS1NS, collection.LocalNS)\n\tif collection.PublicMatchNS && collection.LocalMatchNS && len(collection.LocalNS) > 0 {\n\t\tresults.ResultNS = true\n\t} else {\n\t\tresults.ResultNS = false\n\t}\n\n\t/* Check to make sure the public DNS server Glue records match\n\t Check to make sure the one of the public and the private Glue record servers match\n\t Check to make sure there the Glue record length matches the ns record length\n\t*/\n\n\tcollection.PublicMatchGlue = reflect.DeepEqual(collection.DNS1Glue, collection.DNS2Glue)\n\tcollection.LocalMatchGlue = reflect.DeepEqual(collection.DNS1Glue, collection.LocalGlue)\n\n\tif collection.PublicMatchGlue && collection.LocalMatchGlue && (len(collection.LocalNS) == len(collection.LocalGlue)) && len(collection.LocalNS) > 0 {\n\t\tresults.ResultGlue = true\n\t} else {\n\t\tresults.ResultGlue = false\n\t}\n\n\t/* Check to make sure that we can access all of the name servers and the numbers match */\n\n\tresults.ResultAccess = true\n\tfor _, a := range collection.EndpointStatus {\n\t\tif a && results.ResultAccess {\n\t\t} else {\n\t\t\tresults.ResultAccess = false\n\t\t}\n\t}\n\tif len(collection.EndpointStatus) != len(collection.LocalNS) || len(collection.EndpointStatus) < 1 {\n\t\tresults.ResultAccess = false\n\t}\n\n\t/* Check to make sure both public DNS server results match\n\t Check that the LocalDNS and one of the remotes match\n\t Check that there is more than 1 A record\n\t*/\n\n\tcollection.PublicMatchA = reflect.DeepEqual(collection.DNS1A, collection.DNS2A)\n\tcollection.LocalMatchA = reflect.DeepEqual(collection.DNS1A, collection.LocalA)\n\n\tif collection.PublicMatchA && collection.LocalMatchA && len(collection.LocalA) > 0 && (len(collection.LocalA) == len(collection.DNS1A)) {\n\t\tresults.ResultA = true\n\t} else {\n\t\tresults.ResultA = false\n\t}\n\n\t// check to make sure the SOA records match the domain name we expect\n\tresults.ResultSOAMatch = collection.SOAMatch\n\n\t// Show test results if suggest or debug\n\tif dbg || suggest {\n\t\tfmt.Printf(\"--------------------------------\\n\")\n\t\tdebugPrint(\"NS Record Test\", results.ResultNS)\n\t\tdebugPrint(\"Glue Record Test\", results.ResultGlue)\n\t\tdebugPrint(\"NS Access Test\", results.ResultAccess)\n\t\tdebugPrint(\"SOA Match Test\", results.ResultSOAMatch)\n\t\tdebugPrint(\"A Record Test\", results.ResultA)\n\t\tfmt.Printf(\"--------------------------------\\n\")\n\t}\n\n\t// only print datastructure if debug is on\n\tif dbg {\n\t\tcolor.Cyan.Printf(\"Results Debug:\\n%+v\\n\", results)\n\t}\n\n\treturn (results)\n}", "func (ss *SNSServer) DnsReady() (e error) {\n\n\t// if an SOA provider isn't given, we're done\n\tif ss.SOAProvider == \"\" {\n\t\treturn nil\n\t}\n\n\tvar 
(\n\t\tctx context.Context\n\t\tcancel context.CancelFunc\n\t)\n\n\tif ss.waitForDns > 0 {\n\t\tctx, cancel = context.WithTimeout(context.Background(), ss.waitForDns)\n\t} else {\n\t\tctx, cancel = context.WithCancel(context.Background())\n\t}\n\tdefer cancel()\n\n\t// Creating the dns client for our query\n\tclient := dns.Client{\n\t\tNet: \"tcp\", // tcp to connect to the SOA provider? or udp (default)?\n\t\tDialer: &net.Dialer{\n\t\t\tTimeout: ss.waitForDns,\n\t\t},\n\t}\n\t// the message contains what we are looking for - the SOA record of the host\n\tmsg := dns.Msg{}\n\tmsg.SetQuestion(strings.SplitN(ss.SelfUrl.Host, \":\", 2)[0]+\".\", dns.TypeANY)\n\n\tdefer cancel()\n\n\tvar check = func() <-chan struct{} {\n\t\tvar channel = make(chan struct{})\n\n\t\tgo func(c chan struct{}) {\n\t\t\tvar (\n\t\t\t\terr error\n\t\t\t\tresponse *dns.Msg\n\t\t\t)\n\n\t\t\tfor {\n\t\t\t\t// sending the dns query to the soa provider\n\t\t\t\tresponse, _, err = client.Exchange(&msg, ss.SOAProvider)\n\t\t\t\t// if we found a record, then we are done\n\t\t\t\tif err == nil && response != nil && response.Rcode == dns.RcodeSuccess && len(response.Answer) > 0 {\n\t\t\t\t\tc <- struct{}{}\n\t\t\t\t\tss.metrics.DnsReady.Add(1.0)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\t// otherwise, we keep trying\n\t\t\t\tss.metrics.DnsReadyQueryCount.Add(1.0)\n\t\t\t\tss.logger.Info(\"checking if server's DNS is ready\",\n\t\t\t\t\tzap.String(\"endpoint\", strings.SplitN(ss.SelfUrl.Host, \":\", 2)[0]+\".\"), zap.Error(err), zap.Any(\"response\", response))\n\t\t\t\ttime.Sleep(time.Second)\n\t\t\t}\n\t\t}(channel)\n\n\t\treturn channel\n\t}\n\n\tselect {\n\tcase <-check():\n\tcase <-ctx.Done():\n\t\te = ctx.Err()\n\t}\n\n\treturn\n}", "func (writer *connectivityHooks) dnsDoneHook(di httptrace.DNSDoneInfo) {\n\tstatusString := color.GreenString(\"OK\")\n\tif di.Err != nil {\n\t\tstatusString = color.RedString(\"ERROR\")\n\t\tfmt.Fprint(writer.w, dnsColorFunc(\"Unable to resolve the address : %v\\n\", scrubber.ScrubLine(di.Err.Error())))\n\t}\n\tfmt.Fprintf(writer.w, \"* %v [%v]\\n\\n\", dnsColorFunc(\"DNS Lookup\"), statusString)\n}", "func verifyDnsResourceRecordUpdate(reqLogger logr.Logger, fqdn string, txtValue string) bool {\n\treqLogger.Info(fmt.Sprintf(\"will query DNS in %v seconds\", waitTimePeriodDnsPropogationCheck))\n\n\ttime.Sleep(time.Duration(waitTimePeriodDnsPropogationCheck) * time.Second)\n\n\tdnsChangesPropogated, err := ValidateResourceRecordUpdatesUsingCloudflareDNS(reqLogger, fqdn, txtValue)\n\tif err != nil {\n\t\treqLogger.Error(err, \"could not validate DNS propagation.\")\n\t\treturn false\n\t}\n\n\treturn dnsChangesPropogated\n}", "func NewSummaryListener() Summary {\n\treturn Summary{triggerInterval: 10 * time.Second}\n}", "func (mr *MockDynamicCertPrivateMockRecorder) AddListener(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"AddListener\", reflect.TypeOf((*MockDynamicCertPrivate)(nil).AddListener), arg0)\n}", "func DNSHealth() error { return get(SysDNS) }", "func (ans *ANS) CheckCorrectSetup() error {\n\tconst testPath = \"/cf/consumer/v1/matched-events\"\n\tentireUrl := strings.TrimRight(ans.URL, \"/\") + testPath\n\n\tresponse, err := ans.sendRequest(http.MethodGet, entireUrl, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn handleStatusCode(entireUrl, http.StatusOK, response)\n}", "func (_Contract *ContractCallerSession) HasDNSRecords(node [32]byte, name [32]byte) (bool, error) {\n\treturn 
_Contract.Contract.HasDNSRecords(&_Contract.CallOpts, node, name)\n}", "func (r *checker) verifyLatency(summaries map[string]*dto.Summary, percentile float64, reporter health.Reporter) {\n\tfor peer, summary := range summaries {\n\t\tlatency, err := latencyAtQuantile(summary, percentile)\n\t\tif err != nil {\n\t\t\tr.WithError(err).\n\t\t\t\tWithField(\"peer\", peer).\n\t\t\t\tWithField(\"summary\", summary).\n\t\t\t\tWarn(\"Failed to verify latency.\")\n\t\t\tcontinue\n\t\t}\n\n\t\tif latency > r.LatencyThreshold {\n\t\t\treporter.Add(failureProbe(r.NodeName, peer, latency, r.LatencyThreshold))\n\t\t}\n\t}\n}", "func (r *checker) Check(ctx context.Context, reporter health.Reporter) {\n\tif err := r.check(ctx, reporter); err != nil {\n\t\tr.WithError(err).Debug(\"Failed to verify latency.\")\n\t\treturn\n\t}\n\tif reporter.NumProbes() == 0 {\n\t\treporter.Add(successProbe(r.NodeName, r.LatencyThreshold))\n\t}\n}", "func analyzeDns(w io.Writer, server, hostname string, samples, waitMillis int) {\n\tm := new(dns.Msg)\n\tm.Id = dns.Id()\n\tm.RecursionDesired = true\n\tm.Question = make([]dns.Question, 1)\n\tm.Question[0] = dns.Question{Name: dns.Fqdn(hostname), Qtype: dns.TypeA, Qclass: dns.ClassINET}\n\twait := time.Duration(waitMillis) * time.Millisecond\n\n\tc := new(dns.Client)\n\n\tfmt.Printf(\"QUERY %v (@%v): %v data bytes\\n\", hostname, server, m.Len())\n\n\trtts := make(DurationSlice, samples, samples)\n\tfor i := 0; i < samples; i++ {\n\t\tin, rtt, err := c.Exchange(m, server+\":53\")\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tcontinue\n\t\t}\n\t\trtts[i] = rtt\n\t\tfmt.Fprintf(w, \"%v bytes from %v: ttl=%v time=%v\\n\", in.Len(), server, time.Second*6, rtt)\n\t\ttime.Sleep(wait)\n\t}\n\n\t// NOTE: Potentially Eating Performance for Pretties\n\tvar min, max, avg, stddev time.Duration\n\tmin = rtts.Min()\n\tmax = rtts.Max()\n\tavg = rtts.Avg()\n\tstddev = rtts.Std()\n\n\tfmt.Fprintf(w, \"round-trip min/avg/max/stddev = %v/%v/%v/%v\\n\", min, avg, max, stddev)\n}", "func (h HTTPInvalidRequestLine) LogSummary(s string) error {\n\treturn nil\n}", "func CheckLogItem(logItem *LogItem) bool {\n\tevent := logItem.Event\n\n\tif event == \"LAN access from remote\" {\n\t\t// details includes remote and local addresses\n\t\treturn true\n\t}\n\tif event == \"Internet connected\" {\n\t\t// details includes WAN IP address\n\t\treturn true\n\t}\n\tif event == \"DoS Attack: ACK Scan\" {\n\t\t// details includes remote address and event has attach type\n\t\treturn true\n\t}\n\tif event == \"DoS Attack: ARP Attack\" {\n\t\t// details includes remote address and event has attach type\n\t\treturn true\n\t}\n\tif event == \"DoS Attack: Ascend Kill\" {\n\t\t// details includes remote address and event has attach type\n\t\treturn true\n\t}\n\tif event == \"DoS Attack: RST Scan\" {\n\t\t// details includes remote address and event has attach type\n\t\treturn true\n\t}\n\tif event == \"DoS Attack: SYN/ACK Scan\" {\n\t\t// details includes remote address and event has attach type\n\t\treturn true\n\t}\n\tif event == \"DoS Attack: TCP/UDP Chargen\" {\n\t\t// details includes remote address and event has attach type\n\t\treturn true\n\t}\n\tif event == \"DoS Attack: TCP/UDP Echo\" {\n\t\t// details includes remote address and event has attach type\n\t\treturn true\n\t}\n\tif event == \"DoS Attack: UDP Port Scan\" {\n\t\t// details includes remote address and event has attach type\n\t\treturn true\n\t}\n\tif strings.HasPrefix(event, \"DHCP IP: \") {\n\t\t// details includes MAC address and event has LAN 
IP address\n\t\treturn true\n\t}\n\tif event == \"Time synchronized with NTP server\" {\n\t\t// are there details?\n\t\treturn true\n\t}\n\tif strings.HasPrefix(event, \"email sent to: \") {\n\t\t// no details and event has e-mail address - this is really of no interest\n\t\treturn true\n\t}\n\tlog.Fatal(\"Do not know event \" + event)\n\n\treturn true\n}", "func DNSMonitor(bn string, bnNum int, ctx *zedrouterContext, status *types.NetworkInstanceStatus) {\n\tvar (\n\t\terr error\n\t\tsnapshotLen int32 = 1280 // draft-madi-dnsop-udp4dns-00\n\t\tpromiscuous = true // mainly for switched network\n\t\ttimeout = 10 * time.Second // collect enough packets in 10sec before processing\n\t\tfilter = \"udp and port 53\"\n\t\tswitched bool\n\t\t// XXX come back to handle TCP DNS snoop, more useful for zone transfer\n\t\t// https://github.com/google/gopacket/issues/236\n\t)\n\tif bnNum >= maxBridgeNumber {\n\t\tlog.Errorf(\"Can not snoop on brige number %d\", bnNum)\n\t\treturn\n\t}\n\tif status.Type == types.NetworkInstanceTypeSwitch {\n\t\tswitched = true\n\t\tfilter = \"udp and (port 53 or port 67)\"\n\t}\n\tlog.Functionf(\"(FlowStats) DNS Monitor on %s(bridge-num %d) switched=%v, filter=%s\", bn, bnNum, switched, filter)\n\n\thandle, err := pcap.OpenLive(bn, snapshotLen, promiscuous, timeout, false)\n\tif err != nil {\n\t\tlog.Errorf(\"Can not snoop on bridge %s\", bn)\n\t\treturn\n\t}\n\tdefer handle.Close()\n\n\terr = handle.SetBPFFilter(filter)\n\tif err != nil {\n\t\tlog.Errorf(\"Can not install DNS filter on %s\", bn)\n\t\treturn\n\t}\n\n\tdnssys[bnNum].Done = make(chan bool)\n\tdnssys[bnNum].channelOpen = true\n\tpacketSource := gopacket.NewPacketSource(handle, layers.LinkType(handle.LinkType()))\n\tdnsIn := packetSource.Packets()\n\tfor {\n\t\tselect {\n\t\tcase <-dnssys[bnNum].Done:\n\t\t\tlog.Noticef(\"(FlowStats) DNS Monitor exit on %s(bridge-num %d)\", bn, bnNum)\n\t\t\tdnssys[bnNum].channelOpen = false\n\t\t\tdnssys[bnNum].Lock()\n\t\t\tdnsDataRemove(bnNum)\n\t\t\tdnssys[bnNum].Unlock()\n\n\t\t\tclose(dnssys[bnNum].Done)\n\t\t\treturn\n\t\tcase packet, ok := <-dnsIn:\n\t\t\tif !ok {\n\t\t\t\tlog.Noticef(\"(FlowStats) dnsIn closed on %s(bridge-num %d)\", bn, bnNum)\n\t\t\t\tdnssys[bnNum].channelOpen = false\n\t\t\t\tdnssys[bnNum].Lock()\n\t\t\t\tdnsDataRemove(bnNum)\n\t\t\t\tdnssys[bnNum].Unlock()\n\n\t\t\t\tclose(dnssys[bnNum].Done)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdnslayer := packet.Layer(layers.LayerTypeDNS)\n\t\t\tif switched && dnslayer == nil {\n\t\t\t\tdnssys[bnNum].Lock()\n\t\t\t\tcheckDHCPPacketInfo(bnNum, packet, ctx)\n\t\t\t\tdnssys[bnNum].Unlock()\n\t\t\t} else {\n\t\t\t\tdnssys[bnNum].Lock()\n\t\t\t\tcheckDNSPacketInfo(bnNum, packet, dnslayer)\n\t\t\t\tdnssys[bnNum].Unlock()\n\t\t\t}\n\t\t}\n\t}\n}", "func checkDNSConfig(c internalapi.RuntimeService, containerID string, expectedContent []string) {\n\tBy(\"get the content of /etc/resolv.conf via execSync\")\n\tcmd := []string{\"cat\", resolvConfigPath}\n\tstdout, stderr, err := c.ExecSync(containerID, cmd, time.Duration(defaultExecSyncTimeout)*time.Second)\n\tframework.ExpectNoError(err, \"failed to execSync in container %q\", containerID)\n\tfor _, content := range expectedContent {\n\t\tExpect(string(stdout)).To(ContainSubstring(content), \"The stdout output of execSync should contain %q\", content)\n\t}\n\tExpect(stderr).To(BeNil(), \"The stderr should be nil.\")\n\tframework.Logf(\"check DNS config succeed\")\n}", "func checkListeners(cli kube.CLIClient, namespace string) (diag.Messages, error) {\n\tpods, err := 
cli.Kube().CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{\n\t\t// Find all running pods\n\t\tFieldSelector: \"status.phase=Running\",\n\t\t// Find all injected pods. We don't care about non-injected pods, because the new behavior\n\t\t// mirrors Kubernetes; this is only a breaking change for existing Istio users.\n\t\tLabelSelector: \"security.istio.io/tlsMode=istio\",\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar messages diag.Messages = make([]diag.Message, 0)\n\tg := errgroup.Group{}\n\n\tsem := semaphore.NewWeighted(25)\n\tfor _, pod := range pods.Items {\n\t\tpod := pod\n\t\tif !fromLegacyNetworkingVersion(pod) {\n\t\t\t// Skip check. This pod is already on a version where the change has been made; if they were going\n\t\t\t// to break they would already be broken.\n\t\t\tcontinue\n\t\t}\n\t\tg.Go(func() error {\n\t\t\t_ = sem.Acquire(context.Background(), 1)\n\t\t\tdefer sem.Release(1)\n\t\t\t// Fetch list of all clusters to get which ports we care about\n\t\t\tresp, err := cli.EnvoyDo(context.Background(), pod.Name, pod.Namespace, \"GET\", \"config_dump?resource=dynamic_active_clusters&mask=cluster.name\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"failed to get config dump: \", err)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tports, err := extractInboundPorts(resp)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"failed to get ports: \", err)\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\t// Next, look at what ports the pod is actually listening on\n\t\t\t// This requires parsing the output from ss; the version we use doesn't support JSON\n\t\t\tout, _, err := cli.PodExec(pod.Name, pod.Namespace, \"istio-proxy\", \"ss -ltnH\")\n\t\t\tif err != nil {\n\t\t\t\tif strings.Contains(err.Error(), \"executable file not found\") {\n\t\t\t\t\t// Likely distroless or other custom build without ss. 
Nothing we can do here...\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tfmt.Println(\"failed to get listener state: \", err)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tfor _, ss := range strings.Split(out, \"\\n\") {\n\t\t\t\tif len(ss) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tbind, port, err := net.SplitHostPort(getColumn(ss, 3))\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"failed to get parse state: \", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tip, _ := netip.ParseAddr(bind)\n\t\t\t\tportn, _ := strconv.Atoi(port)\n\t\t\t\tif _, f := ports[portn]; f {\n\t\t\t\t\tc := ports[portn]\n\t\t\t\t\tif bind == \"\" {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t} else if bind == \"*\" || ip.IsUnspecified() {\n\t\t\t\t\t\tc.Wildcard = true\n\t\t\t\t\t} else if ip.IsLoopback() {\n\t\t\t\t\t\tc.Lo = true\n\t\t\t\t\t} else {\n\t\t\t\t\t\tc.Explicit = true\n\t\t\t\t\t}\n\t\t\t\t\tports[portn] = c\n\t\t\t\t}\n\t\t\t}\n\n\t\t\torigin := &kube3.Origin{\n\t\t\t\tType: gvk.Pod,\n\t\t\t\tFullName: resource.FullName{\n\t\t\t\t\tNamespace: resource.Namespace(pod.Namespace),\n\t\t\t\t\tName: resource.LocalName(pod.Name),\n\t\t\t\t},\n\t\t\t\tResourceVersion: resource.Version(pod.ResourceVersion),\n\t\t\t}\n\t\t\tfor port, status := range ports {\n\t\t\t\t// Binding to localhost no longer works out of the box on Istio 1.10+, give them a warning.\n\t\t\t\tif status.Lo {\n\t\t\t\t\tmessages.Add(msg.NewLocalhostListener(&resource.Instance{Origin: origin}, fmt.Sprint(port)))\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\tif err := g.Wait(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn messages, nil\n}", "func validateDNS(){\n dns, err := exec.Command(\"/bin/bash\", \"-c\", \"dig +short rackspace.com\").Output()\n if err != nil {\n log.Fatal(err)\n fmt.Println(\"Unable to validate DNS.\")\n fmt.Printf(\"%s\", err)\n os.Exit(1)\n }\n fmt.Printf(\"%s\", dns)\n}", "func (_Contract *ContractSession) HasDNSRecords(node [32]byte, name [32]byte) (bool, error) {\n\treturn _Contract.Contract.HasDNSRecords(&_Contract.CallOpts, node, name)\n}", "func checkListener(listener interface{}) (reflect.Type, error) {\n lisVal := reflect.TypeOf(listener)\n\n if lisVal.Kind() != reflect.Func || // Listener must obviously be a function\n lisVal.NumIn() != 1 || // Listener function must take only 1 input argument\n lisVal.NumOut() != 0 { // Listener must have no returning argument\n // Listener interface not valid\n return lisVal, ListenerInvalidErr\n }\n\n // Returns input value kind\n return lisVal.In(0), nil\n}", "func ConnVerifyHostname(c *tls.Conn, host string) error", "func DNSARecordsVerification(t *testing.T, hostNames []string) bool {\n\tt.Logf(\"Verifying DNS A Records...\")\n\tFQDNList := lib.FetchDNSARecordsFQDN(t, dnsVSUUID, AviClients[0])\n\tdiffString := DiffOfLists(FQDNList, hostNames)\n\tif len(diffString) == initialNumOfFQDN {\n\t\treturn true\n\t}\n\tnewSharedVSFQDN := DiffOfLists(diffString, initialFQDNList)\n\tvar val int\n\tfor _, fqdn := range newSharedVSFQDN {\n\t\tif strings.HasPrefix(fqdn, clusterName+\"--shared\") == true {\n\t\t\tval++\n\t\t}\n\t}\n\tif (len(newSharedVSFQDN) - val) == 0 {\n\t\treturn true\n\t}\n\treturn false\n}", "func (r *checker) check(ctx context.Context, reporter health.Reporter) error {\n\tpeers, err := r.getPeers()\n\tif err != nil {\n\t\treturn trace.Wrap(err, \"failed to discover nethealth peers\")\n\t}\n\n\tif len(peers) == 0 {\n\t\treturn nil\n\t}\n\n\tsummaries, err := r.LatencyClient.LatencySummariesMilli(ctx)\n\tif err != nil {\n\t\treturn trace.Wrap(err, \"failed to get latency 
summaries\")\n\t}\n\n\tr.verifyLatency(filterByK8s(summaries, peers), r.LatencyQuantile, reporter)\n\n\treturn nil\n}", "func (application *Application) RunServiceHealthCheck(topic string, message interface{}) {\n application.sendServiceDescriptorMessage(topic, message)\n}", "func (mr *MockenvDescriberMockRecorder) ValidateCFServiceDomainAliases() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ValidateCFServiceDomainAliases\", reflect.TypeOf((*MockenvDescriber)(nil).ValidateCFServiceDomainAliases))\n}", "func (e Event) Check() error {\n\tif len(e.Title) == 0 {\n\t\treturn fmt.Errorf(\"statsd.Event title is required\")\n\t}\n\tif len(e.Text) == 0 {\n\t\treturn fmt.Errorf(\"statsd.Event text is required\")\n\t}\n\treturn nil\n}", "func TestAddRemoveUpstreamDNSServer(t *testing.T) {\n\t// Prepare different cases\n\tcases := []struct {\n\t\tName string\n\t\tTestAdding bool\n\t\tFailInVPP bool\n\t\tExpectFailure bool\n\t\tInput net.IP\n\t\tExpected govppapi.Message\n\t}{\n\t\t{\n\t\t\tName: \"successful adding of IPv4 upstream DNS server\",\n\t\t\tTestAdding: true,\n\t\t\tInput: upstreamDNSServerIPv4,\n\t\t\tExpected: &dns.DNSNameServerAddDel{\n\t\t\t\tIsIP6: 0,\n\t\t\t\tIsAdd: 1,\n\t\t\t\tServerAddress: upstreamDNSServerIPv4,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"successful adding of IPv6 upstream DNS server\",\n\t\t\tTestAdding: true,\n\t\t\tInput: upstreamDNSServerIPv6,\n\t\t\tExpected: &dns.DNSNameServerAddDel{\n\t\t\t\tIsIP6: 1,\n\t\t\t\tIsAdd: 1,\n\t\t\t\tServerAddress: upstreamDNSServerIPv6,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"successful removal of IPv4 upstream DNS server\",\n\t\t\tTestAdding: false,\n\t\t\tInput: upstreamDNSServerIPv4,\n\t\t\tExpected: &dns.DNSNameServerAddDel{\n\t\t\t\tIsIP6: 0,\n\t\t\t\tIsAdd: 0,\n\t\t\t\tServerAddress: upstreamDNSServerIPv4,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"successful removal of IPv6 upstream DNS server\",\n\t\t\tTestAdding: false,\n\t\t\tInput: upstreamDNSServerIPv6,\n\t\t\tExpected: &dns.DNSNameServerAddDel{\n\t\t\t\tIsIP6: 1,\n\t\t\t\tIsAdd: 0,\n\t\t\t\tServerAddress: upstreamDNSServerIPv6,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"failure propagation from VPP\",\n\t\t\tTestAdding: false,\n\t\t\tFailInVPP: true,\n\t\t\tExpectFailure: true,\n\t\t\tInput: upstreamDNSServerIPv4,\n\t\t},\n\t\t{\n\t\t\tName: \"bad IP address input\",\n\t\t\tTestAdding: false,\n\t\t\tFailInVPP: true,\n\t\t\tExpectFailure: true,\n\t\t\tInput: nil,\n\t\t},\n\t}\n\n\t// Run all cases\n\tfor _, td := range cases {\n\t\tt.Run(td.Name, func(t *testing.T) {\n\t\t\tctx, vppCalls := setup(t)\n\t\t\tdefer teardown(ctx)\n\t\t\t// prepare reply\n\t\t\tif td.FailInVPP {\n\t\t\t\tctx.MockVpp.MockReply(&dns.DNSNameServerAddDelReply{Retval: 1})\n\t\t\t} else {\n\t\t\t\tctx.MockVpp.MockReply(&dns.DNSNameServerAddDelReply{})\n\t\t\t}\n\n\t\t\t// make the call\n\t\t\tvar err error\n\t\t\tif td.TestAdding {\n\t\t\t\terr = vppCalls.AddUpstreamDNSServer(td.Input)\n\t\t\t} else {\n\t\t\t\terr = vppCalls.DeleteUpstreamDNSServer(td.Input)\n\t\t\t}\n\n\t\t\t// verify result\n\t\t\tif td.ExpectFailure {\n\t\t\t\tExpect(err).Should(HaveOccurred())\n\t\t\t} else {\n\t\t\t\tExpect(err).ShouldNot(HaveOccurred())\n\t\t\t\tExpect(ctx.MockChannel.Msg).To(Equal(td.Expected))\n\t\t\t}\n\t\t})\n\t}\n}", "func (p *plug) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {\n\tstart := time.Now()\n\trequests.Inc()\n\tstate := request.Request{W: w, Req: r}\n\tip := state.IP()\n\n\t// capture the written answer\n\trrw := 
dnstest.NewRecorder(w)\n\trcode, result, err := p.serveDNSInternal(ctx, rrw, r)\n\tif rcode > 0 {\n\t\t// actually send the answer if we have one\n\t\tanswer := new(dns.Msg)\n\t\tanswer.SetRcode(r, rcode)\n\t\tstate.SizeAndDo(answer)\n\t\terr = w.WriteMsg(answer)\n\t\tif err != nil {\n\t\t\treturn dns.RcodeServerFailure, err\n\t\t}\n\t}\n\n\t// increment counters\n\tswitch {\n\tcase err != nil:\n\t\terrorsTotal.Inc()\n\tcase result.Reason == dnsfilter.FilteredBlackList:\n\t\tfiltered.Inc()\n\t\tfilteredLists.Inc()\n\tcase result.Reason == dnsfilter.FilteredSafeBrowsing:\n\t\tfiltered.Inc()\n\t\tfilteredSafebrowsing.Inc()\n\tcase result.Reason == dnsfilter.FilteredParental:\n\t\tfiltered.Inc()\n\t\tfilteredParental.Inc()\n\tcase result.Reason == dnsfilter.FilteredInvalid:\n\t\tfiltered.Inc()\n\t\tfilteredInvalid.Inc()\n\tcase result.Reason == dnsfilter.FilteredSafeSearch:\n\t\t// the request was passsed through but not filtered, don't increment filtered\n\t\tsafesearch.Inc()\n\tcase result.Reason == dnsfilter.NotFilteredWhiteList:\n\t\twhitelisted.Inc()\n\tcase result.Reason == dnsfilter.NotFilteredNotFound:\n\t\t// do nothing\n\tcase result.Reason == dnsfilter.NotFilteredError:\n\t\ttext := \"SHOULD NOT HAPPEN: got DNSFILTER_NOTFILTERED_ERROR without err != nil!\"\n\t\tlog.Println(text)\n\t\terr = errors.New(text)\n\t\trcode = dns.RcodeServerFailure\n\t}\n\n\t// log\n\telapsed := time.Since(start)\n\telapsedTime.Observe(elapsed.Seconds())\n\tif p.settings.QueryLogEnabled {\n\t\tlogRequest(r, rrw.Msg, result, time.Since(start), ip)\n\t}\n\treturn rcode, err\n}", "func (record *DNSRecord) Check() (bool, []string) {\n\tnoErrors := true\n\tvar errs []string\n\n\tif strings.Trim(record.Name, \" \") == \"\" {\n\t\tnoErrors = false\n\t\terrs = append(errs, \"Empty record name\")\n\t}\n\n\tif strings.Trim(record.IPAddr, \" \") == \"\" {\n\t\tnoErrors = false\n\t\terrs = append(errs, \"Empty ip address\")\n\t}\n\n\treturn noErrors, errs\n}", "func (writer *connectivityHooks) dnsStartHook(di httptrace.DNSStartInfo) {\n\tfmt.Fprint(writer.w, dnsColorFunc(\"--- Starting DNS lookup to resolve '%v' ---\\n\", di.Host))\n}", "func CheckDNSRecordsExistence(names []string, dnsDomainID, dnsProviderType string) error {\n\tvar dnsProvider dnsproviders.Provider\n\tswitch dnsProviderType {\n\tcase \"route53\":\n\t\tdnsProvider = dnsproviders.Route53{\n\t\t\tRecordSet: dnsproviders.RecordSet{\n\t\t\t\tRecordSetType: \"A\",\n\t\t\t},\n\t\t\tHostedZoneID: dnsDomainID,\n\t\t\tSharedCreds: true,\n\t\t}\n\tdefault:\n\t\treturn nil\n\t}\n\treturn checkDNSRecordsExistence(names, dnsProvider)\n}", "func (cd *Coredns) testDNS() {\n\tvar successCount int\n\n\t//1. readEtcResolvConf -> compare nameserver with ClusterIP\n\t//nameserver either should be coredns clusterIP or nodeLocalcache DNS IP\n\trc := &ResolvConf{}\n\tdnstest := &Dnstest{}\n\n\terr := rc.readResolvConf()\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to read /etc/resolv.conf file: %s\", err)\n\t\tdnstest.DnsResolution = \"Failed\"\n\t\t//cd.Dnstest = false\n\t\tcd.Dnstest = *dnstest\n\t\treturn\n\t}\n\tcd.ResolvConf = *rc\n\tlog.Infof(\"resolvconf values are: %+v\", rc)\n\n\t//2. 
Match nameserver in /etc/resolv.conf with ClusterIP ->it should match\n\t//from the nameserver IP -> check its coredns or nodeLocalDNSCache\n\tdnstest.Description = \"tests the internal and external DNS queries against ClusterIP and two Coredns Pod IPs\"\n\n\tif rc.Nameserver[0] == cd.ClusterIP {\n\t\tlog.Infof(\"Pod's nameserver is matching to ClusterIP: %s\", rc.Nameserver[0])\n\t} else if rc.Nameserver[0] == \"169.254.20.10\" {\n\t\tcd.HasNodeLocalCache = true\n\t\tlog.Infof(\"Pod's nameserver is matching to NodeLocal DNS Cache: %s\", rc.Nameserver[0])\n\t} else {\n\t\tlog.Warnf(\"Pod's Nameserver is not set to Coredns clusterIP or NodeLocal Cache IP...Review the --cluster-dns parameter of kubelet or check dnsPolicy field of Pod\")\n\t}\n\n\t//3. Test the DNS queries against multiple domains and host\n\t//As per miekg/dns library, domain names MUST be fully qualified before sending them, unqualified names in a message will result in a packing failure.\n\t//Fqdn() just adds . at the end of the query\n\t//If you make query for \"kuberenetes\" then query will be sent to COREDNS as \"kubernetes.\"\n\t//Due to that used FQDN for kubernetes like kubernetes.default.svc.cluster.local\n\tdomains := []string{\"amazon.com\", \"kubernetes.default.svc.cluster.local\"}\n\tdnstest.DomainsTested = domains\n\n\tnameservers := make([]string, 0, 3)\n\tnameservers = append(nameservers, rc.Nameserver...)\n\tnameservers = append(nameservers, cd.EndpointsIP[:2]...) //select only 2 endpoints\n\n\t//tests each DOMAIN against 3 NAMESERVERS (i.e. 1 ClusterIP and 2 COREDNS ENDPOINTS)\n\tdnstest.DnsTestResultForDomains = make([]DnsTestResultForDomain, 0)\n\n\tfor _, dom := range domains {\n\t\tfor _, ns := range nameservers {\n\t\t\tresult := lookupIP(dom, []string{ns})\n\t\t\tdnstest.DnsTestResultForDomains = append(dnstest.DnsTestResultForDomains, *result)\n\t\t}\n\t}\n\n\tfor _, res := range dnstest.DnsTestResultForDomains {\n\t\tif res.Result == \"success\" {\n\t\t\tsuccessCount++\n\t\t}\n\t}\n\tif successCount != len(dnstest.DnsTestResultForDomains) {\n\t\tdnstest.DnsResolution = \"failed\"\n\t}\n\tdnstest.DnsResolution = \"success\"\n\n\tcd.Dnstest = *dnstest\n\t//cd.Dnstest = success\n\tlog.Debugf(\"DNS test completed: %v *dnstest: %v\", cd.Dnstest, *dnstest)\n\n}", "func (mr *MockHostMockRecorder) Addrs() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Addrs\", reflect.TypeOf((*MockHost)(nil).Addrs))\n}", "func (d *Driver) existingNetChecks() {\n\t// Request all networks on the endpoint without any filters\n}", "func (_Contract *ContractCaller) HasDNSRecords(opts *bind.CallOpts, node [32]byte, name [32]byte) (bool, error) {\n\tvar out []interface{}\n\terr := _Contract.contract.Call(opts, &out, \"hasDNSRecords\", node, name)\n\n\tif err != nil {\n\t\treturn *new(bool), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(bool)).(*bool)\n\n\treturn out0, err\n\n}", "func DNSResolveCheck(host string, timeout time.Duration) Check {\n\tresolver := net.Resolver{}\n\treturn func() error {\n\t\tctx, cancel := context.WithTimeout(context.Background(), timeout)\n\t\tdefer cancel()\n\t\taddrs, err := resolver.LookupHost(ctx, host)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(addrs) < 1 {\n\t\t\treturn fmt.Errorf(\"could not resolve host\")\n\t\t}\n\t\treturn nil\n\t}\n}", "func DNSResolveCheck(host string, timeout time.Duration) Check {\n\tresolver := net.Resolver{}\n\treturn func() error {\n\t\tctx, cancel := context.WithTimeout(context.Background(), 
timeout)\n\t\tdefer cancel()\n\t\taddrs, err := resolver.LookupHost(ctx, host)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(addrs) < 1 {\n\t\t\treturn fmt.Errorf(\"could not resolve host\")\n\t\t}\n\t\treturn nil\n\t}\n}", "func CheckRSDPValid(txtAPI hwapi.APIInterfaces, config *tools.Configuration) (bool, error, error) {\n\treturn checkPresence(txtAPI, \"RSDP\") // the HWAPI will validate the RSDP\n}", "func Check(\n\thealthCheckFunc func() bool,\n\tpollDelay time.Duration,\n\thealthTimeout time.Duration,\n\tupdates chan<- bool,\n\tquit <-chan struct{},\n) {\n\tgo check(healthCheckFunc, pollDelay,\n\t\thealthTimeout, updates, quit)\n}", "func (mr *MockAllMockRecorder) Listener() *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Listener\", reflect.TypeOf((*MockAll)(nil).Listener))\n}", "func HealthListener(site site.API) {\n\t// nop\n}", "func appCheckStatsCollect(ctx *zedrouterContext, config *types.AppNetworkConfig,\n\tstatus *types.AppNetworkStatus) {\n\n\toldIPAddr := status.GetStatsIPAddr\n\tif config != nil {\n\t\tstatus.GetStatsIPAddr = config.GetStatsIPAddr\n\t} else {\n\t\tstatus.GetStatsIPAddr = nil\n\t}\n\tpublishAppNetworkStatus(ctx, status)\n\tif status.GetStatsIPAddr == nil && oldIPAddr != nil ||\n\t\tstatus.GetStatsIPAddr != nil && !status.GetStatsIPAddr.Equal(oldIPAddr) {\n\t\tlog.Infof(\"appCheckStatsCollect: config ip %v, status ip %v\", status.GetStatsIPAddr, oldIPAddr)\n\t\tif oldIPAddr == nil && status.GetStatsIPAddr != nil {\n\t\t\tensureStatsCollectRunning(ctx)\n\t\t}\n\t\tappChangeContainerStatsACL(status.GetStatsIPAddr, oldIPAddr)\n\t}\n}", "func (f HandlerQueryFunc) QueryDNS(w RequestWriter, r *Msg) {\n\tgo f(w, r)\n}", "func MockListener(t *testing.T, address string) {\n\tladdr, err := net.ResolveUDPAddr(\"udp\", address)\n\tif err != nil {\n\t\tt.Fatal(\"Couldn't resolve address\", err)\n\t}\n\n\t_, err = net.ListenUDP(\"udp\", laddr)\n\tif err != nil {\n\t\tt.Fatalf(\"Couldn't listen to %s: %s\", address, err)\n\t}\n\n\ttime.Sleep(10 * time.Second)\n\treturn\n}", "func (p *Packet) CheckPacket() (err error) {\n}", "func (mr *MocklbDescriberMockRecorder) DescribeRule(arg0, arg1 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DescribeRule\", reflect.TypeOf((*MocklbDescriber)(nil).DescribeRule), arg0, arg1)\n}", "func (c *Conn) shouldCheck() bool {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tif time.Since(c.lastChecked) >= livenessCheckPeriod {\n\t\tc.lastChecked = time.Now()\n\t\treturn true\n\t}\n\treturn false\n}", "func verifyConfiguration() {\n\n // check connection to docker\n if err := pingDocker(); err == nil {\n log.Printf(\"Connected to docker socket at: unix:///%s\\n\", config.DockerSock)\n } else {\n log.Println(err)\n os.Exit(1)\n }\n\n // check status of vault server\n if err := checkVaultHealth(); err == nil {\n log.Printf(\"Connected to vault server at: %s\\n\", config.VaultAddr)\n } else {\n log.Println(err)\n os.Exit(1)\n }\n}", "func (s *DNSSeeder) loadDNS() {\n\tupdateDNS(s)\n}", "func (h HealthCheckerFunc) HealthCheck(target string, port uint16, proto string) (ok bool, err error) {\n\treturn h(target, port, proto)\n}", "func (d Config) Validate() error {\n\tif d.ID == \"\" {\n\t\treturn fmt.Errorf(\"dns discovery must be given a ID\")\n\t}\n\tswitch strings.ToUpper(d.Type) {\n\tcase \"SRV\":\n\tcase \"A\", \"AAAA\":\n\t\tif d.Port == 0 {\n\t\t\treturn fmt.Errorf(\"Port required for dns discovery type %s\", 
d.Type)\n\t\t}\n\tdefault:\n\t\treturn fmt.Errorf(\"invalid dns discovery records type %s\", d.Type)\n\t}\n\treturn nil\n}", "func (c *Controller) Check(w http.ResponseWriter, r *http.Request) {\n\n fmt.Fprintf(w, \"Checking for new certificates.\")\n c.monitor.Check()\n\n}", "func AddListener(lis Listener) bool {\n\treturn stdLogger.AddListener(lis)\n}", "func (e *Envoy) AddListener(name string, port uint16, l7rules policy.L7DataMap, isIngress bool, logger Logger, wg *completion.WaitGroup) {\n\te.xds.addListener(name, port, l7rules, isIngress, logger, wg)\n}", "func handleHealthCheck(m *MicroService, d *net.Dialer) bool {\r\n\tchange := false\r\n\tfor i, inst := range m.Instances {\r\n\t\t_, err := d.Dial(\"tcp\", inst.Host)\r\n\t\tif err != nil {\r\n\t\t\tif !m.isBlacklisted(i) {\r\n\t\t\t\tm.blackList(i, true)\r\n\t\t\t\tlogInfo(\"Instance: \" + inst.Host + \" is now marked as DOWN\")\r\n\t\t\t\tchange = true\r\n\t\t\t}\r\n\t\t} else {\r\n\t\t\tif m.isBlacklisted(i) {\r\n\t\t\t\tm.blackList(i, false)\r\n\t\t\t\tlogInfo(\"Instance: \" + inst.Host + \" is now marked as UP\")\r\n\t\t\t\tchange = true\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\treturn change\r\n}", "func Check(d *certdata.Data) *errors.Errors {\n\tvar e = errors.New(nil)\n\n\tswitch d.Type {\n\tcase \"EV\":\n\t\tif strings.LastIndex(d.Cert.Subject.CommonName, \"*\") > -1 {\n\t\t\te.Err(\"Certificate should not contain a wildcard\")\n\t\t}\n\t\tfor _, n := range d.Cert.DNSNames {\n\t\t\tif strings.LastIndex(n, \"*\") > -1 {\n\t\t\t\te.Err(\"Certificate subjectAltName '%s' should not contain a wildcard\", n)\n\t\t\t}\n\t\t}\n\tcase \"DV\", \"OV\":\n\t\tif strings.LastIndex(d.Cert.Subject.CommonName, \"*\") > 0 {\n\t\t\te.Err(\"Certificate wildcard is only allowed as prefix\")\n\t\t}\n\t\tfor _, n := range d.Cert.DNSNames {\n\t\t\tif strings.LastIndex(n, \"*\") > 0 {\n\t\t\t\te.Err(\"Certificate subjectAltName '%s' wildcard is only allowed as prefix\", n)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn e\n}", "func (h *MysqlHealthCheck) DoHealthCheck() (resHealthCheck *view.ResHealthCheck, err error) {\n\tif h.DSN == \"\" {\n\t\terr = errors.New(\"mysql dsn is nil\")\n\t\treturn\n\t}\n\t_, err = ParseDSN(h.DSN)\n\tif err != nil {\n\t\treturn\n\t}\n\tif !strings.Contains(h.DSN, \"timeout\") {\n\t\th.DSN += \"&timeout=\" + DefaultTimeOut\n\t}\n\tdb, err := Open(\"mysql\", h)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer db.Close()\n\tif db == nil {\n\t\terr = errors.New(\"can not get mysql connection\")\n\t\treturn\n\t}\n\tif err := db.DB().Ping(); err != nil {\n\t\treturn nil, err\n\t}\n\tresHealthCheck = view.HealthCheckResult(\"mysql\", true, \"success\")\n\treturn\n}", "func SummaryChecks(up int, down int) mapval.Validator {\n\treturn mapval.MustCompile(mapval.Map{\n\t\t\"summary\": mapval.Map{\n\t\t\t\"up\": uint16(up),\n\t\t\t\"down\": uint16(down),\n\t\t},\n\t})\n}", "func (srv *Server) trackListener(ln *net.Listener, add bool) bool {\n\tsrv.mu.Lock()\n\tdefer srv.mu.Unlock()\n\tif add {\n\t\tif srv.shuttingDown() {\n\t\t\treturn false\n\t\t}\n\t\tif srv.listeners == nil {\n\t\t\tsrv.listeners = make(map[*net.Listener]struct{})\n\t\t}\n\t\tsrv.listeners[ln] = struct{}{}\n\t\treturn true\n\t}\n\tdelete(srv.listeners, ln)\n\treturn true\n}", "func usageValidate(cmd string) string {\n\treturn \"Verifies that a Bridge is syntactically valid and can be generated. 
\" +\n\t\t\"Returns with an exit code of 0 in case of success, with an exit code of 1 \" +\n\t\t\"otherwise.\\n\" +\n\t\t\"\\n\" +\n\t\t\"USAGE:\\n\" +\n\t\t\" \" + cmd + \" FILE\\n\"\n}", "func (a App) checkIPAndUpdateDNS() {\n\tlastIPMu.Lock()\n\tdefer lastIPMu.Unlock()\n\n\t// if we don't know the current IP for this domain, try to get it\n\tif lastIP == nil {\n\t\tif recordGetter, ok := a.dnsProvider.(libdns.RecordGetter); ok {\n\t\t\trecs, err := recordGetter.GetRecords(a.ctx, a.eTLDplus1)\n\t\t\tif err == nil {\n\t\t\t\tfor _, r := range recs {\n\t\t\t\t\tif r.Type == \"A\" && r.Name == a.Domain {\n\t\t\t\t\t\tlastIP = net.ParseIP(r.Value)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\ta.logger.Error(\"unable to get current records\", zap.Error(err))\n\t\t\t}\n\t\t}\n\t}\n\n\tip, err := a.ipSource.GetIPv4()\n\tif err != nil {\n\t\ta.logger.Error(\"checking IP address\", zap.Error(err))\n\t\treturn\n\t}\n\tif ip.Equal(lastIP) {\n\t\treturn\n\t}\n\n\ta.logger.Info(\"IP address changed\",\n\t\tzap.String(\"last_ip\", lastIP.String()),\n\t\tzap.String(\"new_ip\", ip.String()),\n\t)\n\terr = a.updateDNS(ip)\n\tif err != nil {\n\t\ta.logger.Error(\"updating DNS record(s) with new IP address\", zap.Error(err))\n\t\treturn\n\t}\n\n\tlastIP = ip\n}", "func CheckRunListener(checkRun event.CheckRun) (bool, error) {\n\tlogger.Infof(\"CheckRun event listener fired [%v]!\", checkRun)\n\treturn true, nil\n}", "func verifyDaemonSettings(config *Config) error {\n\treturn nil\n}", "func testListener(t *testing.T, handler func(io.ReadWriter)) string {\n\tassert := assert.New(t)\n\trequire := require.New(t)\n\taddress := fmt.Sprintf(\"127.0.0.1:%d\", rand.Int31n(16384)+20000)\n\tl, err := net.Listen(`tcp4`, address)\n\trequire.Nil(err)\n\n\tgo func() {\n\t\tc, err := l.Accept()\n\t\trequire.Nil(err)\n\t\tdefer func() {\n\t\t\tassert.Nil(c.Close())\n\t\t}()\n\n\t\tif handler != nil {\n\t\t\thandler(c)\n\t\t}\n\t}()\n\n\treturn address\n}", "func isDNSSEC(r dns.RR) bool {\n\tswitch r.Header().Rrtype {\n\tcase\n\t\tdns.TypeNSEC,\n\t\tdns.TypeNSEC3,\n\t\tdns.TypeDS,\n\t\tdns.TypeRRSIG,\n\t\tdns.TypeSIG,\n\t\tdns.TypeDNSKEY:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}", "func sanityCheck() {\n\tlog.Info(\"Running a sanity check\")\n\n\tsanityCheckDockerSockExists()\n\tcheckMachineHostname()\n\tcheckMachineName()\n}", "func (pr *prepareResult) check(qd *queryDescr) error {\n\tcall := qd.kind == qkCall\n\tif call != pr.fc.IsProcedureCall() {\n\t\treturn fmt.Errorf(\"function code mismatch: query descriptor %s - function code %s\", qd.kind, pr.fc)\n\t}\n\n\tif !call {\n\t\t// only input parameters allowed\n\t\tfor _, f := range pr.parameterFields {\n\t\t\tif f.Out() {\n\t\t\t\treturn fmt.Errorf(\"invalid parameter %s\", f)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (l *LogEntry) addDns(d *guardduty.DnsRequestAction) {\n\tl.DnsDomain = aws.StringValue(d.Domain)\n}", "func TestUsage(t *testing.T) {\n\tinitialize(t)\n\tusageSubscriber(\"testing\", t)\n\tassertUsageSubscriber(\"testing\", true, t)\n\tusageSubscriber(\"witness\", t)\n\tassertUsageSubscriber(\"witness\", true, t)\n\tassertUsageSubscriber(\"badguy\", false, t)\n\tusageWritePayloads(pcnt, t)\n\tusageReadCheck(\"witness\", pcnt, true, t)\n\tusageReadCheck(\"testing\", pcnt, false, t)\n}", "func (cr *cmdRunner) checkNdctl() (errOut error) {\n\tcr.checkOnce.Do(func() {\n\t\tif _, err := cr.lookPath(\"ndctl\"); err != nil {\n\t\t\terrOut = FaultMissingNdctl\n\t\t}\n\t})\n\n\treturn\n}", "func CheckFun(healthCheckFunc 
func() bool,\n\tonUp func(), onDown func(),\n\tpollDelay time.Duration,\n\thealthTimeout time.Duration, quit <-chan struct{}) {\n\tupdates := make(chan bool)\n\tCheck(healthCheckFunc, pollDelay, healthTimeout, updates, quit)\n\tgo func() {\n\t\tfor {\n\t\t\tup, ok := <-updates\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif up {\n\t\t\t\tonUp()\n\t\t\t} else {\n\t\t\t\tonDown()\n\t\t\t}\n\t\t}\n\t}()\n}", "func (s *Server) trackListener(ln *net.Listener, add bool) bool {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\tif s.listeners == nil {\n\t\ts.listeners = make(map[*net.Listener]struct{})\n\t}\n\tif add {\n\t\tif s.shuttingDown() {\n\t\t\treturn false\n\t\t}\n\t\ts.listeners[ln] = struct{}{}\n\t} else {\n\t\tdelete(s.listeners, ln)\n\t}\n\treturn true\n}", "func (mr *MockFirmamentSchedulerServerMockRecorder) AddNodeStats(arg0, arg1 interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"AddNodeStats\", reflect.TypeOf((*MockFirmamentSchedulerServer)(nil).AddNodeStats), arg0, arg1)\n}", "func TCPDialCheck(addr string, timeout time.Duration) Check {\n\treturn func() error {\n\t\tconn, err := net.DialTimeout(\"tcp\", addr, timeout)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn conn.Close()\n\t}\n}", "func TCPDialCheck(addr string, timeout time.Duration) Check {\n\treturn func() error {\n\t\tconn, err := net.DialTimeout(\"tcp\", addr, timeout)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn conn.Close()\n\t}\n}", "func (a *AlienVault) OnDNSRequest(ctx context.Context, req *requests.DNSRequest) {\n\tif !a.System().Config().IsDomainInScope(req.Domain) {\n\t\treturn\n\t}\n\n\tbus := ctx.Value(requests.ContextEventBus).(*eventbus.EventBus)\n\tif bus == nil {\n\t\treturn\n\t}\n\n\ta.CheckRateLimit()\n\tbus.Publish(requests.LogTopic, fmt.Sprintf(\"Querying %s for %s subdomains\", a.String(), req.Domain))\n\ta.executeDNSQuery(ctx, req)\n\n\ta.CheckRateLimit()\n\ta.executeURLQuery(ctx, req)\n}", "func (a *Access) Validate(address string) bool {\n\tdefer timings.Track(\"Access.Validate\", time.Now(), TimingOut)\n\tif address == \"\" {\n\t\treturn false\n\t}\n\n\tvar addr string\n\taparts := strings.Split(address, \":\")\n\taddr = aparts[0]\n\n\tip := net.ParseIP(addr)\n\tif ip == nil {\n\t\tDebugOut.Printf(\"Bad IP address '%s'\\n\", addr)\n\t\treturn false\n\t}\n\n\tfor _, aa := range a.allowAddresses {\n\t\tif aa.Equal(ip) {\n\t\t\tDebugOut.Printf(\"Explicitly allowed address '%s'\\n\", addr)\n\t\t\treturn true\n\t\t}\n\t}\n\tfor _, an := range a.allowNetworks {\n\t\tif an.Contains(ip) {\n\t\t\tDebugOut.Printf(\"Explicitly allowed network '%s'\\n\", addr)\n\t\t\treturn true\n\t\t}\n\t}\n\n\tfor _, da := range a.denyAddresses {\n\t\tif da.Equal(ip) {\n\t\t\tDebugOut.Printf(\"Explicitly denied address '%s'\\n\", addr)\n\t\t\treturn false\n\t\t}\n\t}\n\tfor _, dn := range a.denyNetworks {\n\t\tif dn.Contains(ip) {\n\t\t\tDebugOut.Printf(\"Explicitly denied network '%s'\\n\", addr)\n\t\t\treturn false\n\t\t}\n\t}\n\t// POST: Not explicitly Denied or Allowed\n\tif a.denyAll && !a.allowAll {\n\t\tDebugOut.Printf(\"Explicit denyAll, and no allow for '%s'\\n\", addr)\n\t\treturn false\n\t}\n\n\t// If we have allows, but no denies, assume we want to deny if we get here\n\tif (len(a.allowAddresses) > 0 || len(a.allowNetworks) > 0) && (len(a.denyAddresses) == 0 && len(a.denyNetworks) == 0) {\n\t\tDebugOut.Printf(\"Implicit denyAll, and no allow for '%s'\\n\", addr)\n\t\treturn false\n\t}\n\n\tDebugOut.Printf(\"Implicit allow for '%s'\\n\", addr)\n\treturn 
true\n}", "func certificateCheckCallback(cert *git.Certificate, valid bool, hostname string) git.ErrorCode {\n\treturn 0\n}", "func (args *RequestArgs) Check() *constant.YiError {\n\tif args.AcceptedDomains == nil {\n\t\treturn constant.NewYiErrorf(constant.ERR_ARGS, \"Nil accepted domains\")\n\t}\n\treturn nil\n}", "func (tk *TestKeys) analysisNullNullDetectNoAddrs(logger model.Logger) bool {\n\tif tk.Control == nil {\n\t\t// we need control data to say we're in this case\n\t\treturn false\n\t}\n\tfor _, query := range tk.Queries {\n\t\tif len(query.Answers) > 0 {\n\t\t\t// when a query has answers, we're not in the NoAddresses case\n\t\t\treturn false\n\t\t}\n\t}\n\tif len(tk.TCPConnect) > 0 {\n\t\t// if we attempted TCP connect, we're not in the NoAddresses case\n\t\treturn false\n\t}\n\tif len(tk.TLSHandshakes) > 0 {\n\t\t// if we attempted TLS handshakes, we're not in the NoAddresses case\n\t\treturn false\n\t}\n\tif len(tk.Control.DNS.Addrs) > 0 {\n\t\t// when the TH resolved addresses, we're not in the NoAddresses case\n\t\treturn false\n\t}\n\tif len(tk.Control.TCPConnect) > 0 {\n\t\t// when the TH used addresses, we're not in the NoAddresses case\n\t\treturn false\n\t}\n\tlogger.Infof(\"website likely down: all DNS lookups failed for both probe and TH\")\n\ttk.NullNullFlags |= analysisFlagNullNullNoAddrs\n\treturn true\n}", "func verifyArgs() (bool, string) {\n\tvar errMsg string\n\tvar webhookURL string\n\n\tif *auth == \"\" {\n\t\terrMsg = \"Invalid authentication! It must not be empty.\\n\"\n\t\treturn false, errMsg\n\t}\n\n\tif *address == \"\" {\n\t\terrMsg = \"Invalid URL! It must not be empty.\\n\"\n\t\treturn false, errMsg\n\t}\n\n\tif *port < 1025 || *port > 65535 {\n\t\terrMsg = \"Invalid port! Please, check it is between 1025 and 65535.\\n\"\n\t\treturn false, errMsg\n\t}\n\n\tif *prefix != \"\" {\n\t\t*prefix = strings.Trim(*prefix, \"/\")\n\t}\n\n\twebhookURL = fmt.Sprintf(\"%s:%d\", *address, *port)\n\n\treturn true, webhookURL\n}", "func (cb callBacker) checkCall(fn_name string) (bool, error) {\n\tif cb.Scripter.HasEraValue(fn_name) {\n\t\terr := cb.Scripter.EraCall(fn_name)\n\t\treturn true, err\n\t}\n\treturn false, nil\n}", "func (c *Stats) ServiceCheck(check, message string, status client.Status, tags []string) error {\n\treturn c.client.SendServiceCheck(&client.DDServiceCheck{\n\t\tCheck: prependNamespace(c.namespace, check),\n\t\tHostname: c.host,\n\t\tMessage: message,\n\t\tStatus: status,\n\t\tTags: combineTags(c.tags, tags),\n\t\tTimestamp: time.Now().Unix(),\n\t})\n}", "func checkEnvoyStats(host string, port uint16) error {\n\tstate, ws, err := util.GetReadinessStats(host, port)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get readiness stats: %v\", err)\n\t}\n\n\tif state != nil && admin.ServerInfo_State(*state) != admin.ServerInfo_LIVE {\n\t\treturn fmt.Errorf(\"server is not live, current state is: %v\", admin.ServerInfo_State(*state).String())\n\t}\n\n\tif !ws {\n\t\treturn fmt.Errorf(\"workers have not yet started\")\n\t}\n\n\treturn nil\n}", "func (s *ServiceNetwork) UpdateDNS(ctx context.Context, domainName string, hostedZoneID string, memberName string) (dnsName string, err error) {\n\trequuid := utils.GetReqIDFromContext(ctx)\n\n\t// update dns record\n\tdnsName = dns.GenDNSName(memberName, domainName)\n\tprivateIP := s.serverInfo.GetPrivateIP()\n\n\terr = s.dnsIns.UpdateDNSRecord(ctx, dnsName, privateIP, hostedZoneID)\n\tif err != nil {\n\t\tglog.Errorln(\"UpdateDNSRecord error\", err, \"requuid\", requuid, \"member\", 
memberName)\n\t\treturn dnsName, err\n\t}\n\n\t// make sure DNS returns the updated record\n\tdnsIP, err := s.dnsIns.WaitDNSRecordUpdated(ctx, dnsName, privateIP, hostedZoneID)\n\tif err != nil {\n\t\tglog.Errorln(\"WaitDNSRecordUpdated error\", err, \"expect privateIP\", privateIP, \"got\", dnsIP, \"requuid\", requuid, \"member\", memberName)\n\t\treturn dnsName, err\n\t}\n\n\terr = s.waitDNSLookup(ctx, dnsName, privateIP, requuid)\n\tif err != nil {\n\t\tglog.Errorln(\"waitDNSLookup error\", err, \"requuid\", requuid, \"member\", memberName)\n\t\treturn dnsName, err\n\t}\n\n\tglog.Infoln(\"updated dns\", dnsName, \"to ip\", privateIP, \"requuid\", requuid, \"member\", memberName)\n\treturn dnsName, nil\n}", "func (me TAttlistUrlType) IsSummary() bool { return me.String() == \"summary\" }", "func TestNonExistentHost(test *testing.T) {\n\tt := NewGomegaWithT(test)\n\n\t// Options that do not require administrative privileges.\n\topts := []nettrace.TraceOpt{\n\t\t&nettrace.WithLogging{},\n\t\t&nettrace.WithHTTPReqTrace{\n\t\t\tHeaderFields: nettrace.HdrFieldsOptWithValues,\n\t\t},\n\t\t&nettrace.WithSockTrace{},\n\t\t&nettrace.WithDNSQueryTrace{},\n\t}\n\tclient, err := nettrace.NewHTTPClient(nettrace.HTTPClientCfg{\n\t\tReqTimeout: 5 * time.Second,\n\t}, opts...)\n\tt.Expect(err).ToNot(HaveOccurred())\n\n\treq, err := http.NewRequest(\"GET\", \"https://non-existent-host.com\", nil)\n\tt.Expect(err).ToNot(HaveOccurred())\n\tresp, err := client.Do(req)\n\tt.Expect(err).To(HaveOccurred())\n\tt.Expect(resp).To(BeNil())\n\ttrace, _, err := client.GetTrace(\"non-existent host\")\n\tt.Expect(err).ToNot(HaveOccurred())\n\ttraceBeginAsRel := nettrace.Timestamp{IsRel: true, Rel: 0}\n\n\t// Dial trace\n\tt.Expect(trace.Dials).To(HaveLen(1)) // one failed Dial (DNS failed)\n\tdial := trace.Dials[0]\n\tt.Expect(dial.TraceID).ToNot(BeZero())\n\trelTimeIsInBetween(t, dial.DialBeginAt, traceBeginAsRel, trace.TraceEndAt)\n\trelTimeIsInBetween(t, dial.DialEndAt, dial.DialBeginAt, trace.TraceEndAt)\n\tt.Expect(dial.DstAddress).To(Equal(\"non-existent-host.com:443\"))\n\tt.Expect(dial.ResolverDials).ToNot(BeEmpty())\n\tfor _, resolvDial := range dial.ResolverDials {\n\t\trelTimeIsInBetween(t, resolvDial.DialBeginAt, dial.DialBeginAt, dial.DialEndAt)\n\t\trelTimeIsInBetween(t, resolvDial.DialEndAt, resolvDial.DialBeginAt, dial.DialEndAt)\n\t\tt.Expect(resolvDial.Nameserver).ToNot(BeZero())\n\t\tif !resolvDial.EstablishedConn.Undefined() {\n\t\t\tt.Expect(resolvDial.DialErr).To(BeZero())\n\t\t\tt.Expect(trace.UDPConns.Get(resolvDial.EstablishedConn)).ToNot(BeNil())\n\t\t}\n\t}\n\tt.Expect(dial.DialErr).ToNot(BeZero())\n\tt.Expect(dial.EstablishedConn).To(BeZero())\n\n\t// DNS trace\n\tt.Expect(trace.DNSQueries).ToNot(BeEmpty())\n\tfor _, dnsQuery := range trace.DNSQueries {\n\t\tt.Expect(dnsQuery.FromDial == dial.TraceID).To(BeTrue())\n\t\tt.Expect(dnsQuery.TraceID).ToNot(BeZero())\n\t\tudpConn := trace.UDPConns.Get(dnsQuery.Connection)\n\t\tt.Expect(udpConn).ToNot(BeNil())\n\n\t\tt.Expect(dnsQuery.DNSQueryMsgs).To(HaveLen(1))\n\t\tdnsMsg := dnsQuery.DNSQueryMsgs[0]\n\t\trelTimeIsInBetween(t, dnsMsg.SentAt, udpConn.SocketCreateAt, udpConn.ConnCloseAt)\n\t\tt.Expect(dnsMsg.Questions).To(HaveLen(1))\n\t\tt.Expect(dnsMsg.Questions[0].Name).To(HavePrefix(\"non-existent-host.com.\"))\n\t\tt.Expect(dnsMsg.Questions[0].Type).To(Or(\n\t\t\tEqual(nettrace.DNSResTypeA), Equal(nettrace.DNSResTypeAAAA)))\n\t\tt.Expect(dnsMsg.Truncated).To(BeFalse())\n\n\t\tt.Expect(dnsQuery.DNSReplyMsgs).To(HaveLen(1))\n\t\tdnsReply := 
dnsQuery.DNSReplyMsgs[0]\n\t\trelTimeIsInBetween(t, dnsReply.RecvAt, dnsMsg.SentAt, udpConn.ConnCloseAt)\n\t\tt.Expect(dnsReply.ID == dnsMsg.ID).To(BeTrue())\n\t\tt.Expect(dnsReply.RCode).To(Equal(nettrace.DNSRCodeNXDomain))\n\t\tt.Expect(dnsReply.Answers).To(BeEmpty())\n\t\tt.Expect(dnsReply.Truncated).To(BeFalse())\n\t}\n\n\t// UDP connection trace\n\tt.Expect(trace.UDPConns).ToNot(BeEmpty())\n\tfor _, udpConn := range trace.UDPConns {\n\t\tt.Expect(udpConn.TraceID).ToNot(BeZero())\n\t\tt.Expect(udpConn.FromDial == dial.TraceID).To(BeTrue())\n\t\trelTimeIsInBetween(t, udpConn.SocketCreateAt, dial.DialBeginAt, dial.DialEndAt)\n\t\trelTimeIsInBetween(t, udpConn.ConnCloseAt, udpConn.SocketCreateAt, dial.DialEndAt)\n\t\tt.Expect(net.ParseIP(udpConn.AddrTuple.SrcIP)).ToNot(BeNil())\n\t\tt.Expect(net.ParseIP(udpConn.AddrTuple.DstIP)).ToNot(BeNil())\n\t\tt.Expect(udpConn.AddrTuple.SrcPort).ToNot(BeZero())\n\t\tt.Expect(udpConn.AddrTuple.DstPort).ToNot(BeZero())\n\t\tt.Expect(udpConn.SocketTrace).ToNot(BeNil())\n\t\tt.Expect(udpConn.SocketTrace.SocketOps).ToNot(BeEmpty())\n\t\tfor _, socketOp := range udpConn.SocketTrace.SocketOps {\n\t\t\trelTimeIsInBetween(t, socketOp.CallAt, udpConn.SocketCreateAt, udpConn.ConnCloseAt)\n\t\t\trelTimeIsInBetween(t, socketOp.ReturnAt, socketOp.CallAt, udpConn.ConnCloseAt)\n\t\t}\n\t\tt.Expect(udpConn.Conntract).To(BeNil()) // WithConntrack requires root privileges\n\t\tt.Expect(udpConn.TotalRecvBytes).ToNot(BeZero())\n\t\tt.Expect(udpConn.TotalSentBytes).ToNot(BeZero())\n\t}\n\n\t// TCP connection trace\n\tt.Expect(trace.TCPConns).To(BeEmpty())\n\n\t// TLS tunnel trace\n\tt.Expect(trace.TLSTunnels).To(BeEmpty())\n\n\t// HTTP request trace\n\tt.Expect(trace.HTTPRequests).To(HaveLen(1))\n\thttpReq := trace.HTTPRequests[0]\n\tt.Expect(httpReq.TraceID).ToNot(BeZero())\n\tt.Expect(httpReq.TCPConn).To(BeZero())\n\tt.Expect(httpReq.ProtoMajor).To(BeEquivalentTo(1))\n\tt.Expect(httpReq.ProtoMinor).To(BeEquivalentTo(1))\n\trelTimeIsInBetween(t, httpReq.ReqSentAt, traceBeginAsRel, trace.TraceEndAt)\n\tt.Expect(httpReq.ReqError).ToNot(BeZero())\n\tt.Expect(httpReq.ReqMethod).To(Equal(\"GET\"))\n\tt.Expect(httpReq.ReqURL).To(Equal(\"https://non-existent-host.com\"))\n\tt.Expect(httpReq.ReqHeader).To(BeEmpty())\n\tt.Expect(httpReq.ReqContentLen).To(BeZero())\n\tt.Expect(httpReq.RespRecvAt.Undefined()).To(BeTrue())\n\tt.Expect(httpReq.RespStatusCode).To(BeZero())\n\tt.Expect(httpReq.RespHeader).To(BeEmpty())\n\tt.Expect(httpReq.RespContentLen).To(BeZero())\n\n\terr = client.Close()\n\tt.Expect(err).ToNot(HaveOccurred())\n}", "func DNSLabel(label string) error {\n\tDNSlenLimits := intranger.IntRanger(1, 63)\n\tif !DNSlenLimits.Containing(len(label)) {\n\t\treturn ErrInvalidDNSLabel.CommentF(\"DNS label length can be in range %v\", DNSlenLimits)\n\t}\n\tif !dnsLabelRe.MatchString(label) {\n\t\treturn ErrInvalidDNSLabel.Comment(\n\t\t\t\"must consist of a-Z 1-9 and '-'(dash) letters\",\n\t\t\t\"must start and end with a-Z 1-9 letters\",\n\t\t)\n\t}\n\tif numericRe.MatchString(label) {\n\t\treturn ErrInvalidLabel.CommentF(\"must not consist of all numeric values\")\n\t}\n\treturn nil\n}", "func (h *handler) ValidateBaseDNS(domain *DNSDomain) error {\n\tdnsProvider := h.providerFactory.GetProvider(domain)\n\tdnsNameFromService, err := dnsProvider.GetDomainName()\n\tif err != nil {\n\t\treturn errors.Errorf(\"Can't validate base DNS domain: %v\", err)\n\t}\n\n\tdnsNameFromCluster := strings.TrimSuffix(domain.Name, \".\")\n\tif dnsNameFromService == dnsNameFromCluster {\n\t\t// 
Valid domain\n\t\treturn nil\n\t}\n\tif matched, _ := regexp.MatchString(\".*\\\\.\"+dnsNameFromService, dnsNameFromCluster); !matched {\n\t\treturn errors.Errorf(\"Domain name isn't correlated properly to DNS service\")\n\t}\n\treturn nil\n}", "func TestLookupDNSPanicsOnInvalidType(t *testing.T) {\n\tdefer func() {\n\t\tif r := recover(); r == nil {\n\t\t\tt.Error(\"lookupDNS should panic if an invalid conntype is specified.\")\n\t\t}\n\t}()\n\tlookupDNS(context.Background(), nil, \"name\", \"wssorbashorsomething\")\n}", "func AddPingTimeout() {}", "func IsDNS(str string) bool {\n\tif str == \"\" || len(strings.Replace(str, \".\", \"\", -1)) > 255 {\n\t\t// constraints already violated\n\t\treturn false\n\t}\n\treturn !IsIP(str) && rxDNSName.MatchString(str)\n}", "func stdDNSHandler(t *testing.T, w dns.ResponseWriter, r *dns.Msg, s *dnsTestServer, invertAnswers bool) {\n\tdoDNSAnswer(t, w, r, s.DNSDatabase, invertAnswers)\n}", "func (o *ApplianceAllOfNetworkingIpv4Dhcp) HasDns() bool {\n\tif o != nil && o.Dns != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (ind ErrInvalidNetworkDriver) BadRequest() {}", "func (c *LdapChecker) Check(extConfig external.Check) *pkg.CheckResult {\n\tcheck := extConfig.(v1.LDAPCheck)\n\tld, err := ldap.DialURL(check.Host, ldap.DialWithTLSConfig(&tls.Config{\n\t\tInsecureSkipVerify: check.SkipTLSVerify,\n\t}))\n\tif err != nil {\n\t\treturn Failf(check, \"Failed to connect %v\", err)\n\t}\n\n\tif err := ld.Bind(check.Username, check.Password); err != nil {\n\t\treturn Failf(check, \"Failed to bind using %s %v\", check.Username, err)\n\t}\n\n\treq := &ldap.SearchRequest{\n\t\tScope: ldap.ScopeWholeSubtree,\n\t\tBaseDN: check.BindDN,\n\t\tFilter: check.UserSearch,\n\t}\n\n\ttimer := NewTimer()\n\tres, err := ld.Search(req)\n\n\tif err != nil {\n\t\treturn Failf(check, \"Failed to search %v\", check.Host, err)\n\t}\n\n\tif len(res.Entries) == 0 {\n\t\treturn Failf(check, \"no results returned\")\n\t}\n\n\treturn &pkg.CheckResult{\n\t\tCheck: check,\n\t\tPass: true,\n\t\tDuration: int64(timer.Elapsed()),\n\t}\n}", "func CheckSuiteListener(checkSuite event.CheckSuite) (bool, error) {\n\tlogger.Infof(\"CheckSuite event listener fired [%v]!\", checkSuite)\n\treturn true, nil\n}", "func (s *Suite) printTestSummary() {\n\tif s.ProblemsFound == 0 {\n\t\ts.Logger.Println(\"== SUCCESS: This test completed successfully\\n\")\n\t} else if s.ProblemsFound == 1 {\n\t\ts.Logger.Println(\"== FAILURE:\", s.ProblemsFound, \"problem found in this test\\n\")\n\t} else if s.ProblemsFound > 1 {\n\t\ts.Logger.Println(\"== FAILURE:\", s.ProblemsFound, \"problems found in this test\\n\")\n\t}\n\ts.ProblemsFound = 0\n}" ]
[ "0.56406116", "0.52390766", "0.5200705", "0.5123499", "0.49002868", "0.48812556", "0.48797923", "0.48549375", "0.47794652", "0.47729024", "0.47388643", "0.47244483", "0.4720166", "0.4683753", "0.46482006", "0.46386418", "0.4630596", "0.46139523", "0.45714936", "0.45633712", "0.45629203", "0.45406565", "0.45348868", "0.45154902", "0.44930178", "0.44737926", "0.44638437", "0.4455636", "0.44495568", "0.44470012", "0.44419575", "0.4441092", "0.44375813", "0.44368425", "0.44365278", "0.4431621", "0.4431495", "0.44261488", "0.44261488", "0.44199985", "0.44136977", "0.44111097", "0.44026965", "0.4393666", "0.43926674", "0.43889874", "0.43682986", "0.4359404", "0.43551204", "0.43534058", "0.4349625", "0.43490103", "0.43473285", "0.43472472", "0.43467656", "0.43407694", "0.43382245", "0.43372717", "0.43360493", "0.4332902", "0.4331431", "0.43240282", "0.43214333", "0.43204126", "0.43174043", "0.43152836", "0.43152112", "0.43151954", "0.4309285", "0.43025815", "0.42989504", "0.4295423", "0.42953536", "0.42882106", "0.42879912", "0.42874247", "0.42874247", "0.42849225", "0.4274095", "0.42729318", "0.42688394", "0.4264504", "0.42570862", "0.4248906", "0.42465404", "0.4244743", "0.42410293", "0.4237013", "0.42347422", "0.42243096", "0.42233887", "0.42222026", "0.42203248", "0.42178047", "0.42165473", "0.42134497", "0.42104563", "0.42082024", "0.42076507", "0.42070478" ]
0.5148379
3
Test a successful query over TCP
func TestAccept(t *testing.T) {
	doh := newFakeTransport()
	client, server := makePair()

	// Start the forwarder running.
	go Accept(doh, server)

	lbuf := make([]byte, 2)
	// Send Query
	queryData := simpleQueryBytes
	binary.BigEndian.PutUint16(lbuf, uint16(len(queryData)))
	n, err := client.Write(lbuf)
	if err != nil {
		t.Fatal(err)
	}
	if n != 2 {
		t.Error("Length write problem")
	}
	n, err = client.Write(queryData)
	if err != nil {
		t.Fatal(err)
	}
	if n != len(queryData) {
		t.Error("Query write problem")
	}

	// Read query
	queryRead := <-doh.query
	if !bytes.Equal(queryRead, queryData) {
		t.Error("Query mismatch")
	}

	// Send fake response
	responseData := []byte{1, 2, 8, 9, 10}
	doh.response <- responseData

	// Get Response
	n, err = client.Read(lbuf)
	if err != nil {
		t.Fatal(err)
	}
	if n != 2 {
		t.Error("Length read problem")
	}
	rlen := binary.BigEndian.Uint16(lbuf)
	resp := make([]byte, int(rlen))
	n, err = client.Read(resp)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(responseData, resp) {
		t.Error("Response mismatch")
	}
	client.Close()
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (my *MySQL) Ping() (err os.Error) {\n defer my.unlock()\n defer catchOsError(&err)\n my.lock()\n\n if my.conn == nil {\n return NOT_CONN_ERROR\n }\n if my.unreaded_rows {\n return UNREADED_ROWS_ERROR\n }\n\n // Send command\n my.sendCmd(_COM_PING)\n // Get server response\n my.getResult(nil)\n\n return\n}", "func TestShortQuery(t *testing.T) {\n\tvar qerr *queryError\n\tdoh, _ := NewTransport(testURL, ips, nil, nil, nil)\n\t_, err := doh.Query([]byte{})\n\tif err == nil {\n\t\tt.Error(\"Empty query should fail\")\n\t} else if !errors.As(err, &qerr) {\n\t\tt.Errorf(\"Wrong error type: %v\", err)\n\t} else if qerr.status != BadQuery {\n\t\tt.Errorf(\"Wrong error status: %d\", qerr.status)\n\t}\n\n\t_, err = doh.Query([]byte{1})\n\tif err == nil {\n\t\tt.Error(\"One byte query should fail\")\n\t} else if !errors.As(err, &qerr) {\n\t\tt.Errorf(\"Wrong error type: %v\", err)\n\t} else if qerr.status != BadQuery {\n\t\tt.Errorf(\"Wrong error status: %d\", qerr.status)\n\t}\n}", "func (c *client) testConnection() error {\n\treturn c.db.Ping()\n}", "func TestConnection(ip string, port int, user, key string) error {\n\tclient, err := NewClient(ip, port, user, key)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn client.Shell(false, \"exit\")\n}", "func tcpData(host string, port int, sourceIP string, healthCheck HealthCheck) (Status, error, string) {\n\n\ttcpAddr, err := net.ResolveTCPAddr(\"tcp\", fmt.Sprintf(\"%s:%d\", host, port))\n\tif err != nil {\n\t\treturn Offline, err, fmt.Sprintf(\"failed to resolve to an address: %s:%d\", host, port)\n\t}\n\n\tlocalAddr, errl := net.ResolveIPAddr(\"ip\", sourceIP)\n\tif errl != nil {\n\t\treturn Offline, errl, fmt.Sprintf(\"failed to resolve to an ip adress: %s\", sourceIP)\n\t}\n\n\tlocalTCPAddr := net.TCPAddr{\n\t\tIP: localAddr.IP,\n\t}\n\n\t// Custom dialer with\n\tconn, err := net.DialTCP(\"tcp\", &localTCPAddr, tcpAddr)\n\tif err != nil {\n\t\treturn Offline, err, fmt.Sprintf(\"failed to dail from source: %+v target: %+v\", localTCPAddr, *tcpAddr)\n\t}\n\n\tdefer conn.Close()\n\n\tfmt.Fprintf(conn, healthCheck.TCPRequest)\n\tr, err := regexp.Compile(healthCheck.TCPReply)\n\tif err != nil {\n\t\treturn Offline, err, fmt.Sprintf(\"regex Compile failed on %s\", healthCheck.TCPReply)\n\t}\n\n\tconn.SetReadDeadline(time.Now().Add(time.Duration(healthCheck.Timeout) * time.Second))\n\tfor {\n\t\tline, err := bufio.NewReader(conn).ReadString('\\n')\n\t\tif err != nil {\n\t\t\treturn Offline, err, fmt.Sprintf(\"failed with last input %s\", line)\n\t\t}\n\n\t\tif r.MatchString(line) {\n\t\t\treturn Online, nil, \"OK\"\n\t\t}\n\t}\n}", "func TestTCPSimple(t *testing.T) {\n name := \"hi.txt\"\n contents := \"bye\"\n exptime := 300000\n conn, err := net.Dial(\"tcp\", \"localhost:8080\")\n if err != nil {\n t.Error(err.Error()) // report error through testing framework\n }\n scanner := bufio.NewScanner(conn)\n\n // Write a file\n _,err = fmt.Fprintf(conn, \"write %v %v %v\\r\\n%v\\r\\n\", name, len(contents), exptime ,contents)\n if err !=nil {\n fmt.Printf(\"error in writing in buffer\\n\")\n }\n scanner.Scan() // read first line\n resp := scanner.Text() // extract the text from the buffer\n version := VerifyWriteSucess(t,resp)\n\n // try read now\n fmt.Fprintf(conn, \"read %v\\r\\n\", name)\n scanner.Scan()\n output := scanner.Text()\n scanner.Scan() \n VerifyReadSucess(t,output,version,contents,scanner.Text())\n}", "func ping(addr string) bool {\n\tconn, err := net.Dial(\"tcp\", addr)\n\tif err != nil {\n\t\treturn false\n\t}\n\tdefer 
conn.Close()\n\treturn true\n}", "func TestTCPClientConnect(t *testing.T) {\n\tclient, err := clients.New(testAddr, \"\")\n\tif err != nil {\n\t\tt.Fatalf(\"Client failed to connect - %s\", err.Error())\n\t\tt.FailNow()\n\t}\n\tdefer client.Close()\n\n\tif err := client.Ping(); err != nil {\n\t\tt.Fatalf(\"ping failed\")\n\t}\n\tif msg := <-client.Messages(); msg.Data != \"pong\" {\n\t\tt.Fatalf(\"Unexpected data: Expecting 'pong' got %s\", msg.Data)\n\t}\n\tclient.Ping()\n}", "func CheckPing(addr string) bool {\n\tgotit := false\n\tfor {\n\t\tconn, err := net.Dial(\"tcp\", addr+\":22\")\n\t\tif err == nil {\n\t\t\t// log.Fatal(err.Error())\n\t\t\tfmt.Print(\"got conncetion\")\n\t\t\tgotit = true\n\t\t\tconn.Close()\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(10 * time.Second)\n\t}\n\treturn gotit\n}", "func TestBadTCPClientConnect(t *testing.T) {\n\tclient, err := clients.New(\"321321321\", \"\")\n\tif err == nil {\n\t\tt.Fatalf(\"Client succeeded to connect\")\n\t}\n\tclient, err = clients.New(testAddr, \"hil\")\n\tif err != nil {\n\t\tt.Fatalf(\"Client failed to connect - %s\", err.Error())\n\t\tt.FailNow()\n\t}\n\tdefer client.Close()\n\n\tif err := client.Ping(); err != nil {\n\t\tt.Fatalf(\"ping failed\")\n\t}\n\tif msg := <-client.Messages(); msg.Data != \"pong\" {\n\t\tt.Fatalf(\"Unexpected data: Expecting 'pong' got %s\", msg.Data)\n\t}\n\tclient.Ping()\n}", "func TestQuery(t *testing.T) {\n\t_, smc := getConnectionToShardMasterRaftLeader(t)\n\n\tfireQueryRequest(t, smc, &pb.QueryArgs{Num: -1})\n\n}", "func TestAcceptFail(t *testing.T) {\n\tdoh := newFakeTransport()\n\tclient, server := makePair()\n\n\t// Start the forwarder running.\n\tgo Accept(doh, server)\n\n\tlbuf := make([]byte, 2)\n\t// Send Query\n\tqueryData := simpleQueryBytes\n\tbinary.BigEndian.PutUint16(lbuf, uint16(len(queryData)))\n\tclient.Write(lbuf)\n\tclient.Write(queryData)\n\n\t// Indicate that the query failed\n\tdoh.err = errors.New(\"fake error\")\n\n\t// Read query\n\tqueryRead := <-doh.query\n\tif !bytes.Equal(queryRead, queryData) {\n\t\tt.Error(\"Query mismatch\")\n\t}\n\n\t// Accept should have closed the socket.\n\tn, _ := client.Read(lbuf)\n\tif n != 0 {\n\t\tt.Error(\"Expected to read 0 bytes\")\n\t}\n}", "func GetCommandToTestTCPConnection(host string, port int32) string {\n\treturn fmt.Sprintf(\"if (-Not (Test-NetConnection %s -Port %d).TcpTestSucceeded)\"+\n\t\t\" {Write-Output 'connection failed:'; exit 10}\", host, port)\n}", "func TestTCPSimple(t *testing.T) {\n\tname := \"hi.txt\"\n\tcontents := \"bye\"\n\texptime := 300000\n\tconn, err := net.Dial(\"tcp\", \"localhost:8080\")\n\tif err != nil {\n\t\tt.Error(err.Error()) // report error through testing framework\n\t}\n\tscanner := bufio.NewScanner(conn)\n\t// Write a file\n\tfmt.Fprintf(conn, \"write %v %v %v\\r\\n%v\\r\\n\", name, len(contents), exptime, contents)\n\tscanner.Scan() // read first line\n\tresp := scanner.Text() // extract the text from the buffer\n\tarr := strings.Split(resp, \" \") // split into OK and <version>\n\texpect(t, arr[0], \"OK\")\n\tver, err := strconv.Atoi(arr[1]) // parse version as number\n\tif err != nil {\n\t\tt.Error(\"Non-numeric version found\")\n\t}\n\tversion := int64(ver)\n\n\tfmt.Fprintf(conn, \"read %v\\r\\n\", name) // try a read now\n\tscanner.Scan()\n\n\tarr = strings.Split(scanner.Text(), \" \")\n\texpect(t, arr[0], \"CONTENTS\")\n\texpect(t, arr[1], fmt.Sprintf(\"%v\", version)) // expect only accepts strings, convert int version to string\n\texpect(t, arr[2], fmt.Sprintf(\"%v\", 
len(contents)))\n\tscanner.Scan()\n\texpect(t, contents, scanner.Text())\n\tconn.Close()\n}", "func query(object string, server string, tcpport string) (string, error) {\r\n\t// open connnection\r\n\tloggers.Info.Printf(\"whois.query() setup connection\")\r\n\tconn, err := net.DialTimeout(\"tcp\", net.JoinHostPort(server, tcpport), time.Second*30)\r\n\tif err != nil {\r\n\t\treturn \"\", fmt.Errorf(\"whois: connect to whois server failed: %v\", err)\r\n\t}\r\n\tdefer conn.Close()\r\n\t// set connection write timeout\r\n\t_ = conn.SetWriteDeadline(time.Now().Add(time.Second * 30))\r\n\t_, err = conn.Write([]byte(object + \"\\r\\n\"))\r\n\tif err != nil {\r\n\t\treturn \"\", fmt.Errorf(\"whois: send to whois server failed: %v\", err)\r\n\t}\r\n\t// set connection read timeout\r\n\t_ = conn.SetReadDeadline(time.Now().Add(time.Second * 30))\r\n\tbuffer, err := ioutil.ReadAll(conn)\r\n\tif err != nil {\r\n\t\treturn \"\", fmt.Errorf(\"whois: read from whois server failed: %v\", err)\r\n\t}\r\n\t// return result\r\n\treturn string(buffer), nil\r\n}", "func hostDockerQuery() {\n\tlog.Println(\"hostDockerQuery\")\n\tfor {\n\t\ttime.Sleep(2 * time.Second)\n\t\tc, err := net.Dial(\"unix\", \"/var/run/dockerConnection/hostconnection.sock\")\n\t\tif err != nil {\n\t\t\tcontinue;\n\t\t}\n\t\t// send to socket\n\t\tlog.Println(\"sending request to server\")\n\t\tfmt.Fprintf(c, \"hi\" + \"\\n\")\n\t\t// listen for reply\n\t\tmessage, _ := bufio.NewReader(c).ReadString('\\n')\n\t\t//log.Println(\"Message from server: \" + message)\n\t\tlog.Println(\"Received update from host server\")\n\n\t\t// set this to be the latest response\n\t\tlatestHostServerResponse = message\n\t}\n}", "func TestDialTCP(t *testing.T) {\n\tt.Logf(\"Running DialTCP test to %s:%s\", TEST_HOST, TEST_PORT)\n\tdb, err = DialTCP(TEST_HOST, TEST_USER, TEST_PASSWD, TEST_DBNAME)\n\tif err != nil {\n\t\tt.Logf(\"Error %s\", err)\n\t\tt.Fail()\n\t}\n\terr = db.Close()\n\tif err != nil {\n\t\tt.Logf(\"Error %s\", err)\n\t\tt.Fail()\n\t}\n}", "func checkRemoteConnection(serverAddr string) error {\n\n\tlog.Println(\"Connecting to sync server at\", serverAddr)\n\n\tconn := getRemoteConnection(serverAddr, false)\n\tif conn == nil {\n\t\treturn errors.New(\"Cannot connect to sync server: \" + serverAddr)\n\t}\n\n\terr := tcpSend(conn, \"Test\\n\")\n\tif err != nil {\n\t\treturn errors.New(\"Cannot send data to remote server. 
\" + err.Error())\n\t}\n\n\treturn nil\n}", "func main() {\n conn, err := net.Dial(\"tcp\", defaultHost + \":\" + strconv.Itoa(defaultPort))\n if err != nil {\n fmt.Printf(\"Dial error\\n\")\n return\n }\n\n reader := bufio.NewReader(conn)\n testPut(conn, reader, \"foo\", \"bar\")\n testGet(conn, reader, \"foo\")\n\n testPut(conn, reader, \"foo\", \"sun\")\n testGet(conn, reader, \"foo\")\n\n testPut(conn, reader, \"foo\", \"sweet\")\n testGet(conn, reader, \"foo\")\n\n testDelete(conn, reader, \"foo\")\n testGet(conn, reader, \"foo\")\n\n conn.Close()\n}", "func TCP(address string) bool {\n\tconn, err := net.DialTimeout(\"tcp\", address, timeout)\n\tif err != nil {\n\t\treturn false\n\t}\n\tconn.Close()\n\treturn true\n}", "func TestSendFailed(t *testing.T) {\n\tdoh, _ := NewTransport(testURL, ips, nil, nil, nil)\n\ttransport := doh.(*transport)\n\trt := makeTestRoundTripper()\n\ttransport.client.Transport = rt\n\n\trt.err = errors.New(\"test\")\n\t_, err := doh.Query(simpleQueryBytes)\n\tvar qerr *queryError\n\tif err == nil {\n\t\tt.Error(\"Send failure should be reported\")\n\t} else if !errors.As(err, &qerr) {\n\t\tt.Errorf(\"Wrong error type: %v\", err)\n\t} else if qerr.status != SendFailed {\n\t\tt.Errorf(\"Wrong error status: %d\", qerr.status)\n\t} else if !errors.Is(qerr, rt.err) {\n\t\tt.Errorf(\"Underlying error is not retained\")\n\t}\n}", "func (c *Client) queryNode(miniProtocol multiplex.MiniProtocol, dataItems []cbor.DataItem) (*multiplex.ServiceDataUnit, error) {\n\n\t// Step 1: Create message for the request\n\tsdu := multiplex.NewServiceDataUnit(miniProtocol, multiplex.MessageModeInitiator, dataItems)\n\tif log.IsLevelEnabled(log.DebugLevel) {\n\t\tlog.Debug(\"Multiplexed Request:\")\n\t\tfmt.Println(\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> R E Q U E S T >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\")\n\t\tfmt.Println(sdu.Debug())\n\t}\n\n\t// Step 2: Send the request\n\tmessageResponse, err := c.socket.Write(sdu.Bytes())\n\tif err != nil && err != io.EOF {\n\t\treturn nil, fmt.Errorf(\"Error writing to socket %w\", err)\n\t}\n\tif log.IsLevelEnabled(log.DebugLevel) && messageResponse != nil {\n\t\tlog.Debug(\"Multiplexed Response:\")\n\t\tfmt.Println(\"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< R E S P O N S E <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\")\n\t\tfmt.Println(messageResponse.Debug())\n\t}\n\n\treturn messageResponse, nil\n}", "func TestServerShortLivedConn(t *testing.T) {\n\tserver := newTestServer()\n\tdefer server.Stop()\n\n\tlistener, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\tt.Fatal(\"can't listen:\", err)\n\t}\n\tdefer listener.Close()\n\tgo server.ServeListener(listener)\n\n\tvar (\n\t\trequest = `{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"rpc_modules\"}` + \"\\n\"\n\t\twantResp = `{\"jsonrpc\":\"2.0\",\"id\":1,\"result\":{\"nftest\":\"1.0\",\"rpc\":\"1.0\",\"test\":\"1.0\"}}` + \"\\n\"\n\t\tdeadline = time.Now().Add(10 * time.Second)\n\t)\n\tfor i := 0; i < 20; i++ {\n\t\tconn, err := net.Dial(\"tcp\", listener.Addr().String())\n\t\tif err != nil {\n\t\t\tt.Fatal(\"can't dial:\", err)\n\t\t}\n\t\tdefer conn.Close()\n\t\tconn.SetDeadline(deadline)\n\t\t// Write the request, then half-close the connection so the server stops reading.\n\t\tconn.Write([]byte(request))\n\t\tconn.(*net.TCPConn).CloseWrite()\n\t\t// Now try to get the response.\n\t\tbuf := make([]byte, 2000)\n\t\tn, err := conn.Read(buf)\n\t\tif err != nil {\n\t\t\tt.Fatal(\"read error:\", err)\n\t\t}\n\t\tif !bytes.Equal(buf[:n], []byte(wantResp)) {\n\t\t\tt.Fatalf(\"wrong response: %s\", 
buf[:n])\n\t\t}\n\t}\n}", "func testDBConnection(conn *TestConnection) TestConnectionResponse {\n\tconnectionString := fmt.Sprintf(\"host=%s port=%d user=%s \"+\n\t\"password=%s dbname=%s sslmode=disable\",\n\t\tconn.Host,\n\t\tconn.Port,\n\t\tconn.User,\n\t\tconn.Password,\n\t\tconn.DBName)\n\n\t// Response object\n\tvar response TestConnectionResponse\n\tresponse.Valid = true\n\n\tcontext, err := sql.Open(\"postgres\", connectionString)\n\tif (err != nil) {\n\t\tresponse.Valid = false\n\t\treturn response\n\t}\n\t\t\n\terr = context.Ping()\n\tif (err != nil) {\n\t\tresponse.Valid = false\n\t\treturn response\n\t}\n\n\treturn response\n}", "func (monitor *XMPPMonitor) test() bool {\n\tvar talk *xmpp.Client\n\tvar err error\n\toptions := xmpp.Options{Host: monitor.Target,\n\t\tUser: monitor.Username,\n\t\tPassword: monitor.Password,\n\t\tNoTLS: true,\n\t\tStartTLS: true,\n\t}\n\n\ttalk, err = options.NewClient()\n\tif err != nil && strings.Index(err.Error(), \"PLAIN \") < 0 {\n\t\tmonitor.lastFailReason = err.Error()\n\t\tlogrus.Errorf(\"XMPP error: %s\", monitor.lastFailReason)\n\t\treturn false\n\t}\n\n\tif talk != nil {\n\t\ttalk.Close()\n\t}\n\n\treturn true\n}", "func (db *DB) simple(query jdh.Query) error {\n\tconn, err := net.Dial(\"tcp\", db.port)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\tenc := json.NewEncoder(conn)\n\treq := &Request{Query: query}\n\tenc.Encode(req)\n\tdec := json.NewDecoder(conn)\n\tans := &Answer{}\n\tif err := dec.Decode(ans); err != nil {\n\t\treturn err\n\t}\n\tif _, err := ans.GetMessage(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func TestListener(t *testing.T) {\n\tlistener := &fakeListener{}\n\tdoh, _ := NewTransport(testURL, ips, nil, nil, listener)\n\ttransport := doh.(*transport)\n\trt := makeTestRoundTripper()\n\ttransport.client.Transport = rt\n\n\tgo func() {\n\t\treq := <-rt.req\n\t\ttrace := httptrace.ContextClientTrace(req.Context())\n\t\ttrace.GotConn(httptrace.GotConnInfo{\n\t\t\tConn: &fakeConn{\n\t\t\t\tremoteAddr: &net.TCPAddr{\n\t\t\t\t\tIP: net.ParseIP(\"192.0.2.2\"),\n\t\t\t\t\tPort: 443,\n\t\t\t\t}}})\n\n\t\tr, w := io.Pipe()\n\t\trt.resp <- &http.Response{\n\t\t\tStatusCode: 200,\n\t\t\tBody: r,\n\t\t\tRequest: &http.Request{URL: parsedURL},\n\t\t}\n\t\tw.Write([]byte{0, 0, 8, 9, 10})\n\t\tw.Close()\n\t}()\n\n\tdoh.Query(simpleQueryBytes)\n\ts := listener.summary\n\tif s.Latency < 0 {\n\t\tt.Errorf(\"Negative latency: %f\", s.Latency)\n\t}\n\tif !bytes.Equal(s.Query, simpleQueryBytes) {\n\t\tt.Errorf(\"Wrong query: %v\", s.Query)\n\t}\n\tif !bytes.Equal(s.Response, []byte{0xbe, 0xef, 8, 9, 10}) {\n\t\tt.Errorf(\"Wrong response: %v\", s.Response)\n\t}\n\tif s.Server != \"192.0.2.2\" {\n\t\tt.Errorf(\"Wrong server IP string: %s\", s.Server)\n\t}\n\tif s.Status != Complete {\n\t\tt.Errorf(\"Wrong status: %d\", s.Status)\n\t}\n}", "func (c *Conn) succeeded() bool {\n\tselect {\n\tcase <-c.connectDone:\n\t\treturn c.connectErr == nil\n\tdefault:\n\t\treturn false\n\t}\n}", "func (c *Client) writeQuery(conn net.Conn, query []byte) error {\n\tvar err error\n\n\tif c.Timeout > 0 {\n\t\t_ = conn.SetWriteDeadline(time.Now().Add(c.Timeout))\n\t}\n\n\t// Write to the connection\n\tif _, ok := conn.(*net.TCPConn); ok {\n\t\tl := make([]byte, 2)\n\t\tbinary.BigEndian.PutUint16(l, uint16(len(query)))\n\t\t_, err = (&net.Buffers{l, query}).WriteTo(conn)\n\t} else {\n\t\t_, err = conn.Write(query)\n\t}\n\n\treturn err\n}", "func waitTCP(addr string) {\n\tlog.Printf(\"Waiting for TCP to be available at %s\", 
addr)\n\t// Try once a second to connect\n\tfor startTime := time.Now(); time.Since(startTime) < 10*time.Second; time.Sleep(time.Second) {\n\t\tconn, err := net.DialTimeout(\"tcp\", addr, time.Second)\n\n\t\tif err == nil {\n\t\t\t// Connection successful\n\t\t\tlog.Printf(\"TCP came up on %s\", addr)\n\t\t\tcloseErr := conn.Close()\n\t\t\tif closeErr != nil {\n\t\t\t\tlog.Printf(\"Error closing TCP connection in waitTCP: %s\", closeErr)\n\t\t\t}\n\n\t\t\treturn\n\t\t}\n\n\t\tlog.Printf(\"Tried to connect to %s, got error: %s. Will retry in 1 second.\", addr, err)\n\t}\n\n\t// Timed out\n\tpanic(fmt.Sprintf(\"Timeout out waiting for service to start on %s\", addr))\n}", "func awaitResult(conn net.Conn) {\n msg := make([]byte, 0)\n data := make([]byte, 256)\n for {\n size, err := conn.Read(data)\n if err != nil {\n break\n }\n\n msg = append(msg, data[:size]...)\n }\n if len(msg) == 0 {\n printDisconnected()\n return\n }\n\n args, err := message.FromJSON(msg)\n if err != nil {\n\tfmt.Println(\"Error parsing message.\")\n\treturn\n }\n\n printResult(args.Hash, args.Nonce)\n conn.Close()\n}", "func checkIPConnection(host string, port string) bool {\n\tconn, err := net.Dial(\"tcp\", host+\":\"+port)\n\tif err != nil {\n\t\treturn false\n\t}\n\tdefer conn.Close()\n\n\treturn true\n}", "func (tc *TestCase) Test(name string, client *QueryClient) error {\n\tvar errs []string\n\tif tc.Name != \"\" {\n\t\tname = tc.Name\n\t}\n\n\t// wait for all previous test cases to have been settled in cache\n\tclient.server.QueryPlanCacheWait()\n\n\tcatcher := NewQueryCatcher()\n\tdefer catcher.Close()\n\n\tqr, err := exec(client, tc.Query, tc.BindVars)\n\tif err != nil {\n\t\treturn vterrors.Wrapf(err, \"%s: Execute failed\", name)\n\t}\n\n\tif tc.Result != nil {\n\t\tresult := RowsToStrings(qr)\n\t\tif !reflect.DeepEqual(result, tc.Result) {\n\t\t\terrs = append(errs, fmt.Sprintf(\"Result mismatch:\\n'%+v' does not match\\n'%+v'\", result, tc.Result))\n\t\t}\n\t}\n\n\tif tc.RowsAffected != nil {\n\t\twant := tc.RowsAffected.(int)\n\t\tif int(qr.RowsAffected) != want {\n\t\t\terrs = append(errs, fmt.Sprintf(\"RowsAffected mismatch: %d, want %d\", int(qr.RowsAffected), want))\n\t\t}\n\t}\n\n\tif tc.RowsReturned != nil {\n\t\twant := tc.RowsReturned.(int)\n\t\tif len(qr.Rows) != want {\n\t\t\terrs = append(errs, fmt.Sprintf(\"RowsReturned mismatch: %d, want %d\", len(qr.Rows), want))\n\t\t}\n\t}\n\n\tqueryInfo, err := catcher.Next()\n\tif err != nil {\n\t\terrs = append(errs, fmt.Sprintf(\"Query catcher failed: %v\", err))\n\t}\n\tif tc.Rewritten != nil {\n\t\t// Work-around for a quirk. The stream comments also contain\n\t\t// \"; \". 
So, we have to get rid of the additional artifacts\n\t\t// to make it match expected results.\n\t\tunstripped := strings.Split(queryInfo.RewrittenSQL(), \"; \")\n\t\tgot := make([]string, 0, len(unstripped))\n\t\tfor _, str := range unstripped {\n\t\t\tif str == \"\" || str == \"*/\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tgot = append(got, str)\n\t\t}\n\t\tif !reflect.DeepEqual(got, tc.Rewritten) {\n\t\t\terrs = append(errs, fmt.Sprintf(\"Rewritten mismatch:\\n'%q' does not match\\n'%q'\", got, tc.Rewritten))\n\t\t}\n\t}\n\tif tc.Plan != \"\" {\n\t\tif queryInfo.PlanType != tc.Plan {\n\t\t\terrs = append(errs, fmt.Sprintf(\"Plan mismatch: %s, want %s\", queryInfo.PlanType, tc.Plan))\n\t\t}\n\t}\n\tif len(errs) != 0 {\n\t\tif name == \"\" {\n\t\t\treturn errors.New(strings.Join(errs, \"\\n\"))\n\t\t}\n\t\treturn errors.New(fmt.Sprintf(\"%s failed:\\n\", name) + strings.Join(errs, \"\\n\"))\n\t}\n\treturn nil\n}", "func TestConnection() {\n\tcon := Connect()\n\tdefer con.Close()\n\terr := con.Ping()\n\tif err != nil {\n\t\tfmt.Println(fmt.Errorf(\"%s\", err.Error()))\n\t\treturn\n\t}\n\tfmt.Println(\"Database connected\")\n}", "func poll(t *testing.T, address string) {\n\titerations := 1000\n\tfor i := 0; i < iterations; i++ {\n\t\t_, err := net.Dial(\"tcp\", address)\n\t\tt.Logf(\"Dial %s attempt %d\", address, i)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t\tif i == (iterations - 1) {\n\t\t\tt.Fatalf(\"Couldn't establish connection to %s\", address)\n\t\t}\n\t}\n}", "func res(conn net.Conn){\n reader := bufio.NewReader(conn) // set a new reader for tcp\n // for loop is used for continuos reading from port\n for {\n text,err:=reader.ReadString('\\n')\n if err != nil {\n fmt.Println(\"Client DISCONNECTED\")\n return\n } else {\n fmt.Println(\"text : \" + text) // print the recieved data\n data_to_db <- text\n }\n }\n }", "func (m *Measurement) DialTCP() *TCPResponse {\n\n\t// Simply check that the server is up and can accept connections.\n\tresult := &TCPResponse{\n\t\tHost: m.Host,\n\t\tPort: m.Port,\n\t\tLatency: 0,\n\t\tTimeout: m.Timeout,\n\t\tSequence: m.count,\n\t}\n\n\taddress := fmt.Sprintf(\"%s:%d\", m.Host, m.Port)\n\tstart := time.Now()\n\n\tconn, err := net.DialTimeout(\"tcp\", address, time.Duration(m.Timeout)*time.Second)\n\tif err != nil {\n\n\t\tm.appendFailed(result)\n\t\treturn result\n\t}\n\tdefer conn.Close()\n\n\tresult.Latency = time.Since(start).Seconds() * 1000\n\n\tm.appendSuccessful(result)\n\treturn result\n}", "func (node *Node) ResponseSuccess(hash []byte, z int) {\n\tokmsg := make([]byte, 0)\n\tokmsg = append(okmsg, hash...)\n\tokmsg = append(okmsg, Received)\n\tZ := make([]byte, 4)\n\tbinary.BigEndian.PutUint32(Z, uint32(z))\n\tokmsg = append(okmsg, Z...)\n\tsid := make([]byte, 4)\n\tbinary.BigEndian.PutUint32(sid, uint32(node.SelfPeer.Sid))\n\tokmsg = append(okmsg, sid...)\n\thashkey := ConvertToFixedSize(hash)\n\tnode.mux.Lock()\n\traptorq := node.Cache[hashkey]\n\tnode.mux.Unlock()\n\tsenderPubKey := raptorq.SenderPubKey\n\tfor _, peer := range node.AllPeers {\n\t\tif peer.PubKey != senderPubKey {\n\t\t\tcontinue\n\t\t}\n\t\ttcpaddr := net.JoinHostPort(peer.Ip, peer.TCPPort)\n\t\tconn, err := net.Dial(\"tcp\", tcpaddr)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"dial to tcp addr %v failed with %v\", tcpaddr, err)\n\t\t\tbackoff := ExpBackoffDelay(1000, 15000, 1.35)\n\t\t\tfor i := 0; i < 10; i++ {\n\t\t\t\ttime.Sleep(backoff(i, 0))\n\t\t\t\tconn, err = net.Dial(\"tcp\", tcpaddr)\n\t\t\t\tif err == nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tlog.Printf(\"dial to tcp 
addr %v failed with %v (retry %v)\", tcpaddr, err, i)\n\t\t\t}\n\t\t\tlog.Printf(\"retry exhausted\")\n\t\t}\n\t\tif err == nil && conn != nil {\n\t\t\t_, err = conn.Write(okmsg)\n\t\t\tlog.Printf(\"node %v send okay message for block %v to sender %v\", node.SelfPeer.Sid, z, tcpaddr)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"send received message to sender %v failed with %v\", tcpaddr, err)\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n}", "func TestAcceptClose(t *testing.T) {\n\tdoh := newFakeTransport()\n\tclient, server := makePair()\n\n\t// Start the forwarder running.\n\tgo Accept(doh, server)\n\n\tlbuf := make([]byte, 2)\n\t// Send Query\n\tqueryData := simpleQueryBytes\n\tbinary.BigEndian.PutUint16(lbuf, uint16(len(queryData)))\n\tclient.Write(lbuf)\n\tclient.Write(queryData)\n\n\t// Read query\n\tqueryRead := <-doh.query\n\tif !bytes.Equal(queryRead, queryData) {\n\t\tt.Error(\"Query mismatch\")\n\t}\n\n\t// Close the TCP connection\n\tclient.Close()\n\n\t// Send fake response too late.\n\tresponseData := []byte{1, 2, 8, 9, 10}\n\tdoh.response <- responseData\n}", "func tcpClient(){\n for {\n // tries to connect with server\n if indicator == 0 {\n fmt.Println(\"Connecting .....\")\n conn,err := net.Dial(\"tcp\",\":6600\")\n if err != nil{\n\n } else {\n // connected to server\n indicator = 1\n fmt.Println(\"Connected\")\n go TcpServerReader(conn)// read incoming data\n go TcpServerWriter(conn) // write data to server\n go database(conn)\n }\n }\n }\n }", "func testExit(sc *ssh.Client, target string) bool {\n\tlog.Printf(\"Making a test connection to %v\", target)\n\tc, err := sc.Dial(\"tcp\", target)\n\tif nil != err {\n\t\tlog.Printf(\"Connection to %v failed: %v\", target, err)\n\t\treturn false\n\t}\n\tlog.Printf(\"Connection to %v successful\", target)\n\tc.Close()\n\treturn true\n}", "func (s *Server) TestConnection(ctx context.Context, request *TestConnection_Request) (response *TestConnection_Response, err error) {\n\tlogging.Log(fmt.Sprintf(\"TestConnection - incoming request: %+v\", request))\n\t// response = new(TestConnection_Response)\n\n\treturn &TestConnection_Response{Success: true}, err\n}", "func TestTCPClient(t *testing.T) {\n\tclient, err := clients.New(testAddr, \"\")\n\tif err != nil {\n\t\tt.Fatalf(\"failed to connect - %s\", err.Error())\n\t\tt.FailNow()\n\t}\n\tdefer client.Close()\n\n\t// subscribe should fail with no tags\n\tif err := client.Subscribe([]string{}); err == nil {\n\t\tt.Fatalf(\"Subscription succeeded with missing tags!\")\n\t}\n\n\t// test ability to subscribe\n\tif err := client.Subscribe([]string{\"a\"}); err != nil {\n\t\tt.Fatalf(\"client subscriptions failed %s\", err.Error())\n\t}\n\n\t// test ability to list (subscriptions)\n\tif err := client.List(); err != nil {\n\t\tt.Fatalf(\"listing subscriptions failed %s\", err.Error())\n\t}\n\tif msg := <-client.Messages(); msg.Data == \"\\\"a\\\"\" {\n\t\tt.Fatalf(\"Failed to 'list' - '%s' '%#v'\", msg.Error, msg.Data)\n\t}\n\n\t// test publish\n\tif err := client.Publish([]string{\"a\"}, \"testpublish\"); err != nil {\n\t\tt.Fatalf(\"publishing failed %s\", err.Error())\n\t}\n\tif err := client.Publish([]string{}, \"nopublish\"); err == nil {\n\t\tt.Fatalf(\"publishing no tags succeeded %s\", err.Error())\n\t}\n\tif err := client.Publish([]string{\"a\"}, \"\"); err == nil {\n\t\tt.Fatalf(\"publishing no data succeeded %s\", err.Error())\n\t}\n\n\t// test ability to unsubscribe\n\tif err := client.Unsubscribe([]string{\"a\"}); err != nil {\n\t\tt.Fatalf(\"client unsubscriptions failed %s\", 
err.Error())\n\t}\n\tif err := client.Unsubscribe([]string{}); err == nil {\n\t\tt.Fatalf(\"client unsubscriptions no tags succeeded %s\", err.Error())\n\t}\n\n\t// test ability to list (no subscriptions)\n\tif err := client.List(); err != nil {\n\t\tt.Fatalf(\"listing subscriptions failed %s\", err.Error())\n\t}\n\tif msg := <-client.Messages(); msg.Data != \"\" {\n\t\tt.Fatalf(\"Failed to 'list' - %s\", msg.Error)\n\t}\n}", "func (o *PluginDnsClient) Query(queries []utils.DnsQueryParams, socket transport.SocketApi) error {\n\tif o.IsNameServer() {\n\t\treturn fmt.Errorf(\"Querying is not permitted for Dns Name Servers!\")\n\t}\n\tquestions, err := utils.BuildQuestions(queries)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(questions) > 0 {\n\t\tdata := o.dnsPktBuilder.BuildQueryPkt(questions, o.Tctx.Simulation)\n\t\tif socket == nil {\n\t\t\treturn fmt.Errorf(\"Invalid Socket in Query!\")\n\t\t}\n\t\ttransportErr, _ := o.socket.Write(data)\n\t\tif transportErr != transport.SeOK {\n\t\t\to.stats.socketWriteError++\n\t\t\treturn transportErr.Error()\n\t\t}\n\t\to.stats.pktTxDnsQuery++ // successfully sent query\n\t\to.stats.txBytes += uint64(len(data)) // number of bytes sent\n\t}\n\treturn nil\n}", "func (c *client) queryListener() {\n\t// Close the Transporter on exit\n\tdefer c.Close()\n\n\t// Set up connection for slave\n\tfor qry := range c.queries {\n\t\tqry := qry\n\t\ttime.Sleep(15 * time.Millisecond)\n\t\td, e := c.Send(qry.Query)\n\t\tgo qry.sendResponse(d, e)\n\t}\n}", "func main() {\n sendData := []byte(\"HEAD / HTTP/1.0\\r\\n\\r\\n\")\n\n target := os.Args[1]\n\n tcpAddr, err := net.ResolveTCPAddr(\"tcp4\", target + \":80\")\n checkError(err)\n\n conn, err := net.DialTCP(\"tcp\", nil, tcpAddr)\n checkError(err)\n\n _, err = conn.Write(sendData)\n checkError(err)\n\n result, err := ioutil.ReadAll(conn)\n checkError(err)\n\n fmt.Println(string(result))\n\n os.Exit(0)\n}", "func TestSingleClient(t *testing.T) {\n name := \"hi.txt\"\n contents := \"bye\"\n exptime := 300000\n conn, err := net.Dial(\"tcp\", \"localhost:8080\")\n if err != nil {\n t.Error(err.Error()) // report error through testing framework\n }\n scanner := bufio.NewScanner(conn)\n\n\n // Write a file\n _,err = fmt.Fprintf(conn, \"write %v %v %v\\r\\n%v\\r\\n\", name, len(contents), exptime ,contents)\n if err !=nil {\n fmt.Printf(\"error in writing in buffer\\n\")\n }\n scanner.Scan() // read first line\n resp := scanner.Text() // extract the text from the buffer\n version := VerifyWriteSucess(t,resp)\n\n\n // try read now\n fmt.Fprintf(conn, \"read %v\\r\\n\", name)\n scanner.Scan()\n output := scanner.Text()\n scanner.Scan() \n VerifyReadSucess(t,output,version,contents,scanner.Text())\n\n // try a cas command\n contents = \"GO is for distributed computing\"\n fmt.Fprintf(conn, \"cas %v %v %v %v\\r\\n%v\\r\\n\",name,version,len(contents),exptime,contents)\n scanner.Scan() // read first line\n version = VerifyCasSucess(t,scanner.Text())\n\n\n // try delete\n fmt.Fprintf(conn,\"delete %v\\r\\n\",name)\n scanner.Scan()\n VerifyDeleteSucess(t,scanner.Text())\n\n //try delete file not found\n fmt.Fprintf(conn,\"delete %v\\r\\n\",name)\n scanner.Scan()\n VerifyFileNotFound(t,scanner.Text())\n\n //try read file not found\n fmt.Fprintf(conn, \"read %v\\r\\n\", name)\n scanner.Scan() \n VerifyFileNotFound(t,scanner.Text())\n}", "func IsOk(conn redis.Conn) bool {\n\tresp, err := redis.String(conn.Do(\"PING\"))\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn resp == \"PONG\"\n}", "func Query(addr string, cmd 
[]byte) ([]byte, error) {\n\tudpAddr, err := net.ResolveUDPAddr(\"udp\", addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconn, err := net.DialUDP(\"udp\", nil, udpAddr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer conn.Close()\n\n\t_, err = conn.Write(cmd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar buf [1024]byte\n\tconn.SetReadDeadline(time.Now().Add(5000 * time.Millisecond))\n\tn, err := conn.Read(buf[0:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf[0:n], nil\n}", "func TCPDialCheck(addr string, timeout time.Duration) Check {\n\treturn func() error {\n\t\tconn, err := net.DialTimeout(\"tcp\", addr, timeout)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn conn.Close()\n\t}\n}", "func TCPDialCheck(addr string, timeout time.Duration) Check {\n\treturn func() error {\n\t\tconn, err := net.DialTimeout(\"tcp\", addr, timeout)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn conn.Close()\n\t}\n}", "func (c *Conn) failed() bool {\n\tselect {\n\tcase <-c.connectDone:\n\t\treturn c.connectErr != nil\n\tdefault:\n\t\treturn false\n\t}\n}", "func checkConnection(conn net.Conn, err error) {\n\tif err != nil {\n\t\tfmt.Printf(\"err %v connecting\", err)\n\t\tos.Exit(1)\n\t}\n\tfmt.Printf(\"Connection is made with %v\\n\", conn)\n}", "func BenchmarkDialTCP(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tDialTCP(TEST_HOST, TEST_USER, TEST_PASSWD, TEST_DBNAME)\n\t}\n}", "func echo(conn net.Conn){\n defer conn.Close()\n bData, _ := recvDataB(conn)\n sendDataB(conn, bData, OK_CODE)\n}", "func TestResponse(t *testing.T) {\n\tdoh, _ := NewTransport(testURL, ips, nil, nil, nil)\n\ttransport := doh.(*transport)\n\trt := makeTestRoundTripper()\n\ttransport.client.Transport = rt\n\n\t// Fake server.\n\tgo func() {\n\t\t<-rt.req\n\t\tr, w := io.Pipe()\n\t\trt.resp <- &http.Response{\n\t\t\tStatusCode: 200,\n\t\t\tBody: r,\n\t\t\tRequest: &http.Request{URL: parsedURL},\n\t\t}\n\t\t// The DOH response should have a zero query ID.\n\t\tvar modifiedQuery dnsmessage.Message = simpleQuery\n\t\tmodifiedQuery.Header.ID = 0\n\t\tw.Write(mustPack(&modifiedQuery))\n\t\tw.Close()\n\t}()\n\n\tresp, err := doh.Query(simpleQueryBytes)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// Parse the response as a DNS message.\n\trespParsed := mustUnpack(resp)\n\n\t// Query() should reconstitute the query ID in the response.\n\tif respParsed.Header.ID != simpleQuery.Header.ID ||\n\t\t!queriesMostlyEqual(*respParsed, simpleQuery) {\n\t\tt.Errorf(\"Unexpected response %v\", resp)\n\t}\n}", "func TestConnReadNonzeroAndEOF(t *testing.T) {\n\t// This test is racy: it assumes that after a write to a\n\t// localhost TCP connection, the peer TCP connection can\n\t// immediately read it. Because it's racy, we skip this test\n\t// in short mode, and then retry it several times with an\n\t// increasing sleep in between our final write (via srv.Close\n\t// below) and the following read.\n\tif testing.Short() {\n\t\tt.Skip(\"skipping in short mode\")\n\t}\n\tvar err error\n\tfor delay := time.Millisecond; delay <= 64*time.Millisecond; delay *= 2 {\n\t\tif err = testConnReadNonzeroAndEOF(t, delay); err == nil {\n\t\t\treturn\n\t\t}\n\t}\n\tt.Error(err)\n}", "func TestConnReadNonzeroAndEOF(t *testing.T) {\n\t// This test is racy: it assumes that after a write to a\n\t// localhost TCP connection, the peer TCP connection can\n\t// immediately read it. 
Because it's racy, we skip this test\n\t// in short mode, and then retry it several times with an\n\t// increasing sleep in between our final write (via srv.Close\n\t// below) and the following read.\n\tif testing.Short() {\n\t\tt.Skip(\"skipping in short mode\")\n\t}\n\tvar err error\n\tfor delay := time.Millisecond; delay <= 64*time.Millisecond; delay *= 2 {\n\t\tif err = testConnReadNonzeroAndEOF(t, delay); err == nil {\n\t\t\treturn\n\t\t}\n\t}\n\tt.Error(err)\n}", "func WaitSuccessfulDial(address string) error {\n\tctx, cancel := context.WithTimeout(context.Background(), waitDur)\n\tvar lastErr error\n\tdefer cancel()\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn multierr.Combine(ctx.Err(), lastErr)\n\t\tdefault:\n\t\t}\n\t\tvar conn net.Conn\n\t\tconn, lastErr = net.Dial(\"tcp\", address)\n\t\tif lastErr == nil {\n\t\t\treturn conn.Close()\n\t\t}\n\t\tlastErr = errors.Wrap(lastErr, 0)\n\t}\n}", "func (my *MySQL) Start(sql string, params ...interface{}) (\n res *Result, err os.Error) {\n\n defer my.unlockIfError(&err)\n defer catchOsError(&err)\n my.lock()\n\n if my.conn == nil {\n return nil, NOT_CONN_ERROR\n }\n if my.unreaded_rows {\n return nil, UNREADED_ROWS_ERROR\n }\n\n if len(params) != 0 {\n sql = fmt.Sprintf(sql, params...)\n }\n // Send query\n my.sendCmd(_COM_QUERY, sql)\n\n // Get command response\n res = my.getResponse(true)\n return\n}", "func TestTCPRetryRPC(t *testing.T) {\n\ttestTLSCerts = nil\n\ttestRegister(t, false)\n\ttestServer(t, false)\n\ttestBtree(t, false)\n\ttestStatsAndBucketstats(t)\n}", "func (cp *singleConnectionPool) OnSuccess(c *Connection) error { return nil }", "func connectionCheck(conn net.Conn, info chan string, ID int) {\n\n\t// basically just need to make sure we get some sort of response other than an error\n\treader := bufio.NewReader(conn)\n\t// open for loop to constantly be checking for errors\n\tfor {\n\t\t_, err := reader.ReadString('\\n')\n\t\tif err != nil {\n\t\t\t// report loss of the connection\n\t\t\tinfo <- \"Node:\" + strconv.Itoa(ID) + \" has disconnected.\"\n\t\t\t// end our check for this connection\n\t\t\treturn\n\t\t}\n\t}\n}", "func (c *Controller) callAndWait(p []*Packet, checkError bool, f func(*Packet) bool) error {\n\tc.packetConnLock.Lock()\n\tdefer c.packetConnLock.Unlock()\n\n\tcheckSeqs := map[uint16]bool{}\n\tfor _, packet := range p {\n\t\tif seq, err := packet.Seq(); err == nil {\n\t\t\tcheckSeqs[seq] = true\n\t\t}\n\t}\n\n\tconn, err := NewPacketConn()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\n\tsessInfo := c.getSessionInfo()\n\tif err := conn.Auth(sessInfo.UserID, sessInfo.Authorize, c.timeout); err != nil {\n\t\treturn err\n\t}\n\n\t// Prevent the bg thread from blocking on a\n\t// channel send forever.\n\tdoneChan := make(chan struct{}, 1)\n\tdefer close(doneChan)\n\n\tpackets := make(chan *Packet, 16)\n\terrChan := make(chan error, 1)\n\tgo func() {\n\t\tdefer close(packets)\n\t\tfor {\n\t\t\tpacket, err := conn.Read()\n\t\t\tif err != nil {\n\t\t\t\terrChan <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif checkError && packet.IsResponse {\n\t\t\t\tseq, err := packet.Seq()\n\t\t\t\tif err == nil && checkSeqs[seq] && len(packet.Data) > 0 {\n\t\t\t\t\tif packet.Data[len(packet.Data)-1] != 0 {\n\t\t\t\t\t\terrChan <- RemoteCallError\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase packets <- packet:\n\t\t\tcase <-doneChan:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\tfor _, subPacket := range p {\n\t\tif err := conn.Write(subPacket); err != nil 
{\n\t\t\treturn err\n\t\t}\n\t}\n\n\ttimeout := time.After(c.timeout)\n\tfor {\n\t\tselect {\n\t\tcase packet, ok := <-packets:\n\t\t\tif !ok {\n\t\t\t\t// Could be a race condition between packets and errChan.\n\t\t\t\tselect {\n\t\t\t\tcase err := <-errChan:\n\t\t\t\t\treturn err\n\t\t\t\tdefault:\n\t\t\t\t\treturn errors.New(\"connection closed\")\n\t\t\t\t}\n\t\t\t}\n\t\t\tif f(packet) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\tcase err := <-errChan:\n\t\t\treturn err\n\t\tcase <-timeout:\n\t\t\treturn errors.New(\"timeout waiting for response\")\n\t\t}\n\t}\n}", "func TestTCPProbeTimeout(t *testing.T) {\n\tprobeExpectTimeout(t, 49)\n\tprobeExpectTimeout(t, 50)\n\tprobeExpectTimeout(t, 51)\n}", "func TestHTTPError(t *testing.T) {\n\tdoh, _ := NewTransport(testURL, ips, nil, nil, nil)\n\ttransport := doh.(*transport)\n\trt := makeTestRoundTripper()\n\ttransport.client.Transport = rt\n\n\tgo func() {\n\t\t<-rt.req\n\t\tr, w := io.Pipe()\n\t\trt.resp <- &http.Response{\n\t\t\tStatusCode: 500,\n\t\t\tBody: r,\n\t\t\tRequest: &http.Request{URL: parsedURL},\n\t\t}\n\t\tw.Write([]byte{0, 0, 8, 9, 10})\n\t\tw.Close()\n\t}()\n\n\t_, err := doh.Query(simpleQueryBytes)\n\tvar qerr *queryError\n\tif err == nil {\n\t\tt.Error(\"Empty body should cause an error\")\n\t} else if !errors.As(err, &qerr) {\n\t\tt.Errorf(\"Wrong error type: %v\", err)\n\t} else if qerr.status != HTTPError {\n\t\tt.Errorf(\"Wrong error status: %d\", qerr.status)\n\t}\n}", "func call(srv string, rpcname string,\n\targs interface{}, reply interface{}) bool {\n\tc, err := rpc.DialHTTP(\"tcp\", srv)\n\tif err != nil {\n\t\treturn false\n\t}\n\tdefer c.Close()\n\n\terr = c.Call(rpcname, args, reply)\n\tif err == nil {\n\t\treturn true\n\t}\n\n\tfmt.Println(err)\n\treturn false\n}", "func workWithClient(conn net.Conn, db *sql.DB) {\n defer conn.Close()\n fmt.Println(\"connected\")\n newMsgCh := make(chan message, CHAN_NEW_MESSAGE_SIZE)\n\n LISTEN_LOOP:\n for {\n select {\n /// read command from user\n case req := <-recvDataCmd(conn):\n sErr := executeCommand(conn, db, req.data, req.err, &newMsgCh)\n if sErr.Err != nil {\n if sErr.Err == io.EOF {\n break LISTEN_LOOP\n }\n sendError(conn, sErr)\n }\n case msg := <- newMsgCh:\n sendData(conn, msg.ToStr(), OK_CODE)\n } // end select\n } // end for\n}", "func handle_conn_err(err error) {\n if err == io.EOF {\n fmt.Println(\"Connection went away\")\n } else if neterr, ok := err.(net.Error); ok && neterr.Timeout() {\n fmt.Println(\"Connection Timeout\")\n } else if operr, ok := err.(*net.OpError); ok {\n if operr.Op == \"dial\" {\n fmt.Println(\"Couldn't reach host\")\n } else if operr.Op == \"read\" {\n fmt.Println(\"Can't read closed connection\")\n } else {\n fmt.Printf(\"Failed to perform op: '%s'\\n\", operr.Op)\n }\n }\n}", "func client(serverIp string, serverPort string) {\n //TCPAddr\n tcpAddr, err := net.ResolveTCPAddr(\"tcp\", serverIp + serverPort)\n checkErrorClient(err)\n\n //TCPConn\n conn, err := net.DialTCP(\"tcp\", nil, tcpAddr)\n checkErrorClient(err)\n reader := bufio.NewReader(os.Stdin)\n buf := make([]byte, SendBufferSize)\n\n for {\n readTotal, err := reader.Read(buf)\n if err != nil {\n if err != io.EOF {\n checkErrorClient(err)\n }\n break\n }\n _, err = conn.Write(buf[:readTotal])\n checkErrorClient(err)\n }\n\n checkErrorClient(err)\n os.Exit(0)\n}", "func ConnTest(ConnURL string) int{\n\ttotal := 0\n\tfor {\n\t\ttotal += 1\n\t\tdb, err := sql.Open(\"mysql\", ConnURL)\n\t\t_, err2 := db.Query(\"SELECT * FROM test;\")\n\t\tif err != nil 
{\n\t\t\tfmt.Println(err)\n\t\t\treturn total\n\t\t}\n\t\tif err2 != nil {\n\t\t\tfmt.Println(err2)\n\t\t\treturn total\n\t\t}\n\t}\n}", "func (m *Mock) TCPResponse(req *acomm.Request) (interface{}, *url.URL, error) {\n\tvar err error\n\tif !m.Data.TCPResponse {\n\t\terr = errors.New(\"response did not match\")\n\t}\n\treturn nil, nil, err\n}", "func connStream(t *testing.T) {\n}", "func (db *DB) Ping() (string, error) {\n\n\tvar response string\n\ttimeout := time.Second * 3\n\tconn, err := net.DialTimeout(\"tcp\", net.JoinHostPort(db.host, \"8093\"), timeout)\n\tif err != nil {\n\t\tresponse = fmt.Sprintf(\"Connection error %v\", err.Error())\n\t\treturn response, err\n\t}\n\tif conn != nil {\n\t\tdefer conn.Close()\n\t\tresponse = fmt.Sprintf(\"Connection successful to %v\", net.JoinHostPort(db.host, \"8093\"))\n\t}\n\n\treturn response, nil\n}", "func healthCheckVM(conn net.Conn) {\n\tfor {\n\t\tconn.Write([]byte{0})\n\t\ttime.Sleep(2 * time.Second)\n\t}\n}", "func TestNewPacketFrom(t *testing.T) {\n\n\tt.Log(\"Start TestNewPacketFrom +++++++++++++\")\n\t// Get those go-to queries\n\ttcases := tmake()\n\n\tfor _, tcase := range tcases {\n\t\tt.Log(\"Testing for: \", tcase.Serialized)\n\t\tns := NewMySQLPacketFrom(0, tcase.ns.Serialized[HEADER_SIZE+1:])\n\t\tif ns.Length != tcase.ns.Length {\n\t\t\tt.Log(\"Length expected\", tcase.ns.Length, \"instead got\", ns.Length)\n\t\t}\n\t\tif ns.Sqid != tcase.ns.Sqid {\n\t\t\tt.Log(\"Length expected\", tcase.ns.Sqid, \"instead got\", ns.Sqid)\n\t\t}\n\t\tif ns.Cmd != tcase.ns.Cmd {\n\t\t\tt.Log(\"Command expected\", tcase.ns.Cmd, \"instead got\", ns.Cmd)\n\t\t\tt.Fail()\n\t\t}\n\t\tif !reflect.DeepEqual(ns.Serialized, tcase.ns.Serialized) {\n\t\t\tt.Log(\"Serialized expected\", tcase.ns.Serialized, \"instead got\", ns.Serialized)\n\t\t\tt.Fail()\n\t\t}\n\t\tt.Log(\"Done testing for: \", tcase.Serialized)\n\t}\n\n\tt.Log(\"End TestNewPacketFrom +++++++++++++\")\n\n}", "func TestWhoisQuery(t *testing.T) {\n\t// Retry WhoisQuery up to 3 times for network timeout errors.\n\tfor i := 0; i < 3; i++ {\n\t\tres, err := WhoisQuery(\"koding.com\", \"whois.arin.net\", 5*time.Second)\n\t\tif e, ok := err.(net.Error); ok && e.Timeout() {\n\t\t\tcontinue\n\t\t}\n\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tif res == \"\" {\n\t\t\tt.Fatal(\"Whois response empty.\")\n\t\t}\n\n\t\t// Use a the street name to validate the response\n\t\tif regexp.MustCompile(`(?i)brannan`).MatchString(res) != true {\n\t\t\tt.Fatal(\"Response does not match as expected.\" +\n\t\t\t\t`Wanted the regexp \"brannan\" to match`)\n\t\t}\n\n\t\treturn\n\t}\n\n\tt.Fatal(\"exceeded max retry attempts for WhoisQuery\")\n}", "func (c Client) SendQuery(message dns.Msg) (dns.Msg, error) {\n\t// Open a new QUIC stream\n\tlog.Debugln(\"opening new quic stream\")\n\tstream, err := c.Session.OpenStream()\n\tif err != nil {\n\t\treturn dns.Msg{}, errors.New(\"quic stream open: \" + err.Error())\n\t}\n\n\t// Pack the DNS message for transmission\n\tlog.Debugln(\"packing dns message\")\n\tpacked, err := message.Pack()\n\tif err != nil {\n\t\t_ = stream.Close()\n\t\treturn dns.Msg{}, errors.New(\"dns message pack: \" + err.Error())\n\t}\n\n\t// Send the DNS query over QUIC\n\tlog.Debugln(\"writing packed format to the stream\")\n\t_, err = stream.Write(packed)\n\t_ = stream.Close()\n\tif err != nil {\n\t\treturn dns.Msg{}, errors.New(\"quic stream write: \" + err.Error())\n\t}\n\n\t// Read the response\n\tlog.Debugln(\"reading server response\")\n\tresponse, err := 
ioutil.ReadAll(stream)\n\tif err != nil {\n\t\treturn dns.Msg{}, errors.New(\"quic stream read: \" + err.Error())\n\t}\n\n\t// Unpack the DNS message\n\tlog.Debugln(\"unpacking response dns message\")\n\tvar msg dns.Msg\n\terr = msg.Unpack(response)\n\tif err != nil {\n\t\treturn dns.Msg{}, errors.New(\"dns message unpack: \" + err.Error())\n\t}\n\n\treturn msg, nil // nil error\n}", "func TestQueryIntegration(t *testing.T) {\n\tqueryData := []byte{\n\t\t111, 222, // [0-1] query ID\n\t\t1, 0, // [2-3] flags, RD=1\n\t\t0, 1, // [4-5] QDCOUNT (number of queries) = 1\n\t\t0, 0, // [6-7] ANCOUNT (number of answers) = 0\n\t\t0, 0, // [8-9] NSCOUNT (number of authoritative answers) = 0\n\t\t0, 0, // [10-11] ARCOUNT (number of additional records) = 0\n\t\t// Start of first query\n\t\t7, 'y', 'o', 'u', 't', 'u', 'b', 'e',\n\t\t3, 'c', 'o', 'm',\n\t\t0, // null terminator of FQDN (DNS root)\n\t\t0, 1, // QTYPE = A\n\t\t0, 1, // QCLASS = IN (Internet)\n\t}\n\n\ttestQuery := func(queryData []byte) {\n\n\t\tdoh, err := NewTransport(testURL, ips, nil, nil, nil)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tresp, err2 := doh.Query(queryData)\n\t\tif err2 != nil {\n\t\t\tt.Fatal(err2)\n\t\t}\n\t\tif resp[0] != queryData[0] || resp[1] != queryData[1] {\n\t\t\tt.Error(\"Query ID mismatch\")\n\t\t}\n\t\tif len(resp) <= len(queryData) {\n\t\t\tt.Error(\"Response is short\")\n\t\t}\n\t}\n\n\ttestQuery(queryData)\n\n\tpaddedQueryBytes, err := AddEdnsPadding(simpleQueryBytes)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttestQuery(paddedQueryBytes)\n}", "func TestEmptyResponse(t *testing.T) {\n\tdoh, _ := NewTransport(testURL, ips, nil, nil, nil)\n\ttransport := doh.(*transport)\n\trt := makeTestRoundTripper()\n\ttransport.client.Transport = rt\n\n\t// Fake server.\n\tgo func() {\n\t\t<-rt.req\n\t\t// Make an empty body.\n\t\tr, w := io.Pipe()\n\t\tw.Close()\n\t\trt.resp <- &http.Response{\n\t\t\tStatusCode: 200,\n\t\t\tBody: r,\n\t\t\tRequest: &http.Request{URL: parsedURL},\n\t\t}\n\t}()\n\n\t_, err := doh.Query(simpleQueryBytes)\n\tvar qerr *queryError\n\tif err == nil {\n\t\tt.Error(\"Empty body should cause an error\")\n\t} else if !errors.As(err, &qerr) {\n\t\tt.Errorf(\"Wrong error type: %v\", err)\n\t} else if qerr.status != BadResponse {\n\t\tt.Errorf(\"Wrong error status: %d\", qerr.status)\n\t}\n}", "func (rt RouteTable) ConnectionSuccess(id string, address overlay.NodeAddress) {\n\t// TODO: What should we do ?\n\treturn\n}", "func (conn *n1qlConn) doClientRequest(query string, requestValues *url.Values) (*http.Response, error) {\n\n\tstmtType := txStatementType(query)\n\tok := false\n\tfor !ok {\n\n\t\tvar request *http.Request\n\t\tvar err error\n\t\tvar selectedNode, numNodes int\n\t\tvar queryAPI string\n\t\tvar txParams map[string]string\n\n\t\t// select query API\n\t\tif conn.txid != \"\" && query != N1QL_DEFAULT_STATEMENT {\n\t\t\ttxParams = map[string]string{\"txid\": conn.txid, \"tximplicit\": \"\"}\n\t\t\tqueryAPI = conn.txService\n\t\t} else {\n\t\t\tif stmtType == TX_START && TxTimeout != \"\" {\n\t\t\t\ttxParams = map[string]string{\"txtimeout\": TxTimeout}\n\t\t\t}\n\t\t\trand.Seed(time.Now().Unix())\n\t\t\tnumNodes = len(conn.queryAPIs)\n\n\t\t\tselectedNode = rand.Intn(numNodes)\n\t\t\tconn.lock.RLock()\n\t\t\tqueryAPI = conn.queryAPIs[selectedNode]\n\t\t\tconn.lock.RUnlock()\n\t\t}\n\n\t\tif query != \"\" {\n\t\t\trequest, err = prepareRequest(query, queryAPI, nil, txParams)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t} else {\n\t\t\tif requestValues != nil 
{\n\t\t\t\trequest, _ = http.NewRequest(\"POST\", queryAPI, bytes.NewBufferString(requestValues.Encode()))\n\t\t\t} else {\n\t\t\t\trequest, _ = http.NewRequest(\"POST\", queryAPI, nil)\n\t\t\t}\n\t\t\trequest.Header.Add(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\t\t\tsetCBUserAgent(request)\n\t\t\tif hasUsernamePassword() {\n\t\t\t\trequest.SetBasicAuth(username, password)\n\t\t\t}\n\t\t}\n\n\t\tresp, err := conn.client.Do(request)\n\t\tif err != nil {\n\t\t\t// if this is the last node return with error\n\t\t\tif conn.txService != \"\" || numNodes == 1 {\n\t\t\t\tconn.SetTxValues(\"\", \"\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t// remove the node that failed from the list of query nodes\n\t\t\tconn.lock.Lock()\n\t\t\tconn.queryAPIs = append(conn.queryAPIs[:selectedNode], conn.queryAPIs[selectedNode+1:]...)\n\t\t\tconn.lock.Unlock()\n\t\t\tcontinue\n\t\t} else {\n\t\t\tif stmtType == TX_START {\n\t\t\t\ttxid := getTxid(resp)\n\t\t\t\tif txid != \"\" {\n\t\t\t\t\tconn.SetTxValues(txid, queryAPI)\n\t\t\t\t}\n\t\t\t} else if stmtType == TX_COMMIT || stmtType == TX_ROLLBACK {\n\t\t\t\tconn.SetTxValues(\"\", \"\")\n\t\t\t}\n\t\t\treturn resp, nil\n\n\t\t}\n\t}\n\n\treturn nil, fmt.Errorf(\"N1QL: Query nodes not responding\")\n}", "func (pc *ProadClient) CheckConnection() {\n\trequest, err := http.NewRequest(\"GET\", \"https://192.168.0.15/api/test/validate_key\", http.NoBody)\n\tresponse := pc.Do(request)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\trespBytes, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\tdefer response.Body.Close()\n\tfmt.Println(string(respBytes))\n}", "func callOnceAndCheck(t *testing.T, client *Client) {\n\n\targs := &StreamingArgs{3, 5, -1, -1}\n\trowChan := make(chan *StreamingReply, 10)\n\tc := client.StreamGo(\"StreamingArith.Thrive\", args, rowChan)\n\n\tcount := 0\n\tfor row := range rowChan {\n\t\tif row.Index != count {\n\t\t\tt.Fatal(\"unexpected value:\", row.Index)\n\t\t}\n\t\tcount++\n\n\t\t// log.Println(\"Values: \", row.C, row.Index)\n\t}\n\n\tif c.Error != nil {\n\t\tt.Fatal(\"unexpected error:\", c.Error.Error())\n\t}\n\n\tif count != 5 {\n\t\tt.Fatal(\"Didn't receive the right number of packets back:\", count)\n\t}\n}", "func (db *DB) Query(query string, args ...interface{}) (rows *sql.Rows, err error) {\n\tidx, readReplica := db.readReplicaRR()\n\trows, err = readReplica.Query(query, args...)\n\t// If it is a connection issue with the target, then try another endpoint.\n\t// Currently, this is an overkill. 
Ideally, catch all errors related to network.\n\tif err != nil {\n\t\tif err := checkBeat(readReplica); err != nil {\n\t\t\t// proactively quarantine the down readReplica\n\t\t\tgo db.quarantineReadReplica(idx)\n\t\t\t_, readReplica := db.readReplicaRR()\n\t\t\treturn readReplica.Query(query, args...)\n\t\t}\n\t}\n\n\t// if the error is not related to network issue,\n\t// return the original error\n\treturn\n}", "func (p *Proxy) respondTCP(d *DNSContext) error {\n\tresp := d.Res\n\tconn := d.Conn\n\n\tbytes, err := resp.Pack()\n\tif err != nil {\n\t\treturn errorx.Decorate(err, \"couldn't convert message into wire format: %s\", resp.String())\n\t}\n\n\terr = proxyutil.WritePrefixed(bytes, conn)\n\n\tif proxyutil.IsConnClosed(err) {\n\t\treturn err\n\t}\n\tif err != nil {\n\t\treturn errorx.Decorate(err, \"conn.Write() returned error\")\n\t}\n\n\treturn nil\n}", "func connect(num int, serverHost string, serverPort int, result *chan resultConn) {\n\tfmt.Println(\"Start connection \", num)\n\tstart := time.Now()\n\tres := resultConn{}\n\tvar msgOut []byte\n\tconn, err := net.Dial(transport, fmt.Sprintf(\"%s:%d\", serverHost, serverPort))\n\tif err != nil {\n\t\t// handle error\n\t\tfmt.Println(err)\n\t} else {\n\t\treader := bufio.NewReader(conn)\n\t\tbuf := make([]byte, 1024)\n\t\tfor msgIndex := 0; msgIndex < messageCount; msgIndex++ {\n\t\t\tmsgOut = randomProtoMsg()\n\t\t\tconn.Write(msgOut)\n\t\t\tres.byteOut += len(msgOut)\n\t\t\tanswerSize, err := reader.Read(buf)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t} else {\n\t\t\t\tmsg := buf[0:answerSize]\n\t\t\t\tif showMsg {\n\t\t\t\t\tfmt.Printf(\n\t\t\t\t\t\t\"msg out: %s\\nmsg in: %s\\n\",\n\t\t\t\t\t\tstring(msgOut), string(msg))\n\t\t\t\t}\n\t\t\t\tres.byteIn += answerSize\n\t\t\t}\n\t\t}\n\t}\n\tres.time = float32(time.Since(start)) / 1E9\n\tfmt.Println(\"End connection\", num, \"time\", res.time)\n\t(*result) <- res\n}", "func WaitForTCP(ctx context.Context, rAddr string) error {\n\tdialer := net.Dialer{}\n\tconn, err := dialer.DialContext(ctx, \"tcp\", rAddr)\n\t//For loop to get around OS Dial Timeout\n\tfor err != nil {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tdefault:\n\t\t}\n\t\tconn, err = dialer.DialContext(ctx, \"tcp\", rAddr)\n\t}\n\tconn.Close()\n\treturn nil\n}", "func (mc *mysqlConn) Ping() (e error) {\n\t// Send command\n\te = mc.writeCommandPacket(COM_PING)\n\tif e != nil {\n\t\treturn\n\t}\n\n\t// Read Result\n\te = mc.readResultOK()\n\treturn\n}", "func (ts *testSystem) serverTryRead(size int, expectedData []byte) {\n\tvar q struct{}\n\tts.t.Logf(\"server starts to read...\")\n\t_, data, err := ts.server.Read()\n\tif err != nil {\n\t\tts.t.Fatalf(\"Server received error during read.\")\n\t\treturn\n\t}\n\n\tswitch size {\n\tcase SHORT:\n\t\t//fmt.Printf(\"WRONG!! 
Server received short message: %s\\n\", data)\n\t\tfmt.Printf(\"expected data: %s, size: %d\\n\", expectedData, size)\n\t\tts.t.Fatalf(\"Server received short message: %s\\n\", data)\n\t\treturn\n\tcase LONG:\n\t\tts.exitChan <- q\n\t\tif len(data) != len(expectedData) {\n\t\t\tts.t.Fatalf(\"Expecting data %s, server received longer message: %s\",\n\t\t\t\texpectedData, data)\n\t\t}\n\t\treturn\n\tcase NORMAL:\n\t\tts.exitChan <- q\n\t\tif !bytes.Equal(data, expectedData) {\n\t\t\tts.t.Fatalf(\"Expecting %s, server received message: %s\",\n\t\t\t\texpectedData, data)\n\t\t}\n\t\treturn\n\t}\n}", "func KeepConnection(err Error) bool {\n\t// Do not keep connection on client errors.\n\tif err.resultCode() < 0 {\n\t\treturn false\n\t}\n\n\treturn !err.Matches(types.QUERY_TERMINATED,\n\t\ttypes.SCAN_ABORT,\n\t\ttypes.QUERY_ABORTED,\n\t\ttypes.TIMEOUT)\n}", "func TestConnQueryCloseEarly(t *testing.T) {\n\tt.Parallel()\n\n\tconn := mustConnectString(t, os.Getenv(\"PGX_TEST_DATABASE\"))\n\tdefer closeConn(t, conn)\n\n\t// Immediately close query without reading any rows\n\trows, err := conn.Query(context.Background(), \"select generate_series(1,$1)\", 10)\n\tif err != nil {\n\t\tt.Fatalf(\"conn.Query failed: %v\", err)\n\t}\n\trows.Close()\n\n\tensureConnValid(t, conn)\n\n\t// Read partial response then close\n\trows, err = conn.Query(context.Background(), \"select generate_series(1,$1)\", 10)\n\tif err != nil {\n\t\tt.Fatalf(\"conn.Query failed: %v\", err)\n\t}\n\n\tok := rows.Next()\n\tif !ok {\n\t\tt.Fatal(\"rows.Next terminated early\")\n\t}\n\n\tvar n int32\n\trows.Scan(&n)\n\tif n != 1 {\n\t\tt.Fatalf(\"Expected 1 from first row, but got %v\", n)\n\t}\n\n\trows.Close()\n\n\tensureConnValid(t, conn)\n}", "func fetchData(lockChan chan bool) {\n tcpAddr, _ := net.ResolveTCPAddr(\"tcp4\", \"localhost:1201\")\n conn, _ := net.DialTCP(\"tcp\", nil, tcpAddr)\n\n // Write the address to the stream\n conn.Write([]byte(\"mfgiXnSzJF6mb37FDorWJeeqeP3tFTERpo\"))\n var buf[255]byte\n\n // Read back the response :-)\n n, err := conn.Read(buf[0:])\n if err!=nil {\n fmt.Println(\"Error reading data\")\n return\n }\n\n fmt.Printf(\"Received: %s\\n\", string(buf[:(n-5)]))\n\n // 'Unlock' the lock chan (thread join)\n lockChan<-true\n\n conn.Close()\n}", "func TestReAttachTCP(t *testing.T) {\n\ttestAttachNTimes(t, 3)\n}", "func (s *Server) verifyTransaction(tx *Transaction) bool {\n\ttime.Sleep(txDelay)\n\treturn true\n}", "func (mc *mysqlConn) exec(query string) (e error) {\n\t// Send command\n\te = mc.writeCommandPacket(COM_QUERY, query)\n\tif e != nil {\n\t\treturn\n\t}\n\n\t// Read Result\n\tresLen, e := mc.readResultSetHeaderPacket()\n\tif e != nil {\n\t\treturn\n\t}\n\n\tmc.affectedRows = 0\n\tmc.insertId = 0\n\n\tif resLen > 0 {\n\t\t_, e = mc.readUntilEOF()\n\t\tif e != nil {\n\t\t\treturn\n\t\t}\n\n\t\tmc.affectedRows, e = mc.readUntilEOF()\n\t\tif e != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn\n}", "func (b *BoltTestSuite) connectionTest(conn connection.IConnection, mode bolt_mode.AccessMode, db, testFrom string) {\n\tb.Require().NotNil(conn)\n\t// test basic query\n\tall, m, err := conn.QueryWithDb(\"return 1;\", nil, db)\n\tb.Require().Nil(err)\n\tb.Require().NotNil(all)\n\tb.Require().NotNil(m)\n\tb.Require().Len(all, 1)\n\tb.Require().Len(all[0], 1)\n\tb.Require().Equal([][]interface{}{{\n\t\tint64(1),\n\t}}[0][0], all[0][0])\n\n\t// test basic exec\n\tres, err := conn.ExecWithDb(\"create (t:TestNode{id:$id}) return t\", map[string]interface{}{\n\t\t\"id\": testFrom,\n\t}, db)\n\tif mode == 
bolt_mode.WriteMode {\n\t\t// test behavior if its allowed to do writes\n\t\tb.Require().Nil(err)\n\t\tb.Require().NotNil(res)\n\t\tnodesCr, ok := res.GetNodesCreated()\n\t\tb.Require().True(ok)\n\t\tb.Require().Equal(int64(1), nodesCr)\n\t} else {\n\t\t// test behavior if not allowed to do writes\n\t\tb.Require().NotNil(err)\n\t\tb.Require().Nil(res)\n\t}\n\n\tqid := fmt.Sprintf(\"%s-%v\", testFrom, 1)\n\n\t// test create query\n\tif mode == bolt_mode.WriteMode {\n\t\tdata, _, err := conn.QueryWithDb(\"create (:TestNode{id:$id})\", map[string]interface{}{\n\t\t\t\"id\": qid,\n\t\t}, db)\n\t\tb.Require().Nil(err)\n\t\tb.Require().NotNil(data)\n\t} else {\n\t\t// test behavior if not allowed to do writes\n\t\trows, _, err := conn.QueryWithDb(\"create (:TestNode{id:$id})\", map[string]interface{}{\n\t\t\t\"id\": qid,\n\t\t}, db)\n\t\tb.Require().NotNil(err)\n\t\tb.Require().Nil(rows)\n\t}\n\n\t// after this point we can quit if its a readonly connection\n\tif mode == bolt_mode.ReadMode {\n\t\treturn\n\t}\n\n\t// test delete exec\n\tres, err = conn.ExecWithDb(\"match (t:TestNode{id:$id}) delete t\", map[string]interface{}{\n\t\t\"id\": testFrom,\n\t}, db)\n\tb.Require().Nil(err)\n\tb.Require().NotNil(res)\n\tnodesCr, ok := res.GetNodesDeleted()\n\tb.Require().True(ok)\n\tb.Require().Equal(int64(1), nodesCr)\n\n\t// test query\n\tdata, _, err := conn.QueryWithDb(\"match (n) where n.id=$id return n\", map[string]interface{}{\n\t\t\"id\": qid,\n\t}, db)\n\tb.Require().Nil(err)\n\tb.Require().NotNil(data)\n\n\t// test delete query\n\tdata, _, err = conn.QueryWithDb(\"match (t:TestNode{id:$id}) delete t\", map[string]interface{}{\n\t\t\"id\": qid,\n\t}, db)\n\tb.Require().Nil(err)\n\tb.Require().NotNil(data)\n\n\t// setup index stuff\n\tvar indexCreateQuery, indexDeleteQuery string\n\n\tif b.protocolVersion == 4 {\n\t\tindexCreateQuery = fmt.Sprintf(createIndexV4Query, testFrom+\"_index\")\n\t\tindexDeleteQuery = fmt.Sprintf(dropIndexV4Query, testFrom+\"_index\")\n\t} else {\n\t\tindexCreateQuery = createIndexV1t3Query\n\t\tindexDeleteQuery = dropIndexV1t3Query\n\t}\n\n\t// test create index\n\t_, err = conn.ExecWithDb(indexCreateQuery, nil, db)\n\tb.Require().Nil(err)\n\n\t_, err = conn.ExecWithDb(indexDeleteQuery, nil, db)\n\tb.Require().Nil(err)\n\n\t// test create/read in tx\n\ttx, err := conn.BeginWithDatabase(db)\n\tb.Require().Nil(err)\n\tb.Require().NotNil(tx)\n\n\tqid = fmt.Sprintf(\"%s-%v\", testFrom, 2)\n\n\t// test create query\n\tres, err = tx.ExecWithDb(\"create (:TestNode{id:$id})\", map[string]interface{}{\n\t\t\"id\": qid,\n\t}, db)\n\tb.Require().Nil(err)\n\tb.Require().NotNil(res)\n\tnodesCr, ok = res.GetNodesCreated()\n\tb.Require().True(ok)\n\tb.Require().Equal(int64(1), nodesCr)\n\n\tdata, _, err = tx.QueryWithDb(\"match (n) where n.id=$id return n\", map[string]interface{}{\n\t\t\"id\": qid,\n\t}, db)\n\tb.Require().Nil(err)\n\tb.Require().NotNil(data)\n\n\tb.Require().Nil(tx.Commit())\n\n\t// test rollback\n\ttx, err = conn.BeginWithDatabase(db)\n\tb.Require().Nil(err)\n\tb.Require().NotNil(tx)\n\n\tres, err = tx.ExecWithDb(\"create (:TestNode{id:$id})\", map[string]interface{}{\n\t\t\"id\": qid,\n\t}, db)\n\tb.Require().Nil(err)\n\tb.Require().NotNil(res)\n\tnodesCr, ok = res.GetNodesCreated()\n\tb.Require().True(ok)\n\tb.Require().Equal(int64(1), nodesCr)\n\n\tb.Require().Nil(tx.Rollback())\n}", "func (s *Server) isAlive(c net.Conn) bool {\n\tone := make([]byte, 1)\n\tif err := c.SetReadDeadline(time.Now().Add(time.Millisecond * 500)); err != nil 
{\n\t\tlog.Println(err)\n\n\t\treturn false\n\t}\n\n\t// client doesn't send anything, so it's fine to Read() instead of Peek()\n\tif _, err := c.Read(one); err == io.EOF {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func handleConnection_server(c net.Conn) {\r\n\tfmt.Printf(\"Serving %s\\n\", c.RemoteAddr().String())\r\n\r\n\tfor {\r\n\r\n\t\t// splitting head => g++ parts => rest of the command\r\n\t\treader := bufio.NewReader(c)\r\n\t\tnetData, err := reader.ReadString('\\n')\r\n\t\tif err != nil {\r\n\t\t\tbreak\r\n\t\t}\r\n\r\n\t\tparts := processcmd(netData)\r\n\t\thead := parts[0]\r\n\t\tparts = parts[1:len(parts)]\r\n\r\n\t\t//if we are perfoming a unit test\r\n\t\t//generate the log file\r\n\t\tif netData == \"TEST\\n\" {\r\n\t\t\tcmd := exec.Command(\"sh\", \"randomFileGenerator.sh\")\r\n\t\t\tcmd.Start()\r\n\t\t\tcontinue\r\n\t\t}\r\n\r\n\t\tcmd := exec.Command(head, parts...)\r\n\r\n\t\tout, _ := cmd.CombinedOutput()\r\n\t\t//send size of grep output\r\n\t\tvar buf [8]byte\r\n\t\tbinary.BigEndian.PutUint64(buf[:], uint64(len(string(out))))\r\n\r\n\t\t_, err = c.Write(buf[:])\r\n\t\tif err != nil {\r\n\t\t\tfmt.Println(\"err:\", err)\r\n\t\t\tbreak\r\n\t\t}\r\n\t\tack, _ := reader.ReadString('\\n')\r\n\t\tvar count int\r\n\t\tcount = 0\r\n\t\tfmt.Printf(ack)\r\n\t\t//wait for ack from client\r\n\t\tfor ack != \"ACK\\n\" {\r\n\t\t\tfmt.Printf(\"Test1\")\r\n\t\t\tfor count < len(ack) {\r\n\t\t\t\tfmt.Println(ack[count])\r\n\t\t\t\tcount = count + 1\r\n\t\t\t}\r\n\t\t\tack, _ := reader.ReadString('\\n')\r\n\t\t\tif ack == \"ACK\\n\" {\r\n\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\t\t}\r\n\t\t//send the output of the command back to the client\r\n\t\tc.Write([]byte(string(string(out))))\r\n\r\n\t}\r\n\tc.Close()\r\n\r\n}", "func TestDbConnection(username, password, hostname, database, instance, port string) (string, error) {\n\tquery := url.Values{}\n\tquery.Add(\"database\", database)\n\tquery.Add(\"port\", port)\n\n\tu := &url.URL{\n\t\tScheme: \"sqlserver\",\n\t\tUser: url.UserPassword(username, password),\n\t\tHost: hostname,\n\t\tRawQuery: query.Encode(),\n\t}\n\tif instance != \"\" {\n\t\tu.Path = instance\n\t}\n\n\tcondb, errdb := sql.Open(\"mssql\", u.String())\n\tif errdb != nil {\n\t\treturn \"\", errdb\n\t}\n\n\tdefer condb.Close()\n\n\tvar sqlversion string\n\n\trows, err := condb.Query(\"select @@version\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfor rows.Next() {\n\t\terr := rows.Scan(&sqlversion)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn sqlversion, nil\n\t}\n\treturn \"\", errors.New(\"No rows returned\")\n}" ]
[ "0.61619914", "0.61236", "0.6005466", "0.5846167", "0.58428293", "0.58305943", "0.57885176", "0.5739293", "0.56987995", "0.5646071", "0.5617517", "0.5611779", "0.56109595", "0.55873877", "0.55506074", "0.55271316", "0.5510483", "0.5507143", "0.5506634", "0.54904956", "0.5456948", "0.5445137", "0.5440545", "0.54363406", "0.5408962", "0.53786695", "0.5375556", "0.53690326", "0.5357434", "0.53512555", "0.5339146", "0.53283286", "0.532097", "0.5317817", "0.5307413", "0.53016037", "0.5264801", "0.5258524", "0.5257243", "0.52461326", "0.5237273", "0.52371645", "0.5235814", "0.52216333", "0.52164614", "0.5197007", "0.51816183", "0.5162302", "0.51610696", "0.5153408", "0.5153408", "0.5146682", "0.5144931", "0.5137112", "0.5126486", "0.5120293", "0.5117325", "0.5116365", "0.51138616", "0.51124996", "0.51048815", "0.5088728", "0.5085309", "0.5081307", "0.50798494", "0.5070626", "0.50689596", "0.50633", "0.50620276", "0.50570834", "0.50452465", "0.5044614", "0.5042548", "0.5039704", "0.50371313", "0.5036348", "0.50350213", "0.5029745", "0.50295687", "0.5027142", "0.50240064", "0.5019997", "0.5017856", "0.5012936", "0.5012851", "0.50124055", "0.50109375", "0.50106037", "0.5008644", "0.5006511", "0.5005758", "0.50049996", "0.50049376", "0.50003165", "0.49983937", "0.49974152", "0.49952862", "0.49924225", "0.49913236", "0.49887645" ]
0.5318429
33
Sends a TCP query that results in failure. When a query fails, Accept should close the TCP socket.
func TestAcceptFail(t *testing.T) { doh := newFakeTransport() client, server := makePair() // Start the forwarder running. go Accept(doh, server) lbuf := make([]byte, 2) // Send Query queryData := simpleQueryBytes binary.BigEndian.PutUint16(lbuf, uint16(len(queryData))) client.Write(lbuf) client.Write(queryData) // Indicate that the query failed doh.err = errors.New("fake error") // Read query queryRead := <-doh.query if !bytes.Equal(queryRead, queryData) { t.Error("Query mismatch") } // Accept should have closed the socket. n, _ := client.Read(lbuf) if n != 0 { t.Error("Expected to read 0 bytes") } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func WaitForTCP(ctx context.Context, rAddr string) error {\n\tdialer := net.Dialer{}\n\tconn, err := dialer.DialContext(ctx, \"tcp\", rAddr)\n\t//For loop to get around OS Dial Timeout\n\tfor err != nil {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tdefault:\n\t\t}\n\t\tconn, err = dialer.DialContext(ctx, \"tcp\", rAddr)\n\t}\n\tconn.Close()\n\treturn nil\n}", "func TestSendFailed(t *testing.T) {\n\tdoh, _ := NewTransport(testURL, ips, nil, nil, nil)\n\ttransport := doh.(*transport)\n\trt := makeTestRoundTripper()\n\ttransport.client.Transport = rt\n\n\trt.err = errors.New(\"test\")\n\t_, err := doh.Query(simpleQueryBytes)\n\tvar qerr *queryError\n\tif err == nil {\n\t\tt.Error(\"Send failure should be reported\")\n\t} else if !errors.As(err, &qerr) {\n\t\tt.Errorf(\"Wrong error type: %v\", err)\n\t} else if qerr.status != SendFailed {\n\t\tt.Errorf(\"Wrong error status: %d\", qerr.status)\n\t} else if !errors.Is(qerr, rt.err) {\n\t\tt.Errorf(\"Underlying error is not retained\")\n\t}\n}", "func listenTCP() {\n\tfor {\n\t\tconn, err := tcpListener.Accept()\n\t\tif err != nil {\n\t\t\tif netErr, ok := err.(net.Error); ok && netErr.Temporary() {\n\t\t\t\tlog.Printf(\"Temporary error while accepting connection: %s\", netErr)\n\t\t\t}\n\n\t\t\tlog.Fatalf(\"Unrecoverable error while accepting connection: %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tgo handleTCPConn(conn)\n\t}\n}", "func FailOnConn(t *testing.T, addr string) net.Listener {\n\tt.Helper()\n\n\ttarpit, err := net.Listen(\"tcp\", addr)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tgo func() {\n\t\tt.Helper()\n\n\t\t_, err := tarpit.Accept()\n\t\tif err == nil {\n\t\t\tt.Error(\"No connection expected\")\n\t\t}\n\t}()\n\treturn tarpit\n}", "func handle_conn_err(err error) {\n if err == io.EOF {\n fmt.Println(\"Connection went away\")\n } else if neterr, ok := err.(net.Error); ok && neterr.Timeout() {\n fmt.Println(\"Connection Timeout\")\n } else if operr, ok := err.(*net.OpError); ok {\n if operr.Op == \"dial\" {\n fmt.Println(\"Couldn't reach host\")\n } else if operr.Op == \"read\" {\n fmt.Println(\"Can't read closed connection\")\n } else {\n fmt.Printf(\"Failed to perform op: '%s'\\n\", operr.Op)\n }\n }\n}", "func waitTCP(addr string) {\n\tlog.Printf(\"Waiting for TCP to be available at %s\", addr)\n\t// Try once a second to connect\n\tfor startTime := time.Now(); time.Since(startTime) < 10*time.Second; time.Sleep(time.Second) {\n\t\tconn, err := net.DialTimeout(\"tcp\", addr, time.Second)\n\n\t\tif err == nil {\n\t\t\t// Connection successful\n\t\t\tlog.Printf(\"TCP came up on %s\", addr)\n\t\t\tcloseErr := conn.Close()\n\t\t\tif closeErr != nil {\n\t\t\t\tlog.Printf(\"Error closing TCP connection in waitTCP: %s\", closeErr)\n\t\t\t}\n\n\t\t\treturn\n\t\t}\n\n\t\tlog.Printf(\"Tried to connect to %s, got error: %s. 
Will retry in 1 second.\", addr, err)\n\t}\n\n\t// Timed out\n\tpanic(fmt.Sprintf(\"Timeout out waiting for service to start on %s\", addr))\n}", "func TestAcceptClose(t *testing.T) {\n\tdoh := newFakeTransport()\n\tclient, server := makePair()\n\n\t// Start the forwarder running.\n\tgo Accept(doh, server)\n\n\tlbuf := make([]byte, 2)\n\t// Send Query\n\tqueryData := simpleQueryBytes\n\tbinary.BigEndian.PutUint16(lbuf, uint16(len(queryData)))\n\tclient.Write(lbuf)\n\tclient.Write(queryData)\n\n\t// Read query\n\tqueryRead := <-doh.query\n\tif !bytes.Equal(queryRead, queryData) {\n\t\tt.Error(\"Query mismatch\")\n\t}\n\n\t// Close the TCP connection\n\tclient.Close()\n\n\t// Send fake response too late.\n\tresponseData := []byte{1, 2, 8, 9, 10}\n\tdoh.response <- responseData\n}", "func waitTCPDown(addr string) {\n\tlog.Printf(\"Waiting for TCP to be down at %s\", addr)\n\t// Try once a second to connect\n\tfor startTime := time.Now(); time.Since(startTime) < 10*time.Second; time.Sleep(time.Second) {\n\t\tconn, err := net.DialTimeout(\"tcp\", addr, time.Second)\n\n\t\tif err != nil {\n\t\t\t// Connection failed\n\t\t\tlog.Printf(\"TCP went down on %s\", addr)\n\t\t\treturn\n\t\t}\n\n\t\tcloseErr := conn.Close()\n\t\tif closeErr != nil {\n\t\t\tlog.Printf(\"Error closing TCP connection in waitTCP: %s\", closeErr)\n\t\t}\n\n\t\tlog.Printf(\"Tried to connect to %s, was successful. Will retry in 1 second.\", addr)\n\t}\n\n\t// Timed out\n\tpanic(fmt.Sprintf(\"Timeout out waiting for service to stop on %s\", addr))\n}", "func ErrNetClosing() error {\n\tif errNetClosing == nil {\n\t\tvar dummyServer *net.TCPListener\n\t\tif addr, err := net.ResolveTCPAddr(\"tcp\", \"127.0.0.1:0\"); err != nil {\n\t\t\treturn nil\n\t\t} else if dummyServer, err = net.ListenTCP(\"tcp\", addr); err != nil {\n\t\t\treturn nil\n\t\t} else {\n\t\t\t// noinspection GoUnhandledErrorResult\n\t\t\tdefer dummyServer.Close()\n\t\t}\n\t\tif dummyClient, err := net.DialTCP(\"tcp\", nil, dummyServer.Addr().(*net.TCPAddr)); err != nil {\n\t\t\treturn nil\n\t\t} else {\n\t\t\t// noinspection GoUnhandledErrorResult\n\t\t\tdummyClient.Close()\n\t\t\t_, err = dummyClient.Write(dummyMessage)\n\t\t\tif opError, ok := err.(*net.OpError); ok {\n\t\t\t\terrNetClosing = opError.Err\n\t\t\t}\n\t\t}\n\t}\n\treturn errNetClosing\n}", "func (c *Conn) Reject() {\n\tswitch c.curcmd {\n\tcase HELO, EHLO:\n\t\tc.reply(ReplyRejected)\n\tcase MAILFROM, RCPTTO:\n\t\tc.reply(ReplyBadAddr)\n\tcase AUTH:\n\t\tc.authDone(false)\n\t\tc.reply(ReplyInvalidAuth)\n\tcase DATA:\n\t\tc.reply(ReplyRejected)\n\t}\n\tc.replied = true\n}", "func (cp *singleConnectionPool) OnFailure(c *Connection) error { return nil }", "func SendTCP(address, msg string) (string, error) {\n\tconn, err := net.Dial(\"tcp\", address)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err := conn.SetReadDeadline(time.Now().Add(30 * time.Second)); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdefer func() {\n\t\tif err := conn.Close(); err != nil {\n\t\t\tlogrus.Warn(\"Could not close TCP connection\")\n\t\t}\n\t}()\n\n\t// writes to the tcp connection\n\tfmt.Fprintf(conn, msg+\"\\n\")\n\n\tresponse, err := bufio.NewReader(conn).ReadString('\\n')\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn response, nil\n}", "func WaitSuccessfulDial(address string) error {\n\tctx, cancel := context.WithTimeout(context.Background(), waitDur)\n\tvar lastErr error\n\tdefer cancel()\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn multierr.Combine(ctx.Err(), 
lastErr)\n\t\tdefault:\n\t\t}\n\t\tvar conn net.Conn\n\t\tconn, lastErr = net.Dial(\"tcp\", address)\n\t\tif lastErr == nil {\n\t\t\treturn conn.Close()\n\t\t}\n\t\tlastErr = errors.Wrap(lastErr, 0)\n\t}\n}", "func echo(conn net.Conn){\n defer conn.Close()\n bData, _ := recvDataB(conn)\n sendDataB(conn, bData, OK_CODE)\n}", "func (c *Client) writeQuery(conn net.Conn, query []byte) error {\n\tvar err error\n\n\tif c.Timeout > 0 {\n\t\t_ = conn.SetWriteDeadline(time.Now().Add(c.Timeout))\n\t}\n\n\t// Write to the connection\n\tif _, ok := conn.(*net.TCPConn); ok {\n\t\tl := make([]byte, 2)\n\t\tbinary.BigEndian.PutUint16(l, uint16(len(query)))\n\t\t_, err = (&net.Buffers{l, query}).WriteTo(conn)\n\t} else {\n\t\t_, err = conn.Write(query)\n\t}\n\n\treturn err\n}", "func (l *MockListener) AcceptTCP() (ConnI, error) {\n\treturn <-l.connCh, nil\n}", "func serve(port int, handler connectionhandler) {\n \n if port < 1024 || port > 65535 {\n // todo: how does go handle errors.\n }\n\n portspec := fmt.Sprintf(\":%d\", port)\n\n sock, err := net.Listen(\"tcp\", portspec)\n if err != nil {\n // error\n fmt.Printf(\"%d\", err)\n }\n\n for {\n conn, err := sock.Accept()\n if err != nil {\n fmt.Printf(\"%d\", err) \n }\n go handler(conn) \n }\n}", "func (h *Handler) SendQuery(ctx context.Context, ip net.IP) (err error) {\n\n\tpacket := nodeStatusRequestWireFormat(`* `)\n\t// packet.printHeader()\n\n\tif ip == nil || ip.Equal(net.IPv4zero) {\n\t\treturn fmt.Errorf(\"invalid IP=%v\", ip)\n\t}\n\t// ip[3] = 255 // Network broadcast\n\n\t// To broadcast, use network broadcast i.e 192.168.0.255 for example.\n\ttargetAddr := &net.UDPAddr{IP: ip, Port: 137}\n\tif _, err = h.conn.WriteToUDP(packet, targetAddr); err != nil {\n\t\tif ctx.Err() == nil { // not cancelled\n\t\t\treturn fmt.Errorf(\"nbns failed to send packet: %w\", err)\n\t\t}\n\t}\n\treturn nil\n}", "func TestBadTCPClientConnect(t *testing.T) {\n\tclient, err := clients.New(\"321321321\", \"\")\n\tif err == nil {\n\t\tt.Fatalf(\"Client succeeded to connect\")\n\t}\n\tclient, err = clients.New(testAddr, \"hil\")\n\tif err != nil {\n\t\tt.Fatalf(\"Client failed to connect - %s\", err.Error())\n\t\tt.FailNow()\n\t}\n\tdefer client.Close()\n\n\tif err := client.Ping(); err != nil {\n\t\tt.Fatalf(\"ping failed\")\n\t}\n\tif msg := <-client.Messages(); msg.Data != \"pong\" {\n\t\tt.Fatalf(\"Unexpected data: Expecting 'pong' got %s\", msg.Data)\n\t}\n\tclient.Ping()\n}", "func (o *PluginDnsClient) Query(queries []utils.DnsQueryParams, socket transport.SocketApi) error {\n\tif o.IsNameServer() {\n\t\treturn fmt.Errorf(\"Querying is not permitted for Dns Name Servers!\")\n\t}\n\tquestions, err := utils.BuildQuestions(queries)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(questions) > 0 {\n\t\tdata := o.dnsPktBuilder.BuildQueryPkt(questions, o.Tctx.Simulation)\n\t\tif socket == nil {\n\t\t\treturn fmt.Errorf(\"Invalid Socket in Query!\")\n\t\t}\n\t\ttransportErr, _ := o.socket.Write(data)\n\t\tif transportErr != transport.SeOK {\n\t\t\to.stats.socketWriteError++\n\t\t\treturn transportErr.Error()\n\t\t}\n\t\to.stats.pktTxDnsQuery++ // successfully sent query\n\t\to.stats.txBytes += uint64(len(data)) // number of bytes sent\n\t}\n\treturn nil\n}", "func (my *MySQL) Ping() (err os.Error) {\n defer my.unlock()\n defer catchOsError(&err)\n my.lock()\n\n if my.conn == nil {\n return NOT_CONN_ERROR\n }\n if my.unreaded_rows {\n return UNREADED_ROWS_ERROR\n }\n\n // Send command\n my.sendCmd(_COM_PING)\n // Get server response\n my.getResult(nil)\n\n 
return\n}", "func (e errorConnection) Write(b []byte) (n int, err error) {\n\treturn 0, e.err\n}", "func (ec *ErrConnection) QueryInt(query string, args ...interface{}) int {\n\tvar i int\n\tif ec.Err != nil {\n\t\treturn i\n\t}\n\ti, err := QueryInt(ec.Conn, query, args...)\n\tif err != nil {\n\t\tec.Err = err\n\t}\n\treturn i\n}", "func Send(ctx context.Context, address string) (err error) {\n\t// Resolve the UDP address so that we can make use of DialUDP\n\t// with an actual IP and port instead of a name (in case a\n\t// hostname is specified).\n\traddr, err := net.ResolveUDPAddr(\"udp\", address)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Although we're not in a connection-oriented transport,\n\t// the act of `dialing` is analogous to the act of performing\n\t// a `connect(2)` syscall for a socket of type SOCK_DGRAM:\n\t// - it forces the underlying socket to only read and write\n\t// to and from a specific remote address.\n\tconn, err := net.DialUDP(\"udp\", nil, raddr)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Closes the underlying file descriptor associated with the,\n\t// socket so that it no longer refers to any file.\n\tdefer conn.Close()\n\n\tdoneChan := make(chan error, 1)\n\n\tgo func() {\n\t\t// It is possible that this action blocks, although this\n\t\t// should only occur in very resource-intensive situations:\n\t\t// - when you've filled up the socket buffer and the OS\n\t\t// can't dequeue the queue fast enough.\n\t\t//n, err := io.Copy(conn, reader)\n\t\t//n, err := fmt.Fprintf(conn, \"WSD\")\n\t\tn, err := fmt.Fprintf(conn,\n\t\t\t\"FC1307\"+string(0x1)+string(4)+string(0)+string(0)+string(0)+string(0)+string(0)+string(1)+string(5)+string(5)+\"adminxxxxxxxxxxxadminxxxxxxxxxxx\"+string(0)+string(0)+string(0)+string(1),\n\t\t)\n\t\tif err != nil {\n\t\t\tdoneChan <- err\n\t\t\treturn\n\t\t}\n\n\t\tfmt.Printf(\"%d bytes sent\\n\", n)\n\n\t\tbuffer := make([]byte, maxBufferSize)\n\n\t\t// Set a deadline for the ReadOperation so that we don't\n\t\t// wait forever for a server that might not respond on\n\t\t// a resonable amount of time.\n\t\tdeadline := time.Now().Add(15 * time.Second)\n\t\terr = conn.SetReadDeadline(deadline)\n\t\tif err != nil {\n\t\t\tdoneChan <- err\n\t\t\treturn\n\t\t}\n\n\t\tnRead, _, err := conn.ReadFrom(buffer)\n\t\tif err != nil {\n\t\t\tdoneChan <- err\n\t\t\treturn\n\t\t}\n\n\t\tfmt.Printf(\"bytes=%d received\\n\", nRead)\n\t\tfmt.Println(hex.EncodeToString(buffer[0:nRead]))\n\n\t\tdoneChan <- nil\n\t}()\n\tselect {\n\tcase <-ctx.Done():\n\t\tfmt.Println(\"cancelled\")\n\t\terr = ctx.Err()\n\tcase err = <-doneChan:\n\t}\n\n\treturn\n}", "func RPCAccept(ln net.Listener, server *rpc.Server) error {\n\terrClosing := errors.New(\"use of closed network connection\")\n\tfor {\n\t\tconn, err := ln.Accept()\n\t\tif err != nil {\n\t\t\tif ne, ok := err.(net.Error); ok && ne.Temporary() {\n\t\t\t\tlog.Warningf(\"RPC accept temporary error: %v\", err)\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif oe, ok := err.(*net.OpError); ok && oe.Err.Error() == errClosing.Error() {\n\t\t\t\tlog.Infoln(\"RPC accept connection closed\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tlog.Errorf(\"RPC accept error: %v\", err)\n\t\t\treturn err\n\t\t}\n\t\tgo server.ServeConn(conn)\n\t}\n}", "func TestDialTCP(t *testing.T) {\n\tt.Logf(\"Running DialTCP test to %s:%s\", TEST_HOST, TEST_PORT)\n\tdb, err = DialTCP(TEST_HOST, TEST_USER, TEST_PASSWD, TEST_DBNAME)\n\tif err != nil {\n\t\tt.Logf(\"Error %s\", err)\n\t\tt.Fail()\n\t}\n\terr = db.Close()\n\tif err 
!= nil {\n\t\tt.Logf(\"Error %s\", err)\n\t\tt.Fail()\n\t}\n}", "func SendError(conn net.Conn,name string, msg string){\n\n\tdataMap:=make(map[string]interface{})\n\tdataMap[\"type\"]=\"error\"\n\tdataMap[\"name\"]=name\n\tdataMap[\"msg\"]=msg\n\tSendJSONData(conn,dataMap)\n\n\n}", "func (m *Measurement) DialTCP() *TCPResponse {\n\n\t// Simply check that the server is up and can accept connections.\n\tresult := &TCPResponse{\n\t\tHost: m.Host,\n\t\tPort: m.Port,\n\t\tLatency: 0,\n\t\tTimeout: m.Timeout,\n\t\tSequence: m.count,\n\t}\n\n\taddress := fmt.Sprintf(\"%s:%d\", m.Host, m.Port)\n\tstart := time.Now()\n\n\tconn, err := net.DialTimeout(\"tcp\", address, time.Duration(m.Timeout)*time.Second)\n\tif err != nil {\n\n\t\tm.appendFailed(result)\n\t\treturn result\n\t}\n\tdefer conn.Close()\n\n\tresult.Latency = time.Since(start).Seconds() * 1000\n\n\tm.appendSuccessful(result)\n\treturn result\n}", "func (l *RstListener) Accept() (net.Conn, error) {\n\tconn, err := l.l.Accept()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttcpConn, ok := conn.(*net.TCPConn)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"Expected a TCP connection\")\n\t}\n\n\terr = tcpConn.SetLinger(0)\n\tif err != nil {\n\t\tfmt.Printf(\"get error before sending rst: %v\", err)\n\t}\n\treturn conn, err\n}", "func TCPDialCheck(addr string, timeout time.Duration) Check {\n\treturn func() error {\n\t\tconn, err := net.DialTimeout(\"tcp\", addr, timeout)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn conn.Close()\n\t}\n}", "func TCPDialCheck(addr string, timeout time.Duration) Check {\n\treturn func() error {\n\t\tconn, err := net.DialTimeout(\"tcp\", addr, timeout)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn conn.Close()\n\t}\n}", "func serve(err error) error {\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\tnerr, ok := err.(*net.OpError)\n\tif !ok {\n\t\treturn err\n\t}\n\n\t// Unfortunately there isn't an easier way to check for this, but\n\t// we want to ignore errors related to the connection closing, since\n\t// s.Close is triggered on signal.\n\tif nerr.Err.Error() != \"use of closed network connection\" {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func hostDockerQuery() {\n\tlog.Println(\"hostDockerQuery\")\n\tfor {\n\t\ttime.Sleep(2 * time.Second)\n\t\tc, err := net.Dial(\"unix\", \"/var/run/dockerConnection/hostconnection.sock\")\n\t\tif err != nil {\n\t\t\tcontinue;\n\t\t}\n\t\t// send to socket\n\t\tlog.Println(\"sending request to server\")\n\t\tfmt.Fprintf(c, \"hi\" + \"\\n\")\n\t\t// listen for reply\n\t\tmessage, _ := bufio.NewReader(c).ReadString('\\n')\n\t\t//log.Println(\"Message from server: \" + message)\n\t\tlog.Println(\"Received update from host server\")\n\n\t\t// set this to be the latest response\n\t\tlatestHostServerResponse = message\n\t}\n}", "func dialHost(addr string, port, timeout uint) error {\n\twaitTime := 5 * time.Second\n\tattempts := timeout / uint(waitTime.Seconds())\n\tfullAddr := fmt.Sprintf(\"%s:%d\", addr, port)\n\n\treturn retry.Retry(func(attempt uint) error {\n\t\tconn, err := net.Dial(\"tcp\", fullAddr)\n\t\tif err != nil {\n\t\t\tlog.Warnf(\"attempt [%d] to verify host [%s] is listening failed\", attempt, fullAddr)\n\t\t\treturn err\n\t\t}\n\n\t\treturn conn.Close()\n\t}, strategy.Wait(waitTime), strategy.Limit(attempts))\n}", "func (c Client) SendQuery(message dns.Msg) (dns.Msg, error) {\n\t// Open a new QUIC stream\n\tlog.Debugln(\"opening new quic stream\")\n\tstream, err := c.Session.OpenStream()\n\tif err != nil {\n\t\treturn dns.Msg{}, 
errors.New(\"quic stream open: \" + err.Error())\n\t}\n\n\t// Pack the DNS message for transmission\n\tlog.Debugln(\"packing dns message\")\n\tpacked, err := message.Pack()\n\tif err != nil {\n\t\t_ = stream.Close()\n\t\treturn dns.Msg{}, errors.New(\"dns message pack: \" + err.Error())\n\t}\n\n\t// Send the DNS query over QUIC\n\tlog.Debugln(\"writing packed format to the stream\")\n\t_, err = stream.Write(packed)\n\t_ = stream.Close()\n\tif err != nil {\n\t\treturn dns.Msg{}, errors.New(\"quic stream write: \" + err.Error())\n\t}\n\n\t// Read the response\n\tlog.Debugln(\"reading server response\")\n\tresponse, err := ioutil.ReadAll(stream)\n\tif err != nil {\n\t\treturn dns.Msg{}, errors.New(\"quic stream read: \" + err.Error())\n\t}\n\n\t// Unpack the DNS message\n\tlog.Debugln(\"unpacking response dns message\")\n\tvar msg dns.Msg\n\terr = msg.Unpack(response)\n\tif err != nil {\n\t\treturn dns.Msg{}, errors.New(\"dns message unpack: \" + err.Error())\n\t}\n\n\treturn msg, nil // nil error\n}", "func handleTCPMsg(ln net.Listener) {\n\tdefer ln.Close()\n\tlog.Printf(\"listen for messages on port 15000\")\n\tfor {\n\t\trw, e := ln.Accept()\n\t\tif e != nil {\n\t\t\tlog.Fatal(e)\n\t\t}\n\t\tgo handleTCPConnection(rw)\n\t}\n}", "func TestSendQErrorPropagation(t *testing.T) {\n\twg := &sync.WaitGroup{}\n\tr, w := io.Pipe()\n\tsq := NewSendQ(w, 100, wg)\n\texpected := errors.New(\"test\")\n\tr.CloseWithError(expected)\n\tsq.Write([]byte(\"this should fail\"))\n\terr := <-sq.ErrChan()\n\tif err != expected {\n\t\tt.Errorf(\"Expected a known error, got %d\", err)\n\t}\n\tif unknown, done := <-sq.ErrChan(); done {\n\t\tt.Errorf(\"Expected error channel to close, but it didn't. Instead, got '%s'\", unknown)\n\t}\n\twg.Wait()\n}", "func TestHTTPError(t *testing.T) {\n\tdoh, _ := NewTransport(testURL, ips, nil, nil, nil)\n\ttransport := doh.(*transport)\n\trt := makeTestRoundTripper()\n\ttransport.client.Transport = rt\n\n\tgo func() {\n\t\t<-rt.req\n\t\tr, w := io.Pipe()\n\t\trt.resp <- &http.Response{\n\t\t\tStatusCode: 500,\n\t\t\tBody: r,\n\t\t\tRequest: &http.Request{URL: parsedURL},\n\t\t}\n\t\tw.Write([]byte{0, 0, 8, 9, 10})\n\t\tw.Close()\n\t}()\n\n\t_, err := doh.Query(simpleQueryBytes)\n\tvar qerr *queryError\n\tif err == nil {\n\t\tt.Error(\"Empty body should cause an error\")\n\t} else if !errors.As(err, &qerr) {\n\t\tt.Errorf(\"Wrong error type: %v\", err)\n\t} else if qerr.status != HTTPError {\n\t\tt.Errorf(\"Wrong error status: %d\", qerr.status)\n\t}\n}", "func TestAcceptOversize(t *testing.T) {\n\tdoh := newFakeTransport()\n\tclient, server := makePair()\n\n\t// Start the forwarder running.\n\tgo Accept(doh, server)\n\n\tlbuf := make([]byte, 2)\n\t// Send Query\n\tqueryData := simpleQueryBytes\n\tbinary.BigEndian.PutUint16(lbuf, uint16(len(queryData)))\n\tclient.Write(lbuf)\n\tclient.Write(queryData)\n\n\t// Read query\n\t<-doh.query\n\n\t// Send oversize response\n\tdoh.response <- make([]byte, 65536)\n\n\t// Accept should have closed the socket because the response\n\t// cannot be written.\n\tn, _ := client.Read(lbuf)\n\tif n != 0 {\n\t\tt.Error(\"Expected to read 0 bytes\")\n\t}\n}", "func (server *TCPServer) Listen() (chan bool, error) {\n\tserver.mutex.Lock()\n\tdefer server.mutex.Unlock()\n\tdone := make(chan bool)\n\t// give errors a buffer\n\terrors := make(chan error, 10)\n\tconnections := make(chan net.Conn)\n\tconsecutiveFailures := 0\n\tlistener, err := server.implementation.GetListener()\n\tif nil != err {\n\t\treturn nil, err\n\t}\n\tserver.Dispatcher.Run()\n\tgo 
func() {\n\t\tgo server.listen(listener, connections, errors, done)\n\t\tserver.idleTicker = timeutil.NewTicker(server.idleTimeout).Start()\n\t\tfor {\n\t\t\tserver.idleTicker.Reset()\n\t\t\tselect {\n\t\t\tcase timeout := <-server.idleUpdate:\n\t\t\t\tserver.idleTimeout = timeout\n\t\t\t\tserver.idleTicker.SetPeriod(server.idleTimeout)\n\t\t\tcase <-server.idleTicker.Channel():\n\t\t\t\tserver.log.Infof(\"tcp server was idle for %s\", server.idleTimeout)\n\t\t\t\tserver.callOnIdle()\n\t\t\t\tserver.idleTicker.Stop()\n\t\t\tcase <-done:\n\t\t\t\tserver.Dispatcher.Quit(true)\n\t\t\t\tlistener.Close()\n\t\t\t\treturn\n\t\t\tcase conn := <-connections:\n\t\t\t\ttask, err := server.implementation.GetTask(conn)\n\t\t\t\tif nil != err {\n\t\t\t\t\terrors <- err\n\t\t\t\t} else {\n\t\t\t\t\tconsecutiveFailures = 0\n\t\t\t\t\tserver.Dispatch(task)\n\t\t\t\t}\n\t\t\tcase err := <-errors:\n\t\t\t\tserver.log.Errorf(\"error encountered listening %s %#v\", err, err)\n\t\t\t\tconsecutiveFailures++\n\t\t\t\tif consecutiveFailures > server.MaxConsecutiveErrors {\n\t\t\t\t\tserver.log.Infof(\"Maximum consecutive errors threshold exceeded.\")\n\t\t\t\t\tdone <- true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\treturn done, nil\n}", "func TestDisconnect1(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"short\")\n\t}\n\tcheckConnStr(t)\n\tSetLogger(testLogger{t})\n\n\t// Revert to the normal dialer after the test is done.\n\tnormalCreateDialer := createDialer\n\tdefer func() {\n\t\tcreateDialer = normalCreateDialer\n\t}()\n\n\twaitDisrupt := make(chan struct{})\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second*2)\n\tdefer cancel()\n\n\tcreateDialer = func(p *connectParams) dialer {\n\t\tnd := tcpDialer{&net.Dialer{Timeout: p.dial_timeout, KeepAlive: p.keepAlive}}\n\t\tdi := &dialerInterrupt{nd: nd}\n\t\tgo func() {\n\t\t\t<-waitDisrupt\n\t\t\tdi.Interrupt(true)\n\t\t\tdi.Interrupt(false)\n\t\t}()\n\t\treturn di\n\t}\n\tdb, err := sql.Open(\"sqlserver\", makeConnStr(t).String())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := db.PingContext(ctx); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer db.Close()\n\n\t_, err = db.ExecContext(ctx, `SET LOCK_TIMEOUT 1800;`)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tgo func() {\n\t\ttime.Sleep(time.Second * 1)\n\t\tclose(waitDisrupt)\n\t}()\n\tt.Log(\"prepare for query\")\n\t_, err = db.ExecContext(ctx, `waitfor delay '00:00:3';`)\n\tif err != nil {\n\t\tt.Log(\"expected error after disconnect\", err)\n\t\treturn\n\t}\n\tt.Fatal(\"wanted error after Exec\")\n}", "func (s *Server) Run() error {\n\tfor {\n\t\tconn, err := s.ln.Accept()\n\t\tif err != nil {\n\t\t\toperr, ok := err.(*net.OpError)\n\t\t\tif !ok {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif operr.Temporary() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tgo s.handleConnection(conn)\n\t}\n}", "func TCP(address string) bool {\n\tconn, err := net.DialTimeout(\"tcp\", address, timeout)\n\tif err != nil {\n\t\treturn false\n\t}\n\tconn.Close()\n\treturn true\n}", "func (p *Proxy) respondTCP(d *DNSContext) error {\n\tresp := d.Res\n\tconn := d.Conn\n\n\tbytes, err := resp.Pack()\n\tif err != nil {\n\t\treturn errorx.Decorate(err, \"couldn't convert message into wire format: %s\", resp.String())\n\t}\n\n\terr = proxyutil.WritePrefixed(bytes, conn)\n\n\tif proxyutil.IsConnClosed(err) {\n\t\treturn err\n\t}\n\tif err != nil {\n\t\treturn errorx.Decorate(err, \"conn.Write() returned error\")\n\t}\n\n\treturn nil\n}", "func sendOutgoing(conn net.Conn, message []byte) {\n numWritten, 
err := conn.Write(message)\n\n if err != nil {\n fmt.Println(\"Error writing outgoing message: \", err.Error())\n os.Exit(1)\n }\n\n if numWritten != len(message) {\n fmt.Println(\"Could not write out the full message.\")\n }\n}", "func (ctn *Connection) Write(buf []byte) (total int, err error) {\n\t// make sure all bytes are written\n\t// Don't worry about the loop, timeout has been set elsewhere\n\tlength := len(buf)\n\tfor total < length {\n\t\tvar r int\n\t\tif err = ctn.updateDeadline(); err != nil {\n\t\t\tbreak\n\t\t}\n\t\tr, err = ctn.conn.Write(buf[total:])\n\t\ttotal += r\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// If all bytes are written, ignore any potential error\n\t// The error will bubble up on the next network io if it matters.\n\tif total == len(buf) {\n\t\treturn total, nil\n\t}\n\n\tif ctn.node != nil {\n\t\tctn.node.incrErrorCount()\n\t\tatomic.AddInt64(&ctn.node.stats.ConnectionsFailed, 1)\n\t}\n\n\t// the line should happen before .Close()\n\terr = errToTimeoutErr(ctn, err)\n\tctn.Close()\n\n\treturn total, err\n}", "func sendRequest(conn net.Conn, text string) {\n message := text;\n \n if _,err := conn.Write([]byte(message + \"\\n\")); err != nil {\n log.Fatal(err)\n }\n}", "func TestGraphQLHTTPOnSamePort_GQLRequest_Unsuccessful(t *testing.T) {\n\tstack := createNode(t, false, false)\n\tdefer stack.Close()\n\tif err := stack.Start(); err != nil {\n\t\tt.Fatalf(\"could not start node: %v\", err)\n\t}\n\tbody := strings.NewReader(`{\"query\": \"{block{number}}\",\"variables\": null}`)\n\tresp, err := http.Post(fmt.Sprintf(\"%s/graphql\", stack.HTTPEndpoint()), \"application/json\", body)\n\tif err != nil {\n\t\tt.Fatalf(\"could not post: %v\", err)\n\t}\n\t// make sure the request is not handled successfully\n\tassert.Equal(t, http.StatusNotFound, resp.StatusCode)\n}", "func CMWriteToTCP(sock *net.TCPConn, writeChan, readChan chan ConnectionMsg,\n\tpeerAddress string) {\n\tloop := 1\n\tvar errorMsg ConnectionMsg\n\terrorMsg.Host = peerAddress\n\terrorMsg.Type = \"WriteError\"\n\tfor loop == 1 {\n\t\tselect {\n\t\tcase msg := <-writeChan:\n\t\t\tswitch msg.Type {\n\t\t\tcase \"Data\":\n\t\t\t\t_, err := sock.Write(msg.Data)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfor loop == 1 {\n\t\t\t\t\t\tselect {\n\t\t\t\t\t\tcase readChan <- errorMsg:\n\t\t\t\t\t\tcase errorMsg := <-writeChan:\n\t\t\t\t\t\t\tif errorMsg.Type != \"ConnectionError\" {\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tloop = 0\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase \"ConnectionError\":\n\t\t\t\tloop = 0\n\t\t\tdefault:\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n}", "func BenchmarkTCPFindCipherFail(b *testing.B) {\n\tb.StopTimer()\n\tb.ResetTimer()\n\n\tlistener, err := net.ListenTCP(\"tcp\", &net.TCPAddr{IP: net.ParseIP(\"127.0.0.1\"), Port: 0})\n\tif err != nil {\n\t\tb.Fatalf(\"ListenTCP failed: %v\", err)\n\t}\n\n\tcipherList, err := MakeTestCiphers(ss.MakeTestSecrets(100))\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\ttestPayload := ss.MakeTestPayload(50)\n\tfor n := 0; n < b.N; n++ {\n\t\tgo func() {\n\t\t\tconn, err := net.Dial(\"tcp\", listener.Addr().String())\n\t\t\tif err != nil {\n\t\t\t\tb.Fatalf(\"Failed to dial %v: %v\", listener.Addr(), err)\n\t\t\t}\n\t\t\tconn.Write(testPayload)\n\t\t\tconn.Close()\n\t\t}()\n\t\tclientConn, err := listener.AcceptTCP()\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"AcceptTCP failed: %v\", err)\n\t\t}\n\t\tclientIP := clientConn.RemoteAddr().(*net.TCPAddr).IP\n\t\tb.StartTimer()\n\t\tfindAccessKey(clientConn, clientIP, 
cipherList)\n\t\tb.StopTimer()\n\t}\n}", "func handleRequest(conn net.Conn) {\r\n\tdefer wg.Done()\r\n\tdefer conn.Close()\r\n\tisClose:= false\r\n\tvar response string = \"\"\r\n\t// Make a buffer to hold incoming data.\r\n\tbuf := make([]byte, 1024)\r\n\r\n\tstartTime := time.Now()\r\n\ttimeInactive := 0\r\n\tfor !isClose && !isExit && time.Since(startTime).Minutes() < MAXTCPCONN && timeInactive < MAXTCPINACTIVE {\t\r\n\t\tconn.SetDeadline(time.Now().Add(TIMEOUT*time.Second))\r\n\t\t// Read the incoming connection into the buffer.\r\n\t\tnb, err := conn.Read(buf)\r\n\t\tif err != nil { \r\n\t\t\tif opErr, ok := err.(*net.OpError); ok && opErr.Timeout() {\ttimeInactive+=TIMEOUT; continue }\r\n\t\t\tbreak; \r\n\t\t}\r\n\t\tif nb > 0 {\r\n\t\t\ttimeInactive = 0\r\n\t\t\tresponse, isClose, err = handleMessage(string(buf[:nb]),\"TCP\")\r\n\t\t\t// Send the response\r\n\t\t\tif err == nil && len(response)>0 {\r\n\t\t\t\tif isVerbose { logger.Print(\"Response: \"+response) }\r\n\t\t\t\tconn.Write([]byte(response)) \r\n\t\t\t} else if err != nil {\r\n\t\t\t\tlogger.Print(err.Error())\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\tif isVerbose {logger.Print(\"Close TCP connection ...\") }\r\n}", "func (mb *tcpTransporter) Send(aduRequest []byte) (aduResponse []byte, err error) {\n\tmb.mu.Lock()\n\tdefer mb.mu.Unlock()\n\n\tvar data [tcpMaxLength]byte\n\trecoveryDeadline := time.Now().Add(mb.IdleTimeout)\n\n\tfor {\n\t\t// Establish a new connection if not connected\n\t\tif err = mb.connect(); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\t// Set timer to close when idle\n\t\tmb.lastActivity = time.Now()\n\t\tmb.startCloseTimer()\n\t\t// Set write and read timeout\n\t\tvar timeout time.Time\n\t\tif mb.Timeout > 0 {\n\t\t\ttimeout = mb.lastActivity.Add(mb.Timeout)\n\t\t}\n\t\tif err = mb.conn.SetDeadline(timeout); err != nil {\n\t\t\treturn\n\t\t}\n\t\t// Send data\n\t\tmb.logf(\"modbus: send % x\", aduRequest)\n\t\tif _, err = mb.conn.Write(aduRequest); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tmb.lastAttemptedTransactionID = binary.BigEndian.Uint16(aduRequest)\n\t\tvar res readResult\n\t\taduResponse, res, err = mb.readResponse(aduRequest, data[:], recoveryDeadline)\n\t\tswitch res {\n\t\tcase readResultDone:\n\t\t\tif err == nil {\n\t\t\t\tmb.lastSuccessfulTransactionID = binary.BigEndian.Uint16(aduResponse)\n\t\t\t}\n\t\t\treturn\n\t\tcase readResultRetry:\n\t\t\tcontinue\n\t\t}\n\n\t\tmb.logf(\"modbus: close connection and retry, because of %v\", err)\n\n\t\tmb.close()\n\t\ttime.Sleep(mb.LinkRecoveryTimeout)\n\t}\n}", "func KeepConnection(err Error) bool {\n\t// Do not keep connection on client errors.\n\tif err.resultCode() < 0 {\n\t\treturn false\n\t}\n\n\treturn !err.Matches(types.QUERY_TERMINATED,\n\t\ttypes.SCAN_ABORT,\n\t\ttypes.QUERY_ABORTED,\n\t\ttypes.TIMEOUT)\n}", "func (db *DB) Query(query string, args ...interface{}) (rows *sql.Rows, err error) {\n\tidx, readReplica := db.readReplicaRR()\n\trows, err = readReplica.Query(query, args...)\n\t// If it is a connection issue with the target, then try another endpoint.\n\t// Currently, this is an overkill. 
Ideally, catch all errors related to network.\n\tif err != nil {\n\t\tif err := checkBeat(readReplica); err != nil {\n\t\t\t// proactively quarantine the down readReplica\n\t\t\tgo db.quarantineReadReplica(idx)\n\t\t\t_, readReplica := db.readReplicaRR()\n\t\t\treturn readReplica.Query(query, args...)\n\t\t}\n\t}\n\n\t// if the error is not related to network issue,\n\t// return the original error\n\treturn\n}", "func tcpData(host string, port int, sourceIP string, healthCheck HealthCheck) (Status, error, string) {\n\n\ttcpAddr, err := net.ResolveTCPAddr(\"tcp\", fmt.Sprintf(\"%s:%d\", host, port))\n\tif err != nil {\n\t\treturn Offline, err, fmt.Sprintf(\"failed to resolve to an address: %s:%d\", host, port)\n\t}\n\n\tlocalAddr, errl := net.ResolveIPAddr(\"ip\", sourceIP)\n\tif errl != nil {\n\t\treturn Offline, errl, fmt.Sprintf(\"failed to resolve to an ip adress: %s\", sourceIP)\n\t}\n\n\tlocalTCPAddr := net.TCPAddr{\n\t\tIP: localAddr.IP,\n\t}\n\n\t// Custom dialer with\n\tconn, err := net.DialTCP(\"tcp\", &localTCPAddr, tcpAddr)\n\tif err != nil {\n\t\treturn Offline, err, fmt.Sprintf(\"failed to dail from source: %+v target: %+v\", localTCPAddr, *tcpAddr)\n\t}\n\n\tdefer conn.Close()\n\n\tfmt.Fprintf(conn, healthCheck.TCPRequest)\n\tr, err := regexp.Compile(healthCheck.TCPReply)\n\tif err != nil {\n\t\treturn Offline, err, fmt.Sprintf(\"regex Compile failed on %s\", healthCheck.TCPReply)\n\t}\n\n\tconn.SetReadDeadline(time.Now().Add(time.Duration(healthCheck.Timeout) * time.Second))\n\tfor {\n\t\tline, err := bufio.NewReader(conn).ReadString('\\n')\n\t\tif err != nil {\n\t\t\treturn Offline, err, fmt.Sprintf(\"failed with last input %s\", line)\n\t\t}\n\n\t\tif r.MatchString(line) {\n\t\t\treturn Online, nil, \"OK\"\n\t\t}\n\t}\n}", "func TestShortQuery(t *testing.T) {\n\tvar qerr *queryError\n\tdoh, _ := NewTransport(testURL, ips, nil, nil, nil)\n\t_, err := doh.Query([]byte{})\n\tif err == nil {\n\t\tt.Error(\"Empty query should fail\")\n\t} else if !errors.As(err, &qerr) {\n\t\tt.Errorf(\"Wrong error type: %v\", err)\n\t} else if qerr.status != BadQuery {\n\t\tt.Errorf(\"Wrong error status: %d\", qerr.status)\n\t}\n\n\t_, err = doh.Query([]byte{1})\n\tif err == nil {\n\t\tt.Error(\"One byte query should fail\")\n\t} else if !errors.As(err, &qerr) {\n\t\tt.Errorf(\"Wrong error type: %v\", err)\n\t} else if qerr.status != BadQuery {\n\t\tt.Errorf(\"Wrong error status: %d\", qerr.status)\n\t}\n}", "func TestConnQueryCloseEarly(t *testing.T) {\n\tt.Parallel()\n\n\tconn := mustConnectString(t, os.Getenv(\"PGX_TEST_DATABASE\"))\n\tdefer closeConn(t, conn)\n\n\t// Immediately close query without reading any rows\n\trows, err := conn.Query(context.Background(), \"select generate_series(1,$1)\", 10)\n\tif err != nil {\n\t\tt.Fatalf(\"conn.Query failed: %v\", err)\n\t}\n\trows.Close()\n\n\tensureConnValid(t, conn)\n\n\t// Read partial response then close\n\trows, err = conn.Query(context.Background(), \"select generate_series(1,$1)\", 10)\n\tif err != nil {\n\t\tt.Fatalf(\"conn.Query failed: %v\", err)\n\t}\n\n\tok := rows.Next()\n\tif !ok {\n\t\tt.Fatal(\"rows.Next terminated early\")\n\t}\n\n\tvar n int32\n\trows.Scan(&n)\n\tif n != 1 {\n\t\tt.Fatalf(\"Expected 1 from first row, but got %v\", n)\n\t}\n\n\trows.Close()\n\n\tensureConnValid(t, conn)\n}", "func isCriticalTCP(err error) (ok bool) {\n\tvar netErr net.Error\n\tif errors.As(err, &netErr) && netErr.Timeout() {\n\t\treturn false\n\t}\n\n\tswitch {\n\tcase\n\t\terrors.Is(err, io.EOF),\n\t\terrors.Is(err, 
net.ErrClosed),\n\t\terrors.Is(err, os.ErrDeadlineExceeded),\n\t\tisConnBroken(err):\n\t\treturn false\n\tdefault:\n\t\treturn true\n\t}\n}", "func Query(host, domain string) (string, error) {\n\tvar (\n\t\td net.Dialer\n\t\tout string\n\t\terr error\n\t)\n\n\tctx, cancel := context.WithTimeout(context.Background(),\n\t\tTotalTimeout*time.Second)\n\tdefer cancel()\n\n\thostport := net.JoinHostPort(host, PortNumber)\n\tconn, err := d.DialContext(ctx, \"tcp\", hostport)\n\tif err != nil {\n\t\treturn out, err\n\t}\n\tdefer conn.Close()\n\n\terr = conn.SetWriteDeadline(time.Now().Add(WriteTimeout *\n\t\ttime.Second))\n\tif err != nil {\n\t\treturn out, err\n\t}\n\n\tif _, err := conn.Write([]byte(domain + \"\\r\\n\")); err != nil {\n\t\treturn out, err\n\t}\n\n\terr = conn.SetReadDeadline(time.Now().Add(ReadTimeout *\n\t\ttime.Second))\n\tif err != nil {\n\t\treturn out, err\n\t}\n\n\toutput, err := ioutil.ReadAll(conn)\n\tif err != nil {\n\t\treturn out, err\n\t}\n\n\tout = string(output)\n\treturn out, nil\n}", "func TestSendError(t *testing.T) {\n\terr := SendError(\"Send Error\", \"https://status.btfs.io\", \"my peer id\", \"my HValue\")\n\tif err != nil {\n\t\tt.Errorf(\"Send error message to status server failed, reason: %v\", err)\n\t} else {\n\t\tt.Log(\"Send error message to status server successfully!\")\n\t}\n}", "func client(serverIp string, serverPort string) {\n //TCPAddr\n tcpAddr, err := net.ResolveTCPAddr(\"tcp\", serverIp + serverPort)\n checkErrorClient(err)\n\n //TCPConn\n conn, err := net.DialTCP(\"tcp\", nil, tcpAddr)\n checkErrorClient(err)\n reader := bufio.NewReader(os.Stdin)\n buf := make([]byte, SendBufferSize)\n\n for {\n readTotal, err := reader.Read(buf)\n if err != nil {\n if err != io.EOF {\n checkErrorClient(err)\n }\n break\n }\n _, err = conn.Write(buf[:readTotal])\n checkErrorClient(err)\n }\n\n checkErrorClient(err)\n os.Exit(0)\n}", "func Query(q *serfclient.QueryParam, errch chan<- error) {\n\tvar err error\n\n\t// retry connecting to serf for 5 minutes, every 5 seconds. TODO:\n\t// probably should make this configurable eventually.\n\tretryDelay := time.Duration(5)\n\tretryNum := 60\n\n\tif Serfer == nil || Serfer.IsClosed() {\n\t\tserfClients.closeSerf(config.Config.SerfAddr)\n\t\tvar ns *serfclient.RPCClient\n\t\tfor i := 0; i < retryNum; i++ {\n\t\t\tlogger.Debugf(\"reconnecting to serf try #%d...\", i+1)\n\t\t\tns, err = NewRPCClient(config.Config.SerfAddr)\n\t\t\tif err == nil {\n\t\t\t\tSerfer = ns\n\t\t\t\tlogger.Debugf(\"reconnected to serf!\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlogger.Debugf(\"Failed to reconnect to serf on try #%d, waiting %d seconds\", i+1, retryDelay)\n\t\t\ttime.Sleep(retryDelay * time.Second)\n\t\t}\n\t\t// if we got here we never managed to reconnect\n\t\tqErr := fmt.Errorf(\"Could not reconnect to serf after %d seconds. 
Last error: %s\", int(retryDelay)*retryNum, err.Error())\n\t\terrch <- qErr\n\t\tclose(errch)\n\t\treturn\n\t}\n\n\terr = Serfer.Query(q)\n\n\terrch <- nil\n\tclose(errch)\n\treturn\n}", "func (serv *Server) handleInvalidData(conn int) {\n\tvar (\n\t\tlogp = `handleInvalidData`\n\t\tframeClose []byte = NewFrameClose(false, StatusInvalidData, nil)\n\n\t\terr error\n\t)\n\n\terr = Send(conn, frameClose, serv.Options.ReadWriteTimeout)\n\tif err != nil {\n\t\tlog.Printf(`%s: %s`, logp, err)\n\t\tgoto out\n\t}\n\n\t_, err = Recv(conn, serv.Options.ReadWriteTimeout)\n\tif err != nil {\n\t\tlog.Printf(`%s: %s`, logp, err)\n\t}\nout:\n\tserv.ClientRemove(conn)\n}", "func (c *Conn) failed() bool {\n\tselect {\n\tcase <-c.connectDone:\n\t\treturn c.connectErr != nil\n\tdefault:\n\t\treturn false\n\t}\n}", "func WaitTCPPortClosed(ctx Ctx, addr fmt.Stringer) error {\n\tconst delay = time.Second / 20\n\tbackOff := backoff.WithContext(backoff.NewConstantBackOff(delay), ctx)\n\top := func() error {\n\t\tvar dialer net.Dialer\n\t\tconn, err := dialer.DialContext(ctx, \"tcp\", addr.String())\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\t_ = conn.Close()\n\t\treturn fmt.Errorf(\"%w: %s\", errTCPPortOpen, addr)\n\t}\n\treturn backoff.Retry(op, backOff)\n}", "func queryFailed(t *testing.T, err error) {\n\tt.Fatalf(\"Failed to query tree: %s\\n\", err.Error())\n}", "func resendPendingQuery(query section.Section, oldToken token.Token, name, ipAddr string,\n\texpiration int64, s *Server) bool {\n\t//TODO CFE which port to choose?\n\tif tcpAddr, err := net.ResolveTCPAddr(\"tcp\", fmt.Sprintf(\"%v:%d\", ipAddr, 5022)); err != nil {\n\t\tconnInfo := connection.Info{Type: connection.TCP, TCPAddr: tcpAddr}\n\t\tif s.caches.RedirectCache.AddConnInfo(name, connInfo, expiration) {\n\t\t\ttok := token.New()\n\t\t\tif s.caches.PendingQueries.UpdateToken(oldToken, tok) {\n\t\t\t\tsendSection(query, tok, connInfo, s)\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\t//No redirect/delegation for connInfo in cache, send notification back to senders.\n\t}\n\treturn false\n}", "func (n *Node) Send(msg string, toNode PPeer){\n\n wait, err := time.ParseDuration(\"3s\")\n Fatal(err)\n conn, err := net.DialTimeout(\"tcp\", toNode.GetAddr()+\":\"+toNode.GetPort(),wait)\n Fatal(err)\n buff := []byte(msg)\n fmt.Println(\"to send: %v\",msg)\n _,err2 := conn.Write(buff)\n Fatal(err2)\n defer conn.Close()\n\n}", "func (c *Conn) Send(opcode uint, message []byte, wireTimeout time.Duration) error {\n\tc.writeMutex.Lock()\n\tc.SetWriteMode(opcode, true)\n\t_, err := c.writeWithRetry(message, wireTimeout)\n\tc.writeMutex.Unlock()\n\treturn err\n}", "func (ctn *Connection) Write(buf []byte) (total int, aerr Error) {\n\tvar err error\n\n\t// make sure all bytes are written\n\t// Don't worry about the loop, timeout has been set elsewhere\n\tif err = ctn.updateDeadline(); err == nil {\n\t\tif total, err = ctn.conn.Write(buf); err == nil {\n\t\t\treturn total, nil\n\t\t}\n\n\t\t// If all bytes are written, ignore any potential error\n\t\t// The error will bubble up on the next network io if it matters.\n\t\tif total == len(buf) {\n\t\t\treturn total, nil\n\t\t}\n\t}\n\n\taerr = chainErrors(errToAerospikeErr(ctn, err), aerr)\n\n\tif ctn.node != nil {\n\t\tctn.node.incrErrorCount()\n\t\tatomic.AddInt64(&ctn.node.stats.ConnectionsFailed, 1)\n\t}\n\n\tctn.Close()\n\n\treturn total, aerr\n}", "func query(object string, server string, tcpport string) (string, error) {\r\n\t// open connnection\r\n\tloggers.Info.Printf(\"whois.query() setup 
connection\")\r\n\tconn, err := net.DialTimeout(\"tcp\", net.JoinHostPort(server, tcpport), time.Second*30)\r\n\tif err != nil {\r\n\t\treturn \"\", fmt.Errorf(\"whois: connect to whois server failed: %v\", err)\r\n\t}\r\n\tdefer conn.Close()\r\n\t// set connection write timeout\r\n\t_ = conn.SetWriteDeadline(time.Now().Add(time.Second * 30))\r\n\t_, err = conn.Write([]byte(object + \"\\r\\n\"))\r\n\tif err != nil {\r\n\t\treturn \"\", fmt.Errorf(\"whois: send to whois server failed: %v\", err)\r\n\t}\r\n\t// set connection read timeout\r\n\t_ = conn.SetReadDeadline(time.Now().Add(time.Second * 30))\r\n\tbuffer, err := ioutil.ReadAll(conn)\r\n\tif err != nil {\r\n\t\treturn \"\", fmt.Errorf(\"whois: read from whois server failed: %v\", err)\r\n\t}\r\n\t// return result\r\n\treturn string(buffer), nil\r\n}", "func (c *Conn) Write(b []byte) (int, error) {\n\terr := c.Conn.SetWriteDeadline(time.Now().Add(c.WriteTimeout))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn c.Conn.Write(b)\n}", "func poll(t *testing.T, address string) {\n\titerations := 1000\n\tfor i := 0; i < iterations; i++ {\n\t\t_, err := net.Dial(\"tcp\", address)\n\t\tt.Logf(\"Dial %s attempt %d\", address, i)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t\tif i == (iterations - 1) {\n\t\t\tt.Fatalf(\"Couldn't establish connection to %s\", address)\n\t\t}\n\t}\n}", "func SendAnswer(conn net.Conn, answer []byte) (err error) {\r\n\tvar n int\r\n\tvar sent int\r\n\r\n\tif len(answer) > 65535 {\r\n\t\tpanic(\"An answer must not be more than 65535\")\r\n\t}\r\n\r\n\tdefer conn.SetDeadline(time.Time{})\r\n\tconn.SetDeadline(time.Now().Add(idleTimeout))\r\n\r\n\tfor {\r\n\t\tif n, err = conn.Write(answer); err != nil {\r\n\t\t\treturn\r\n\t\t}\r\n\t\tsent = sent + n\r\n\t\tif sent < len(answer) {\r\n\t\t\tcontinue\r\n\t\t} else {\r\n\t\t\tbreak\r\n\t\t}\r\n\t}\r\n\treturn\r\n\r\n}", "func (ss *socketSession) receivedInvalidRequest(closeConn bool) {\n\tss.socketConn.Write(socketValueInvalidRequest)\n\n\tif closeConn {\n\t\tss.socketConn.Close()\n\t}\n}", "func workWithClient(conn net.Conn, db *sql.DB) {\n defer conn.Close()\n fmt.Println(\"connected\")\n newMsgCh := make(chan message, CHAN_NEW_MESSAGE_SIZE)\n\n LISTEN_LOOP:\n for {\n select {\n /// read command from user\n case req := <-recvDataCmd(conn):\n sErr := executeCommand(conn, db, req.data, req.err, &newMsgCh)\n if sErr.Err != nil {\n if sErr.Err == io.EOF {\n break LISTEN_LOOP\n }\n sendError(conn, sErr)\n }\n case msg := <- newMsgCh:\n sendData(conn, msg.ToStr(), OK_CODE)\n } // end select\n } // end for\n}", "func TestLimitListenerError(t *testing.T) {\n\tdonec := make(chan bool, 1)\n\tgo func() {\n\t\tconst n = 2\n\t\tll := LimitListener(errorListener{}, n)\n\t\tfor i := 0; i < n+1; i++ {\n\t\t\t_, err := ll.Accept()\n\t\t\tif err != errFake {\n\t\t\t\tt.Fatalf(\"Accept error = %v; want errFake\", err)\n\t\t\t}\n\t\t}\n\t\tdonec <- true\n\t}()\n\tselect {\n\tcase <-donec:\n\tcase <-time.After(5 * time.Second):\n\t\tt.Fatal(\"timeout. 
deadlock?\")\n\t}\n}", "func (r *Responder) ServiceUnavailable() { r.write(http.StatusServiceUnavailable) }", "func TestDisconnect2(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"short\")\n\t}\n\tcheckConnStr(t)\n\tSetLogger(testLogger{t})\n\n\t// Revert to the normal dialer after the test is done.\n\tnormalCreateDialer := createDialer\n\tdefer func() {\n\t\tcreateDialer = normalCreateDialer\n\t}()\n\n\tend := make(chan error)\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tgo func() {\n\t\twaitDisrupt := make(chan struct{})\n\t\tctx, cancel = context.WithTimeout(ctx, time.Second*2)\n\t\tdefer cancel()\n\n\t\tcreateDialer = func(p *connectParams) dialer {\n\t\t\tnd := tcpDialer{&net.Dialer{Timeout: p.dial_timeout, KeepAlive: p.keepAlive}}\n\t\t\tdi := &dialerInterrupt{nd: nd}\n\t\t\tgo func() {\n\t\t\t\t<-waitDisrupt\n\t\t\t\tdi.Interrupt(false)\n\t\t\t}()\n\t\t\treturn di\n\t\t}\n\t\tdb, err := sql.Open(\"sqlserver\", makeConnStr(t).String())\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tif err := db.PingContext(ctx); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer db.Close()\n\n\t\t_, err = db.ExecContext(ctx, `SET LOCK_TIMEOUT 1800;`)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tclose(waitDisrupt)\n\n\t\t_, err = db.ExecContext(ctx, `waitfor delay '00:00:3';`)\n\t\tend <- err\n\t}()\n\n\ttimeout := time.After(10 * time.Second)\n\tselect {\n\tcase err := <-end:\n\t\tif err == nil {\n\t\t\tt.Fatal(\"test err\")\n\t\t}\n\tcase <-timeout:\n\t\tt.Fatal(\"timeout\")\n\t}\n}", "func (c *Conn) Tempfail() {\n\tswitch c.curcmd {\n\tcase HELO, EHLO:\n\t\tc.reply(ReplyServiceNotAvailable)\n\tcase AUTH:\n\t\tc.authDone(false)\n\t\tc.reply(ReplyAuthTmpFail)\n\tcase MAILFROM, RCPTTO, DATA:\n\t\tc.reply(ReplyMailboxNotAvailable)\n\t}\n\tc.replied = true\n}", "func (w *Watcher) sendError(err error) bool {\n\tselect {\n\tcase w.Errors <- err:\n\t\treturn true\n\tcase <-w.quit:\n\t}\n\treturn false\n}", "func (ch *clientHandle) Send(q Query) ([]byte, error) {\n\tif nil == ch.queryQueue {\n\t\treturn nil, fmt.Errorf(\"ClientHandle has been closed\")\n\t}\n\tch.queryQueue <- query{Query: q, response: ch.response}\n\tres := <-ch.response\n\treturn res.data, res.err\n}", "func acceptAndClose(t *testing.T, hostport string) net.Listener {\n\tt.Helper()\n\tl, err := net.Listen(\"tcp\", hostport)\n\tif err != nil {\n\t\tt.Fatalf(\"Unable to start listen: %v\", err)\n\t}\n\tgo func() {\n\t\tfor {\n\t\t\tn, err := l.Accept()\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tn.Close()\n\t\t}\n\t}()\n\treturn l\n}", "func Query(err error, switcher int) {\n\tfmt.Fprintf(os.Stderr, \"ERROR: %v\\n\", err)\n\n\tswitch {\n\n\tcase switcher == 0:\n\t\tfmt.Fprintln(os.Stderr, \"Search: \"+queryURL+replaceSpace(err.Error()))\n\n\tcase switcher == 1:\n\t\twebbrowser.Open(queryURL + err.Error())\n\n\tdefault:\n\t\tfmt.Fprintf(os.Stderr, \"UNKNOWN switcher (need 0 or 1): %v\\n\", switcher)\n\t}\n\n\tos.Exit(1)\n}", "func HandleServiceTCPConnection(node *shared.Node, conn net.Conn) {\n\tdefer conn.Close()\n\n\treader := bufio.NewReader(conn)\n\n\tfor {\n\t\trawMsg, err := reader.ReadString('\\n')\n\t\tlogBandwithInfo(\"Recieve\", len(rawMsg))\n\t\trawMsg = strings.Trim(rawMsg, \"\\n\")\n\t\tif err == io.EOF {\n\t\t\tfmt.Println(\"Server offline\")\n\t\t\tbreak\n\t\t}\n\n\t\t// TODO: Add a parse message function\n\t\tif strings.HasPrefix(rawMsg, \"INTRODUCE\") {\n\t\t\t// self introduction\n\t\t\tarr := strings.Split(rawMsg, \" \")\n\t\t\taddr := 
arr[1]\n\t\t\tport := arr[2]\n\t\t\tgo joinP2P(node, addr, port)\n\t\t} else if strings.HasPrefix(rawMsg, \"TRANSACTION\") {\n\t\t\t// Handle TRANSACTION\n\t\t\tnode.Transactions.SetAdd(rawMsg)\n\t\t\tnode.TransactionBuffer.Add(rawMsg)\n\t\t\tnode.Mempool.SetAdd(rawMsg)\n\t\t\tnode.RWlock.Lock()\n\t\t\tnode.NewMsgCount++\n\t\t\tnode.RWlock.Unlock()\n\t\t\tlogWithTimestamp(rawMsg)\n\t\t\t// go sendGossipingMsg(node, \"TRANSACTION\", 0, rawMsg)\n\t\t} else if strings.HasPrefix(rawMsg, \"DIE\") || strings.HasPrefix(rawMsg, \"QUIT\") {\n\t\t\tos.Exit(1)\n\t\t} else if strings.HasPrefix(rawMsg, \"SOLVED\") {\n\t\t\tblockchain.PuzzleSolvedHandler(node, rawMsg)\n\t\t} else if strings.HasPrefix(rawMsg, \"VERIFY\") {\n\t\t\tarr := strings.Split(rawMsg, \" \")\n\t\t\tresponse := arr[1]\n\t\t\tblockHash := arr[2]\n\t\t\tnode.VerifyChannelMap[blockHash] <- response == \"OK\"\n\t\t} else {\n\t\t\tfmt.Println(\"Unknown message format.\")\n\t\t}\n\n\t}\n}", "func TestTCPProbeTimeout(t *testing.T) {\n\tprobeExpectTimeout(t, 49)\n\tprobeExpectTimeout(t, 50)\n\tprobeExpectTimeout(t, 51)\n}", "func (c *MiningClient) ConnectionLost(err error) {\n\tlog.WithTime(time.Now()).WithFields(log.Fields{\"host\": c.Host, \"time\": time.Now().Format(\"15:04:05\")}).WithError(err).Errorf(\"lost connection to host, retrying...\")\n\n\t// Endless try to reconnect\n\tfor {\n\t\ttime.Sleep(1 * time.Second)\n\t\terr := c.Connect()\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\"host\": c.Host, \"time\": time.Now().Format(\"15:04:05\")}).WithError(err).Errorf(\"failed to reconnect, retrying...\")\n\t\t\ttime.Sleep(5 * time.Second)\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\n\t}\n}", "func (h *DNSHandler) DoTCP(w dns.ResponseWriter, req *dns.Msg) {\n\th.do(\"tcp\", w, req)\n}", "func (t *tcp) close() error {\n\tif !t.isopen {\n\t\treturn nil\n\t}\n\tt.isopen = false\n\t// closing this channel means that anyone readong from the channel is auto-selected in a Select statement\n\tclose(t.closed)\n\tt.conn.Close()\n\treturn nil\n}", "func BenchmarkDialTCP(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tDialTCP(TEST_HOST, TEST_USER, TEST_PASSWD, TEST_DBNAME)\n\t}\n}", "func checkRemoteConnection(serverAddr string) error {\n\n\tlog.Println(\"Connecting to sync server at\", serverAddr)\n\n\tconn := getRemoteConnection(serverAddr, false)\n\tif conn == nil {\n\t\treturn errors.New(\"Cannot connect to sync server: \" + serverAddr)\n\t}\n\n\terr := tcpSend(conn, \"Test\\n\")\n\tif err != nil {\n\t\treturn errors.New(\"Cannot send data to remote server. 
\" + err.Error())\n\t}\n\n\treturn nil\n}", "func (s *Server) ListenTCP(addressPort string) (err error) {\n\tlisten, err := net.Listen(\"tcp\", addressPort)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to Listen: %v\\n\", err)\n\t\treturn err\n\t}\n\ts.listeners = append(s.listeners, listen)\n\tgo s.accept(listen)\n\treturn err\n}", "func (p *peer) TrySend(chID byte, msgBytes []byte) bool {\r\n\tif !p.IsRunning() {\r\n\t\treturn false\r\n\t} else if !p.hasChannel(chID) {\r\n\t\treturn false\r\n\t}\r\n\treturn p.mconn.TrySend(chID, msgBytes)\r\n}", "func GetCommandToTestTCPConnection(host string, port int32) string {\n\treturn fmt.Sprintf(\"if (-Not (Test-NetConnection %s -Port %d).TcpTestSucceeded)\"+\n\t\t\" {Write-Output 'connection failed:'; exit 10}\", host, port)\n}", "func sendMsg(conn *net.UDPConn, raddr net.UDPAddr, query interface{}) {\n\ttotalSent.Add(1)\n\tvar b bytes.Buffer\n\tif err := bencode.Marshal(&b, query); err != nil {\n\t\treturn\n\t}\n\tif n, err := conn.WriteToUDP(b.Bytes(), &raddr); err != nil {\n\t\tlogger.Infof(\"DHT: node write failed to %+v, error=%s\", raddr, err)\n\t} else {\n\t\ttotalWrittenBytes.Add(int64(n))\n\t}\n\treturn\n}", "func (ec *ErrConnection) Query(query string, args ...interface{}) *sql.Rows {\n\tif ec.Err != nil {\n\t\treturn nil\n\t}\n\trows, err := ec.Conn.Query(query, args...)\n\tif err != nil {\n\t\tec.Err = err\n\t}\n\treturn rows\n}", "func (t *DNSOverTCPTransport) RoundTrip(\n\tctx context.Context, query model.DNSQuery) (model.DNSResponse, error) {\n\t// TODO(bassosimone): this method should more strictly honour the context, which\n\t// currently is only used to bound the dial operation\n\trawQuery, err := query.Bytes()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(rawQuery) > math.MaxUint16 {\n\t\treturn nil, errQueryTooLarge\n\t}\n\tconn, err := t.dial(ctx, \"tcp\", t.address)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer conn.Close()\n\tconst iotimeout = 10 * time.Second\n\tconn.SetDeadline(time.Now().Add(iotimeout))\n\t// Write request\n\tbuf := []byte{byte(len(rawQuery) >> 8)}\n\tbuf = append(buf, byte(len(rawQuery)))\n\tbuf = append(buf, rawQuery...)\n\tif _, err = conn.Write(buf); err != nil {\n\t\treturn nil, err\n\t}\n\t// Read response\n\theader := make([]byte, 2)\n\tif _, err = io.ReadFull(conn, header); err != nil {\n\t\treturn nil, err\n\t}\n\tlength := int(header[0])<<8 | int(header[1])\n\trawResponse := make([]byte, length)\n\tif _, err = io.ReadFull(conn, rawResponse); err != nil {\n\t\treturn nil, err\n\t}\n\treturn t.decoder.DecodeResponse(rawResponse, query)\n}", "func (t *TCPStreamLayer) Dial(address string, timeout time.Duration) (net.Conn, error) {\n\treturn net.DialTimeout(\"tcp\", address, timeout)\n}", "func (s *Server) Accept() error {\n\tvar tempDelay time.Duration // how long to sleep on accept failure\n\tfor {\n\t\tc, e := s.Listener.Accept()\n\t\tif e != nil {\n\t\t\tif ne, ok := e.(net.Error); ok && ne.Temporary() {\n\t\t\t\tif tempDelay == 0 {\n\t\t\t\t\ttempDelay = 5 * time.Millisecond\n\t\t\t\t} else {\n\t\t\t\t\ttempDelay *= 2\n\t\t\t\t}\n\t\t\t\tif max := 1 * time.Second; tempDelay > max {\n\t\t\t\t\ttempDelay = max\n\t\t\t\t}\n\t\t\t\ttime.Sleep(tempDelay)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn e\n\t\t}\n\t\tgo s.accept(c)\n\t}\n}", "func (c *conn) Send(cmd string, args ...interface{}) error {\n\terr := c.conn.Send(cmd, args...)\n\tc.pending++\n\treturn err\n}" ]
[ "0.5549525", "0.55408275", "0.53043896", "0.528792", "0.52191246", "0.5194476", "0.5182247", "0.517237", "0.50538695", "0.4971472", "0.49563774", "0.49338162", "0.49332836", "0.49071816", "0.4888864", "0.48753402", "0.486269", "0.4854261", "0.48495227", "0.48452085", "0.48272598", "0.47903314", "0.47851095", "0.47799405", "0.4762936", "0.47554046", "0.4753776", "0.4751005", "0.47444847", "0.4717985", "0.4717985", "0.47068864", "0.46733713", "0.46710047", "0.4665925", "0.46635118", "0.46553332", "0.46509305", "0.46353254", "0.46338254", "0.46289608", "0.46259797", "0.46194983", "0.46061254", "0.45699978", "0.45632988", "0.45627922", "0.4561362", "0.45590067", "0.45588934", "0.45535642", "0.4550877", "0.45501748", "0.45449722", "0.4535433", "0.453079", "0.4526633", "0.45263112", "0.45179915", "0.45178235", "0.4517537", "0.45165262", "0.4515816", "0.45088968", "0.45060444", "0.4504444", "0.45012385", "0.4484967", "0.44766343", "0.44704247", "0.4467212", "0.44650713", "0.44641897", "0.44533822", "0.44340366", "0.44285387", "0.44237578", "0.44229874", "0.44201857", "0.44134676", "0.44111243", "0.44100338", "0.4407369", "0.4403978", "0.44015273", "0.44005764", "0.43968728", "0.43929034", "0.4382812", "0.43784925", "0.43766308", "0.43699098", "0.43665296", "0.43624654", "0.43587607", "0.43563762", "0.4351563", "0.4347104", "0.43455723", "0.43413234" ]
0.62287647
0
Sends a TCP query, and closes the socket before the response is sent. This tests for crashes when a response cannot be delivered.
func TestAcceptClose(t *testing.T) {
	doh := newFakeTransport()
	client, server := makePair()

	// Start the forwarder running.
	go Accept(doh, server)

	lbuf := make([]byte, 2)
	// Send Query
	queryData := simpleQueryBytes
	binary.BigEndian.PutUint16(lbuf, uint16(len(queryData)))
	client.Write(lbuf)
	client.Write(queryData)

	// Read query
	queryRead := <-doh.query
	if !bytes.Equal(queryRead, queryData) {
		t.Error("Query mismatch")
	}

	// Close the TCP connection
	client.Close()

	// Send fake response too late.
	responseData := []byte{1, 2, 8, 9, 10}
	doh.response <- responseData
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func echo(conn net.Conn){\n defer conn.Close()\n bData, _ := recvDataB(conn)\n sendDataB(conn, bData, OK_CODE)\n}", "func (c Client) SendQuery(message dns.Msg) (dns.Msg, error) {\n\t// Open a new QUIC stream\n\tlog.Debugln(\"opening new quic stream\")\n\tstream, err := c.Session.OpenStream()\n\tif err != nil {\n\t\treturn dns.Msg{}, errors.New(\"quic stream open: \" + err.Error())\n\t}\n\n\t// Pack the DNS message for transmission\n\tlog.Debugln(\"packing dns message\")\n\tpacked, err := message.Pack()\n\tif err != nil {\n\t\t_ = stream.Close()\n\t\treturn dns.Msg{}, errors.New(\"dns message pack: \" + err.Error())\n\t}\n\n\t// Send the DNS query over QUIC\n\tlog.Debugln(\"writing packed format to the stream\")\n\t_, err = stream.Write(packed)\n\t_ = stream.Close()\n\tif err != nil {\n\t\treturn dns.Msg{}, errors.New(\"quic stream write: \" + err.Error())\n\t}\n\n\t// Read the response\n\tlog.Debugln(\"reading server response\")\n\tresponse, err := ioutil.ReadAll(stream)\n\tif err != nil {\n\t\treturn dns.Msg{}, errors.New(\"quic stream read: \" + err.Error())\n\t}\n\n\t// Unpack the DNS message\n\tlog.Debugln(\"unpacking response dns message\")\n\tvar msg dns.Msg\n\terr = msg.Unpack(response)\n\tif err != nil {\n\t\treturn dns.Msg{}, errors.New(\"dns message unpack: \" + err.Error())\n\t}\n\n\treturn msg, nil // nil error\n}", "func handleRequest(conn net.Conn) {\r\n\tdefer wg.Done()\r\n\tdefer conn.Close()\r\n\tisClose:= false\r\n\tvar response string = \"\"\r\n\t// Make a buffer to hold incoming data.\r\n\tbuf := make([]byte, 1024)\r\n\r\n\tstartTime := time.Now()\r\n\ttimeInactive := 0\r\n\tfor !isClose && !isExit && time.Since(startTime).Minutes() < MAXTCPCONN && timeInactive < MAXTCPINACTIVE {\t\r\n\t\tconn.SetDeadline(time.Now().Add(TIMEOUT*time.Second))\r\n\t\t// Read the incoming connection into the buffer.\r\n\t\tnb, err := conn.Read(buf)\r\n\t\tif err != nil { \r\n\t\t\tif opErr, ok := err.(*net.OpError); ok && opErr.Timeout() {\ttimeInactive+=TIMEOUT; continue }\r\n\t\t\tbreak; \r\n\t\t}\r\n\t\tif nb > 0 {\r\n\t\t\ttimeInactive = 0\r\n\t\t\tresponse, isClose, err = handleMessage(string(buf[:nb]),\"TCP\")\r\n\t\t\t// Send the response\r\n\t\t\tif err == nil && len(response)>0 {\r\n\t\t\t\tif isVerbose { logger.Print(\"Response: \"+response) }\r\n\t\t\t\tconn.Write([]byte(response)) \r\n\t\t\t} else if err != nil {\r\n\t\t\t\tlogger.Print(err.Error())\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\tif isVerbose {logger.Print(\"Close TCP connection ...\") }\r\n}", "func WaitForTCP(ctx context.Context, rAddr string) error {\n\tdialer := net.Dialer{}\n\tconn, err := dialer.DialContext(ctx, \"tcp\", rAddr)\n\t//For loop to get around OS Dial Timeout\n\tfor err != nil {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tdefault:\n\t\t}\n\t\tconn, err = dialer.DialContext(ctx, \"tcp\", rAddr)\n\t}\n\tconn.Close()\n\treturn nil\n}", "func SendTCP(address, msg string) (string, error) {\n\tconn, err := net.Dial(\"tcp\", address)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err := conn.SetReadDeadline(time.Now().Add(30 * time.Second)); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdefer func() {\n\t\tif err := conn.Close(); err != nil {\n\t\t\tlogrus.Warn(\"Could not close TCP connection\")\n\t\t}\n\t}()\n\n\t// writes to the tcp connection\n\tfmt.Fprintf(conn, msg+\"\\n\")\n\n\tresponse, err := bufio.NewReader(conn).ReadString('\\n')\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn response, nil\n}", "func sendRequest(conn net.Conn, text string) {\n 
message := text;\n \n if _,err := conn.Write([]byte(message + \"\\n\")); err != nil {\n log.Fatal(err)\n }\n}", "func Send(ctx context.Context, address string) (err error) {\n\t// Resolve the UDP address so that we can make use of DialUDP\n\t// with an actual IP and port instead of a name (in case a\n\t// hostname is specified).\n\traddr, err := net.ResolveUDPAddr(\"udp\", address)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Although we're not in a connection-oriented transport,\n\t// the act of `dialing` is analogous to the act of performing\n\t// a `connect(2)` syscall for a socket of type SOCK_DGRAM:\n\t// - it forces the underlying socket to only read and write\n\t// to and from a specific remote address.\n\tconn, err := net.DialUDP(\"udp\", nil, raddr)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Closes the underlying file descriptor associated with the,\n\t// socket so that it no longer refers to any file.\n\tdefer conn.Close()\n\n\tdoneChan := make(chan error, 1)\n\n\tgo func() {\n\t\t// It is possible that this action blocks, although this\n\t\t// should only occur in very resource-intensive situations:\n\t\t// - when you've filled up the socket buffer and the OS\n\t\t// can't dequeue the queue fast enough.\n\t\t//n, err := io.Copy(conn, reader)\n\t\t//n, err := fmt.Fprintf(conn, \"WSD\")\n\t\tn, err := fmt.Fprintf(conn,\n\t\t\t\"FC1307\"+string(0x1)+string(4)+string(0)+string(0)+string(0)+string(0)+string(0)+string(1)+string(5)+string(5)+\"adminxxxxxxxxxxxadminxxxxxxxxxxx\"+string(0)+string(0)+string(0)+string(1),\n\t\t)\n\t\tif err != nil {\n\t\t\tdoneChan <- err\n\t\t\treturn\n\t\t}\n\n\t\tfmt.Printf(\"%d bytes sent\\n\", n)\n\n\t\tbuffer := make([]byte, maxBufferSize)\n\n\t\t// Set a deadline for the ReadOperation so that we don't\n\t\t// wait forever for a server that might not respond on\n\t\t// a resonable amount of time.\n\t\tdeadline := time.Now().Add(15 * time.Second)\n\t\terr = conn.SetReadDeadline(deadline)\n\t\tif err != nil {\n\t\t\tdoneChan <- err\n\t\t\treturn\n\t\t}\n\n\t\tnRead, _, err := conn.ReadFrom(buffer)\n\t\tif err != nil {\n\t\t\tdoneChan <- err\n\t\t\treturn\n\t\t}\n\n\t\tfmt.Printf(\"bytes=%d received\\n\", nRead)\n\t\tfmt.Println(hex.EncodeToString(buffer[0:nRead]))\n\n\t\tdoneChan <- nil\n\t}()\n\tselect {\n\tcase <-ctx.Done():\n\t\tfmt.Println(\"cancelled\")\n\t\terr = ctx.Err()\n\tcase err = <-doneChan:\n\t}\n\n\treturn\n}", "func (c *RESPConn) write(raw []byte) error {\n\tif c.conn == nil {\n\t\treturn ErrConnClosed\n\t}\n\n\tc.conn.SetWriteDeadline(time.Now().Add(c.timeout))\n\t_, err := c.conn.Write(raw)\n\terr = wrapErr(err)\n\tif err == ErrConnClosed {\n\t\tc.close()\n\t}\n\n\treturn err\n}", "func (d *Device) EndSocketSend() error {\n\td.Write([]byte(\"+++\"))\n\n\t_, err := d.Response(pause)\n\treturn err\n}", "func (c *Client) writeQuery(conn net.Conn, query []byte) error {\n\tvar err error\n\n\tif c.Timeout > 0 {\n\t\t_ = conn.SetWriteDeadline(time.Now().Add(c.Timeout))\n\t}\n\n\t// Write to the connection\n\tif _, ok := conn.(*net.TCPConn); ok {\n\t\tl := make([]byte, 2)\n\t\tbinary.BigEndian.PutUint16(l, uint16(len(query)))\n\t\t_, err = (&net.Buffers{l, query}).WriteTo(conn)\n\t} else {\n\t\t_, err = conn.Write(query)\n\t}\n\n\treturn err\n}", "func SendAnswer(conn net.Conn, answer []byte) (err error) {\r\n\tvar n int\r\n\tvar sent int\r\n\r\n\tif len(answer) > 65535 {\r\n\t\tpanic(\"An answer must not be more than 65535\")\r\n\t}\r\n\r\n\tdefer conn.SetDeadline(time.Time{})\r\n\tconn.SetDeadline(time.Now().Add(idleTimeout))\r\n\r\n\tfor 
{\r\n\t\tif n, err = conn.Write(answer); err != nil {\r\n\t\t\treturn\r\n\t\t}\r\n\t\tsent = sent + n\r\n\t\tif sent < len(answer) {\r\n\t\t\tcontinue\r\n\t\t} else {\r\n\t\t\tbreak\r\n\t\t}\r\n\t}\r\n\treturn\r\n\r\n}", "func send(conn *net.Conn, request []byte) error {\n\tvar err error\n\tvar n int\n\tn, err = (*conn).Write(request)\n\n\tfor n < len(request) {\n\t\tn, err = (*conn).Write(request[n:])\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn err\n}", "func send(conn *net.Conn, request []byte) error {\n\tvar err error\n\tvar n int\n\tn, err = (*conn).Write(request)\n\n\tfor n < len(request) {\n\t\tn, err = (*conn).Write(request[n:])\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn err\n}", "func (p *Proxy) respondTCP(d *DNSContext) error {\n\tresp := d.Res\n\tconn := d.Conn\n\n\tbytes, err := resp.Pack()\n\tif err != nil {\n\t\treturn errorx.Decorate(err, \"couldn't convert message into wire format: %s\", resp.String())\n\t}\n\n\terr = proxyutil.WritePrefixed(bytes, conn)\n\n\tif proxyutil.IsConnClosed(err) {\n\t\treturn err\n\t}\n\tif err != nil {\n\t\treturn errorx.Decorate(err, \"conn.Write() returned error\")\n\t}\n\n\treturn nil\n}", "func (m *Measurement) DialTCP() *TCPResponse {\n\n\t// Simply check that the server is up and can accept connections.\n\tresult := &TCPResponse{\n\t\tHost: m.Host,\n\t\tPort: m.Port,\n\t\tLatency: 0,\n\t\tTimeout: m.Timeout,\n\t\tSequence: m.count,\n\t}\n\n\taddress := fmt.Sprintf(\"%s:%d\", m.Host, m.Port)\n\tstart := time.Now()\n\n\tconn, err := net.DialTimeout(\"tcp\", address, time.Duration(m.Timeout)*time.Second)\n\tif err != nil {\n\n\t\tm.appendFailed(result)\n\t\treturn result\n\t}\n\tdefer conn.Close()\n\n\tresult.Latency = time.Since(start).Seconds() * 1000\n\n\tm.appendSuccessful(result)\n\treturn result\n}", "func (c *Conn) Send(opcode uint, message []byte, wireTimeout time.Duration) error {\n\tc.writeMutex.Lock()\n\tc.SetWriteMode(opcode, true)\n\t_, err := c.writeWithRetry(message, wireTimeout)\n\tc.writeMutex.Unlock()\n\treturn err\n}", "func TestAcceptFail(t *testing.T) {\n\tdoh := newFakeTransport()\n\tclient, server := makePair()\n\n\t// Start the forwarder running.\n\tgo Accept(doh, server)\n\n\tlbuf := make([]byte, 2)\n\t// Send Query\n\tqueryData := simpleQueryBytes\n\tbinary.BigEndian.PutUint16(lbuf, uint16(len(queryData)))\n\tclient.Write(lbuf)\n\tclient.Write(queryData)\n\n\t// Indicate that the query failed\n\tdoh.err = errors.New(\"fake error\")\n\n\t// Read query\n\tqueryRead := <-doh.query\n\tif !bytes.Equal(queryRead, queryData) {\n\t\tt.Error(\"Query mismatch\")\n\t}\n\n\t// Accept should have closed the socket.\n\tn, _ := client.Read(lbuf)\n\tif n != 0 {\n\t\tt.Error(\"Expected to read 0 bytes\")\n\t}\n}", "func (o *PluginDnsClient) Query(queries []utils.DnsQueryParams, socket transport.SocketApi) error {\n\tif o.IsNameServer() {\n\t\treturn fmt.Errorf(\"Querying is not permitted for Dns Name Servers!\")\n\t}\n\tquestions, err := utils.BuildQuestions(queries)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(questions) > 0 {\n\t\tdata := o.dnsPktBuilder.BuildQueryPkt(questions, o.Tctx.Simulation)\n\t\tif socket == nil {\n\t\t\treturn fmt.Errorf(\"Invalid Socket in Query!\")\n\t\t}\n\t\ttransportErr, _ := o.socket.Write(data)\n\t\tif transportErr != transport.SeOK {\n\t\t\to.stats.socketWriteError++\n\t\t\treturn transportErr.Error()\n\t\t}\n\t\to.stats.pktTxDnsQuery++ // successfully sent query\n\t\to.stats.txBytes += uint64(len(data)) // number of bytes sent\n\t}\n\treturn nil\n}", "func (h 
*Handler) SendQuery(ctx context.Context, ip net.IP) (err error) {\n\n\tpacket := nodeStatusRequestWireFormat(`* `)\n\t// packet.printHeader()\n\n\tif ip == nil || ip.Equal(net.IPv4zero) {\n\t\treturn fmt.Errorf(\"invalid IP=%v\", ip)\n\t}\n\t// ip[3] = 255 // Network broadcast\n\n\t// To broadcast, use network broadcast i.e 192.168.0.255 for example.\n\ttargetAddr := &net.UDPAddr{IP: ip, Port: 137}\n\tif _, err = h.conn.WriteToUDP(packet, targetAddr); err != nil {\n\t\tif ctx.Err() == nil { // not cancelled\n\t\t\treturn fmt.Errorf(\"nbns failed to send packet: %w\", err)\n\t\t}\n\t}\n\treturn nil\n}", "func TestConnQueryCloseEarly(t *testing.T) {\n\tt.Parallel()\n\n\tconn := mustConnectString(t, os.Getenv(\"PGX_TEST_DATABASE\"))\n\tdefer closeConn(t, conn)\n\n\t// Immediately close query without reading any rows\n\trows, err := conn.Query(context.Background(), \"select generate_series(1,$1)\", 10)\n\tif err != nil {\n\t\tt.Fatalf(\"conn.Query failed: %v\", err)\n\t}\n\trows.Close()\n\n\tensureConnValid(t, conn)\n\n\t// Read partial response then close\n\trows, err = conn.Query(context.Background(), \"select generate_series(1,$1)\", 10)\n\tif err != nil {\n\t\tt.Fatalf(\"conn.Query failed: %v\", err)\n\t}\n\n\tok := rows.Next()\n\tif !ok {\n\t\tt.Fatal(\"rows.Next terminated early\")\n\t}\n\n\tvar n int32\n\trows.Scan(&n)\n\tif n != 1 {\n\t\tt.Fatalf(\"Expected 1 from first row, but got %v\", n)\n\t}\n\n\trows.Close()\n\n\tensureConnValid(t, conn)\n}", "func (ch *clientHandle) Send(q Query) ([]byte, error) {\n\tif nil == ch.queryQueue {\n\t\treturn nil, fmt.Errorf(\"ClientHandle has been closed\")\n\t}\n\tch.queryQueue <- query{Query: q, response: ch.response}\n\tres := <-ch.response\n\treturn res.data, res.err\n}", "func send(c *Conn, cmd string) (string, error) {\n\t_, err := sendFull(c, []byte(cmd))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t//wait for response\n\tresp, err := c.bufReader.ReadString('\\n')\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn resp, nil\n}", "func client(serverIp string, serverPort string) {\n //TCPAddr\n tcpAddr, err := net.ResolveTCPAddr(\"tcp\", serverIp + serverPort)\n checkErrorClient(err)\n\n //TCPConn\n conn, err := net.DialTCP(\"tcp\", nil, tcpAddr)\n checkErrorClient(err)\n reader := bufio.NewReader(os.Stdin)\n buf := make([]byte, SendBufferSize)\n\n for {\n readTotal, err := reader.Read(buf)\n if err != nil {\n if err != io.EOF {\n checkErrorClient(err)\n }\n break\n }\n _, err = conn.Write(buf[:readTotal])\n checkErrorClient(err)\n }\n\n checkErrorClient(err)\n os.Exit(0)\n}", "func (client *Client) SendRaw(msg string) (string, error) {\n\t// ensure connection is established\n\tif err := client.setupConnection(); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tbuffer := prepareMessage(msg)\n\n\tif client.RawQueryPrinter != nil {\n\t\tclient.RawQueryPrinter(msg, true)\n\t}\n\tresponse, err := client.sendAndReceive(buffer)\n\tif err != nil {\n\t\tif client.NoAutoRetry {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif client.InnerErrorPrinter != nil {\n\t\t\tclient.InnerErrorPrinter(fmt.Errorf(\"query failed: %s\", err))\n\t\t}\n\n\t\t// try re-establishing lost connection once\n\t\tif client.connection != nil {\n\t\t\t// ignore close errors (connection will be discarded anyway)\n\t\t\tclient.closeConnection()\n\t\t}\n\t\tif err := client.setupConnection(); err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"failed to restore lost connection: %s\", err.Error())\n\t\t}\n\t\t// restore authenticated session if it existed before\n\t\tif 
len(client.lastUser) > 0 && len(client.lastPass) > 0 {\n\t\t\tif err := client.Login(client.lastUser, client.lastPass); err != nil {\n\t\t\t\treturn \"\", fmt.Errorf(\"failed to restore session: %s\", err.Error())\n\t\t\t}\n\t\t}\n\t\t// retry sending request once\n\t\tif client.RawQueryPrinter != nil {\n\t\t\tclient.RawQueryPrinter(msg, true)\n\t\t}\n\t\tresponse, err = client.sendAndReceive(buffer)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\tif client.RawQueryPrinter != nil {\n\t\tclient.RawQueryPrinter(response, false)\n\t}\n\treturn response, nil\n}", "func sendOutgoing(conn net.Conn, message []byte) {\n numWritten, err := conn.Write(message)\n\n if err != nil {\n fmt.Println(\"Error writing outgoing message: \", err.Error())\n os.Exit(1)\n }\n\n if numWritten != len(message) {\n fmt.Println(\"Could not write out the full message.\")\n }\n}", "func main() {\n sendData := []byte(\"HEAD / HTTP/1.0\\r\\n\\r\\n\")\n\n target := os.Args[1]\n\n tcpAddr, err := net.ResolveTCPAddr(\"tcp4\", target + \":80\")\n checkError(err)\n\n conn, err := net.DialTCP(\"tcp\", nil, tcpAddr)\n checkError(err)\n\n _, err = conn.Write(sendData)\n checkError(err)\n\n result, err := ioutil.ReadAll(conn)\n checkError(err)\n\n fmt.Println(string(result))\n\n os.Exit(0)\n}", "func (t *TCPData) Flush(sock mangos.Socket) bool {\n msg := t.Marshal()\n t.Events = nil\n\n if sock == nil {\n return false\n }\n if err := sock.Send(msg); err != nil {\n log.Printf(\"Cannot push message on socket: %s\", err.Error())\n return false\n }\n return true\n}", "func (s *Service) serve(conn *net.TCPConn) {\n\tdefer conn.Close()\n\tdefer s.waitGroup.Done()\n\tfor {\n\n\t\tselect {\n\t\tcase <-s.ch:\n\t\t\tlog.Println(\"disconnecting\", conn.RemoteAddr())\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\n\t\tconn.SetDeadline(time.Now().Add(10*1e9))\n\n\t\tbuf := make([]byte, 4096)\n\n\t\tn, err := conn.Read(buf)\n\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Cannot read from buffer: %v\\r\\n\", err)\n\n\t\t}\n\n\t\ts := strings.Trim(string(buf[:n]), \" \\r\\n\")\n\n\t\t// Parse the command\n\t\tcode, response := ParseCommand(s)\n\n\t\t// QUIT command was issued\n\t\tif code == -1 { break }\n\n\t\t// Write response\n\t\t_, err = conn.Write([]byte(response))\n\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error writing buffer: %v\\r\\n\", err)\n\t\t}\n\t}\n}", "func RawRequest(sock, data string, timeout time.Duration) (reply string, err error) {\n\n\t// Connect to the socket.\n\taddr, err := net.ResolveUnixAddr(\"unix\", sock)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tconn, err := net.DialUnix(\"unix\", nil, addr)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer conn.Close()\n\tdeadline := time.Now().Add(timeout)\n\tif err := conn.SetWriteDeadline(deadline); err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Do write inline.\n\tn, errWrite := conn.Write([]byte(data))\n\tif errWrite == nil && n != len(data) {\n\t\terrWrite = fmt.Errorf(\"Short write.\")\n\t\treturn \"\", errWrite\n\t}\n\n\tvar errRead error\n\twg := sync.WaitGroup{}\n\twg.Add(1) // Background Read\n\n\t// Read from the client in the background.\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tdeadline := time.Now().Add(timeout)\n\t\tconn.SetReadDeadline(deadline)\n\t\t// Read the response from the client.\n\t\tvar bytes []byte\n\t\tbytes, errRead = ioutil.ReadAll(conn)\n\t\treply = string(bytes)\n\t}()\n\n\twg.Wait()\n\tswitch {\n\tcase errRead != nil:\n\t\terr = errRead\n\tcase !strings.HasPrefix(reply, \"REQUEST OK\\n\"):\n\t\terr = fmt.Errorf(\"Request failed: %s\", 
reply)\n\t}\n\n\treturn\n}", "func (mb *tcpTransporter) Send(aduRequest []byte) (aduResponse []byte, err error) {\n\tmb.mu.Lock()\n\tdefer mb.mu.Unlock()\n\n\tvar data [tcpMaxLength]byte\n\trecoveryDeadline := time.Now().Add(mb.IdleTimeout)\n\n\tfor {\n\t\t// Establish a new connection if not connected\n\t\tif err = mb.connect(); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\t// Set timer to close when idle\n\t\tmb.lastActivity = time.Now()\n\t\tmb.startCloseTimer()\n\t\t// Set write and read timeout\n\t\tvar timeout time.Time\n\t\tif mb.Timeout > 0 {\n\t\t\ttimeout = mb.lastActivity.Add(mb.Timeout)\n\t\t}\n\t\tif err = mb.conn.SetDeadline(timeout); err != nil {\n\t\t\treturn\n\t\t}\n\t\t// Send data\n\t\tmb.logf(\"modbus: send % x\", aduRequest)\n\t\tif _, err = mb.conn.Write(aduRequest); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tmb.lastAttemptedTransactionID = binary.BigEndian.Uint16(aduRequest)\n\t\tvar res readResult\n\t\taduResponse, res, err = mb.readResponse(aduRequest, data[:], recoveryDeadline)\n\t\tswitch res {\n\t\tcase readResultDone:\n\t\t\tif err == nil {\n\t\t\t\tmb.lastSuccessfulTransactionID = binary.BigEndian.Uint16(aduResponse)\n\t\t\t}\n\t\t\treturn\n\t\tcase readResultRetry:\n\t\t\tcontinue\n\t\t}\n\n\t\tmb.logf(\"modbus: close connection and retry, because of %v\", err)\n\n\t\tmb.close()\n\t\ttime.Sleep(mb.LinkRecoveryTimeout)\n\t}\n}", "func (q *query) sendResponse(data []byte, err error) {\n\tq.response <- queryResponse{data: data, err: err}\n}", "func waitTCP(addr string) {\n\tlog.Printf(\"Waiting for TCP to be available at %s\", addr)\n\t// Try once a second to connect\n\tfor startTime := time.Now(); time.Since(startTime) < 10*time.Second; time.Sleep(time.Second) {\n\t\tconn, err := net.DialTimeout(\"tcp\", addr, time.Second)\n\n\t\tif err == nil {\n\t\t\t// Connection successful\n\t\t\tlog.Printf(\"TCP came up on %s\", addr)\n\t\t\tcloseErr := conn.Close()\n\t\t\tif closeErr != nil {\n\t\t\t\tlog.Printf(\"Error closing TCP connection in waitTCP: %s\", closeErr)\n\t\t\t}\n\n\t\t\treturn\n\t\t}\n\n\t\tlog.Printf(\"Tried to connect to %s, got error: %s. Will retry in 1 second.\", addr, err)\n\t}\n\n\t// Timed out\n\tpanic(fmt.Sprintf(\"Timeout out waiting for service to start on %s\", addr))\n}", "func hostDockerQuery() {\n\tlog.Println(\"hostDockerQuery\")\n\tfor {\n\t\ttime.Sleep(2 * time.Second)\n\t\tc, err := net.Dial(\"unix\", \"/var/run/dockerConnection/hostconnection.sock\")\n\t\tif err != nil {\n\t\t\tcontinue;\n\t\t}\n\t\t// send to socket\n\t\tlog.Println(\"sending request to server\")\n\t\tfmt.Fprintf(c, \"hi\" + \"\\n\")\n\t\t// listen for reply\n\t\tmessage, _ := bufio.NewReader(c).ReadString('\\n')\n\t\t//log.Println(\"Message from server: \" + message)\n\t\tlog.Println(\"Received update from host server\")\n\n\t\t// set this to be the latest response\n\t\tlatestHostServerResponse = message\n\t}\n}", "func waitTCPDown(addr string) {\n\tlog.Printf(\"Waiting for TCP to be down at %s\", addr)\n\t// Try once a second to connect\n\tfor startTime := time.Now(); time.Since(startTime) < 10*time.Second; time.Sleep(time.Second) {\n\t\tconn, err := net.DialTimeout(\"tcp\", addr, time.Second)\n\n\t\tif err != nil {\n\t\t\t// Connection failed\n\t\t\tlog.Printf(\"TCP went down on %s\", addr)\n\t\t\treturn\n\t\t}\n\n\t\tcloseErr := conn.Close()\n\t\tif closeErr != nil {\n\t\t\tlog.Printf(\"Error closing TCP connection in waitTCP: %s\", closeErr)\n\t\t}\n\n\t\tlog.Printf(\"Tried to connect to %s, was successful. 
Will retry in 1 second.\", addr)\n\t}\n\n\t// Timed out\n\tpanic(fmt.Sprintf(\"Timeout out waiting for service to stop on %s\", addr))\n}", "func Send(tsx TsxServer, cmd string, cmdName string) string {\n\n\t// connect to this socket\n\n\tsrv := fmt.Sprintf(\"%s:%d\", tsx.Addr, tsx.Port)\n\tconn, err := net.Dial(\"tcp\", srv)\n\tif err != nil {\n\t\tfmt.Printf(\"Try tcp to server: %s\\n\", srv)\n\t\tfmt.Printf(\"error: %s\\n\", err)\n\t\tpanic(\"We have no connection !\")\n\t}\n\tdefer conn.Close()\n\n\t// send to socket\n\tfmt.Printf(\"Get status from TheSkyX: %s\\n\", cmdName)\n\tconn.Write([]byte(cmd))\n\n\t// listen for reply\n\tret := listenReply(conn)\n\n\treturn string(ret)\n}", "func (t *tcpHandler) send(msg []byte) error {\n\tfor {\n\t\tfor t.conn == nil {\n\t\t\tt.logFields.Info(\"attempting to connect with receiver\")\n\t\t\tconn, err := net.Dial(\"tcp\", t.addr)\n\t\t\tif err != nil {\n\t\t\t\tt.logFields.Error(err, \"retrying in \", retryTime.String())\n\t\t\t\ttime.Sleep(retryTime)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tt.logFields.Info(\"conntected\")\n\t\t\tt.conn = conn\n\t\t}\n\n\t\tt.logFields.Debug(\"sending message\")\n\n\t\t_, err := writeFrame(msg, t.conn)\n\t\tif err != nil {\n\t\t\tt.conn.Close()\n\t\t\tt.conn = nil\n\t\t\t// TODO: handle client disconnected but conn exists\n\t\t\tt.logFields.Error(err, \"retrying in \", retryTime.String())\n\t\t\ttime.Sleep(retryTime)\n\t\t\tcontinue\n\t\t}\n\n\t\tt.logFields.Debug(\"wating for acknowledge signal\")\n\t\tn, err := readFrame(t.buf[:], t.conn)\n\t\tif err != nil {\n\t\t\tt.conn.Close()\n\t\t\tt.conn = nil\n\t\t\tt.logFields.Error(err, \"retrying in \", retryTime.String())\n\t\t\ttime.Sleep(retryTime)\n\t\t\tcontinue\n\t\t}\n\n\t\tresponse := string(t.buf[0:n])\n\t\tif response != \"OK\" {\n\t\t\tt.conn.Close()\n\t\t\tt.conn = nil\n\t\t\tt.logFields.Error(\"did not acknowledge. 
responded:\", response, \"retrying in \", retryTime.String())\n\t\t\ttime.Sleep(retryTime)\n\t\t\tcontinue\n\t\t}\n\t\tt.logFields.Info(\"message sent\")\n\t\treturn nil\n\t}\n}", "func (c *TcpClient) Call(serviceMethod string, args []string) (reply interface{}, err error) {\n\t//jsonargs, _ := json.Marshal(args)\n\t//rawargs := json.RawMessage(jsonargs)\n\tparams := make([]interface{}, len(args))\n\tfor i, v := range args {\n\t\tparams[i] = v\n\t}\n\tr := types.StratumRequest{Method: serviceMethod, Params: params}\n\t//r := types.StratumRequest{Method: serviceMethod, Params: rawargs}\n\n\tc.seqmutex.Lock()\n\tc.seq++\n\tr.ID = c.seq\n\tc.seqmutex.Unlock()\n\n\trawmsg, err := json.Marshal(r)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"json.Marshal failed: %v\", err)\n\t\treturn\n\t}\n\tcall := c.registerRequest(r.ID)\n\tdefer c.cancelRequest(r.ID)\n\n\trawmsg = append(rawmsg, []byte(\"\\n\")...)\n\tc.mu.Lock()\n\tif c.connected {\n\t\t_, err = c.socket.Write(rawmsg)\n\t} else {\n\t\terr = fmt.Errorf(\"Can't write to socket, socket has been closed\")\n\t\treturn nil, err\n\t}\n\tc.mu.Unlock()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"socket.Write failed: %v\", err)\n\t\treturn\n\t}\n\t//Make sure the request is cancelled if no response is given\n\tgo func() {\n\t\t// cancel after 10 seconds\n\t\tfor timeElapsed := 0; timeElapsed < 10; timeElapsed += 1 {\n\t\t\t// cancel the request if we've called stop\n\t\t\tselect {\n\t\t\tcase <-c.tg.StopChan():\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t}\n\t\t}\n\t\tc.cancelRequest(r.ID)\n\t}()\n\treply = <-call\n\n\tif reply == nil {\n\t\terr = errors.New(\"Timeout\")\n\t\treturn\n\t}\n\terr, _ = reply.(error)\n\treturn\n}", "func (client *Client) Send(line string) error {\n\tclient.mutex.RLock()\n\tconn := client.conn\n\tclient.mutex.RUnlock()\n\n\tif conn == nil {\n\t\treturn ErrNoConnection\n\t}\n\n\tif !strings.HasSuffix(line, \"\\n\") {\n\t\tline += \"\\r\\n\"\n\t}\n\n\t_ = conn.SetWriteDeadline(time.Now().Add(time.Second * 30))\n\t_, err := conn.Write([]byte(line))\n\tif err != nil {\n\t\tclient.EmitNonBlocking(NewErrorEvent(\"write\", \"Write failed:\"+err.Error(), \"connect_failed\", err))\n\t\t_ = client.Disconnect(false)\n\t}\n\n\treturn err\n}", "func (mc *MockConn) Write(b []byte) (n int, err error) {\n\tif mc.closed {\n\t\treturn 0, errors.New(\"Connection closed.\")\n\t}\n\n\tdata := make([]byte, len(b))\n\tcopy(data, b)\n\tmc.sendChan <- data\n\treturn len(b), nil\n}", "func (mb *tcpTransporter) flush(b []byte) (err error) {\n\tif err = mb.conn.SetReadDeadline(time.Now()); err != nil {\n\t\treturn\n\t}\n\t// Timeout setting will be reset when reading\n\tif _, err = mb.conn.Read(b); err != nil {\n\t\t// Ignore timeout error\n\t\tif netError, ok := err.(net.Error); ok && netError.Timeout() {\n\t\t\terr = nil\n\t\t}\n\t}\n\treturn\n}", "func (t *tcp) close() error {\n\tif !t.isopen {\n\t\treturn nil\n\t}\n\tt.isopen = false\n\t// closing this channel means that anyone readong from the channel is auto-selected in a Select statement\n\tclose(t.closed)\n\tt.conn.Close()\n\treturn nil\n}", "func TestSendFailed(t *testing.T) {\n\tdoh, _ := NewTransport(testURL, ips, nil, nil, nil)\n\ttransport := doh.(*transport)\n\trt := makeTestRoundTripper()\n\ttransport.client.Transport = rt\n\n\trt.err = errors.New(\"test\")\n\t_, err := doh.Query(simpleQueryBytes)\n\tvar qerr *queryError\n\tif err == nil {\n\t\tt.Error(\"Send failure should be reported\")\n\t} else if !errors.As(err, &qerr) {\n\t\tt.Errorf(\"Wrong error 
type: %v\", err)\n\t} else if qerr.status != SendFailed {\n\t\tt.Errorf(\"Wrong error status: %d\", qerr.status)\n\t} else if !errors.Is(qerr, rt.err) {\n\t\tt.Errorf(\"Underlying error is not retained\")\n\t}\n}", "func TestShortQuery(t *testing.T) {\n\tvar qerr *queryError\n\tdoh, _ := NewTransport(testURL, ips, nil, nil, nil)\n\t_, err := doh.Query([]byte{})\n\tif err == nil {\n\t\tt.Error(\"Empty query should fail\")\n\t} else if !errors.As(err, &qerr) {\n\t\tt.Errorf(\"Wrong error type: %v\", err)\n\t} else if qerr.status != BadQuery {\n\t\tt.Errorf(\"Wrong error status: %d\", qerr.status)\n\t}\n\n\t_, err = doh.Query([]byte{1})\n\tif err == nil {\n\t\tt.Error(\"One byte query should fail\")\n\t} else if !errors.As(err, &qerr) {\n\t\tt.Errorf(\"Wrong error type: %v\", err)\n\t} else if qerr.status != BadQuery {\n\t\tt.Errorf(\"Wrong error status: %d\", qerr.status)\n\t}\n}", "func (r *Receiver) sendResponse(packet *ptp.SyncDelayReq, sourceIP string, rawPacket gopacket.Packet) error {\n\tdst, err := net.ResolveIPAddr(\"ip6:ipv6-icmp\", sourceIP)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to resolve sender address: %w\", err)\n\t}\n\tconn, err := net.ListenPacket(\"ip6:ipv6-icmp\", \"\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to establish connection: %w\", err)\n\t}\n\tdefer conn.Close()\n\n\tmess := icmp.Message{\n\t\tType: ipv6.ICMPTypeTimeExceeded, Code: 0,\n\t\tBody: &icmp.RawBody{\n\t\t\tData: rawPacket.Data()[PTPUnusedSize:],\n\t\t},\n\t}\n\tbuf, err := mess.Marshal(nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to marshal the icmp packet: %w\", err)\n\t}\n\tif _, err := conn.WriteTo(buf, dst); err != nil {\n\t\treturn fmt.Errorf(\"unable to write to connection: %w\", err)\n\t}\n\treturn nil\n}", "func (c *client) SocketClosed() {\n}", "func (r *Request) Send() (int64, error) {\n\tvar err error\n\tif r.conn == nil {\n\t\tr.conn, err = net.Dial(\"tcp\", r.Addr)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\tmsg := append(netstring(r.Header), r.Body...)\n\treturn io.Copy(r.conn, bytes.NewReader(msg))\n}", "func (s *Socket) Send(v interface{}) {\n\n\tif s.closed || s.lost {\n\t\treturn\n\t}\n\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tlogrus.Warnf(\"Recovered from panic in socket Send. 
%v\", r)\n\t\t\treturn\n\t\t}\n\t}()\n\n\ts.mutex.Lock()\n\ts.conn.SetWriteDeadline(time.Now().Add(writeWait))\n\tif err := s.conn.WriteJSON(v); err != nil {\n\t\tlogrus.Debug(err)\n\t}\n\ts.mutex.Unlock()\n\n}", "func TCP(address string) bool {\n\tconn, err := net.DialTimeout(\"tcp\", address, timeout)\n\tif err != nil {\n\t\treturn false\n\t}\n\tconn.Close()\n\treturn true\n}", "func (c *Conn) write(t int, buf []byte) error {\n\tc.ws.SetWriteDeadline(time.Now().Add(c.WriteTimeout * time.Second))\n\treturn c.ws.WriteMessage(t, buf)\n}", "func (c *Conn) Write(b []byte) (int, error) {\n\terr := c.Conn.SetWriteDeadline(time.Now().Add(c.WriteTimeout))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn c.Conn.Write(b)\n}", "func TestServerShortLivedConn(t *testing.T) {\n\tserver := newTestServer()\n\tdefer server.Stop()\n\n\tlistener, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\tt.Fatal(\"can't listen:\", err)\n\t}\n\tdefer listener.Close()\n\tgo server.ServeListener(listener)\n\n\tvar (\n\t\trequest = `{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"rpc_modules\"}` + \"\\n\"\n\t\twantResp = `{\"jsonrpc\":\"2.0\",\"id\":1,\"result\":{\"nftest\":\"1.0\",\"rpc\":\"1.0\",\"test\":\"1.0\"}}` + \"\\n\"\n\t\tdeadline = time.Now().Add(10 * time.Second)\n\t)\n\tfor i := 0; i < 20; i++ {\n\t\tconn, err := net.Dial(\"tcp\", listener.Addr().String())\n\t\tif err != nil {\n\t\t\tt.Fatal(\"can't dial:\", err)\n\t\t}\n\t\tdefer conn.Close()\n\t\tconn.SetDeadline(deadline)\n\t\t// Write the request, then half-close the connection so the server stops reading.\n\t\tconn.Write([]byte(request))\n\t\tconn.(*net.TCPConn).CloseWrite()\n\t\t// Now try to get the response.\n\t\tbuf := make([]byte, 2000)\n\t\tn, err := conn.Read(buf)\n\t\tif err != nil {\n\t\t\tt.Fatal(\"read error:\", err)\n\t\t}\n\t\tif !bytes.Equal(buf[:n], []byte(wantResp)) {\n\t\t\tt.Fatalf(\"wrong response: %s\", buf[:n])\n\t\t}\n\t}\n}", "func TestDialTCP(t *testing.T) {\n\tt.Logf(\"Running DialTCP test to %s:%s\", TEST_HOST, TEST_PORT)\n\tdb, err = DialTCP(TEST_HOST, TEST_USER, TEST_PASSWD, TEST_DBNAME)\n\tif err != nil {\n\t\tt.Logf(\"Error %s\", err)\n\t\tt.Fail()\n\t}\n\terr = db.Close()\n\tif err != nil {\n\t\tt.Logf(\"Error %s\", err)\n\t\tt.Fail()\n\t}\n}", "func (my *MySQL) Ping() (err os.Error) {\n defer my.unlock()\n defer catchOsError(&err)\n my.lock()\n\n if my.conn == nil {\n return NOT_CONN_ERROR\n }\n if my.unreaded_rows {\n return UNREADED_ROWS_ERROR\n }\n\n // Send command\n my.sendCmd(_COM_PING)\n // Get server response\n my.getResult(nil)\n\n return\n}", "func handleConnection(c net.Conn) {\n\tconnectionId := c.RemoteAddr().String()\n\tlog.Printf(\"ACCEPT[%s]\\n\", connectionId)\n\n\t// Prepare and start a timer to send requests to the client\n\tticker := time.NewTicker(5000 * time.Millisecond)\n\tdone := make(chan bool)\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-done:\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\t// TODO Make request type pluggable\n\t\t\t\tsendRequest(c, \"0120\")\n\t\t\t}\n\t\t}\n\t}()\n\n\t// Read request from client\n\tfor {\n\t\tnetData, err := bufio.NewReader(c).ReadString('\\n')\n\t\tif err != nil {\n\t\t\tlog.Printf(\"CLOSED[%s]\\n\", connectionId)\n\t\t\tticker.Stop()\n\t\t\tdone <- true\n\t\t\tbreak\n\t\t}\n\n\t\t// Process client request and send response\n\t\trequest := strings.TrimSpace(string(netData))\n\t\tlog.Printf(\"RECV[%s]: length=%d, request=%s\\n\", connectionId, len(request), request)\n\t\tresponse := handleRequest(request)\n\t\tresponse = 
fmt.Sprintf(\"%s\\n\", response)\n\t\tlog.Printf(\"SEND[%s]: length=%d, response=%s\", connectionId, len(response), response)\n\n\t\tc.Write([]byte(string(response)))\n\t}\n\tc.Close()\n}", "func (c *conn) Send(cmd string, args ...interface{}) error {\n\terr := c.conn.Send(cmd, args...)\n\tc.pending++\n\treturn err\n}", "func handleConnection(conn *net.TCPConn) {\n\tdefer conn.Close() // clean up when done\n\n\tbuf := make([]byte, 1024)\n\n\tn, err := conn.Read(buf)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\t// echo buffer\n\tw, err := conn.Write(buf[:n])\n\tif err != nil {\n\t\tfmt.Println(\"failed to write to client:\", err)\n\t\treturn\n\t}\n\tif w != n { // was all data sent\n\t\tfmt.Println(\"warning: not all data sent to client\")\n\t\treturn\n\t}\n}", "func TestAcceptOversize(t *testing.T) {\n\tdoh := newFakeTransport()\n\tclient, server := makePair()\n\n\t// Start the forwarder running.\n\tgo Accept(doh, server)\n\n\tlbuf := make([]byte, 2)\n\t// Send Query\n\tqueryData := simpleQueryBytes\n\tbinary.BigEndian.PutUint16(lbuf, uint16(len(queryData)))\n\tclient.Write(lbuf)\n\tclient.Write(queryData)\n\n\t// Read query\n\t<-doh.query\n\n\t// Send oversize response\n\tdoh.response <- make([]byte, 65536)\n\n\t// Accept should have closed the socket because the response\n\t// cannot be written.\n\tn, _ := client.Read(lbuf)\n\tif n != 0 {\n\t\tt.Error(\"Expected to read 0 bytes\")\n\t}\n}", "func (d *DaemonClient) sendAndWaitForStream(req []byte, consumer func(io.Reader) error) error {\n\tvar conn net.Conn\n\tvar err error\n\tbaseCmd := command.BaseCommand{}\n\terr = json.Unmarshal(req, &baseCmd)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Failed to unmarshal command\")\n\t}\n\tconn, err = net.DialTimeout(\"tcp\", fmt.Sprintf(\"%s:%d\", \"127.0.0.1\", d.daemonServerListenPort), time.Second*30)\n\tif err != nil {\n\t\treturn errors.Wrap(err, fmt.Sprintf(\"%s failed to dial to daemon\", baseCmd.CommandType))\n\t}\n\tdefer conn.Close()\n\tif _, err = conn.Write(req); err != nil {\n\t\treturn errors.Wrap(err, fmt.Sprintf(\"%s failed to write to daemon\", baseCmd.CommandType))\n\t}\n\tcw, ok := conn.(interface{ CloseWrite() error })\n\tif !ok {\n\t\treturn errors.New(fmt.Sprintf(\"%s failed to close write to daemon server\", baseCmd.CommandType))\n\t}\n\tlog.WrapAndLogE(cw.CloseWrite())\n\tif consumer != nil {\n\t\treturn consumer(conn)\n\t}\n\treturn nil\n}", "func sendRequest(c net.Conn, mti string) {\n\tconnectionId := c.RemoteAddr().String()\n\n\tpayload := createRequest(mti)\n\trequest := fmt.Sprintf(\"%s:%s\\n\", mti, payload)\n\n\tlog.Printf(\"SEND[%s] length=%d, request=%s\", connectionId, len(request), request)\n\tc.Write([]byte(string(request)))\n}", "func sendCommand(command CommandRequest) *CommandResponse {\n conn, err := net.Dial(\"tcp\", \"127.0.0.1:5000\")\n if err != nil {\n LOG[ERROR].Println(StatusText(StatusConnectionError), err, \"retrying...\")\n // Sleep to allow some time for new master startup\n time.Sleep(5 * time.Second)\n conn, err = net.Dial(\"tcp\", \"127.0.0.1:5000\")\n }\n if err != nil {\n LOG[ERROR].Println(StatusText(StatusConnectionError), err)\n return nil\n }\n defer conn.Close()\n\n encoder := gob.NewEncoder(conn)\n err = encoder.Encode(command)\n if err != nil {\n LOG[ERROR].Println(StatusText(StatusEncodeError), err)\n return nil\n }\n\n var response CommandResponse\n decoder := gob.NewDecoder(conn)\n err = decoder.Decode(&response)\n if err != nil {\n LOG[ERROR].Println(StatusText(StatusDecodeError), err)\n return nil\n }\n 
return &response\n}", "func (t *DNSOverTCPTransport) RoundTrip(\n\tctx context.Context, query model.DNSQuery) (model.DNSResponse, error) {\n\t// TODO(bassosimone): this method should more strictly honour the context, which\n\t// currently is only used to bound the dial operation\n\trawQuery, err := query.Bytes()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(rawQuery) > math.MaxUint16 {\n\t\treturn nil, errQueryTooLarge\n\t}\n\tconn, err := t.dial(ctx, \"tcp\", t.address)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer conn.Close()\n\tconst iotimeout = 10 * time.Second\n\tconn.SetDeadline(time.Now().Add(iotimeout))\n\t// Write request\n\tbuf := []byte{byte(len(rawQuery) >> 8)}\n\tbuf = append(buf, byte(len(rawQuery)))\n\tbuf = append(buf, rawQuery...)\n\tif _, err = conn.Write(buf); err != nil {\n\t\treturn nil, err\n\t}\n\t// Read response\n\theader := make([]byte, 2)\n\tif _, err = io.ReadFull(conn, header); err != nil {\n\t\treturn nil, err\n\t}\n\tlength := int(header[0])<<8 | int(header[1])\n\trawResponse := make([]byte, length)\n\tif _, err = io.ReadFull(conn, rawResponse); err != nil {\n\t\treturn nil, err\n\t}\n\treturn t.decoder.DecodeResponse(rawResponse, query)\n}", "func (c *conn) Close() error {\n\tklog.V(4).Infoln(\"closing connection\")\n\tif c.closeTunnel != nil {\n\t\tdefer c.closeTunnel()\n\t}\n\n\tvar req *client.Packet\n\tif c.connID != 0 {\n\t\treq = &client.Packet{\n\t\t\tType: client.PacketType_CLOSE_REQ,\n\t\t\tPayload: &client.Packet_CloseRequest{\n\t\t\t\tCloseRequest: &client.CloseRequest{\n\t\t\t\t\tConnectID: c.connID,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t} else {\n\t\t// Never received a DIAL response so no connection ID.\n\t\treq = &client.Packet{\n\t\t\tType: client.PacketType_DIAL_CLS,\n\t\t\tPayload: &client.Packet_CloseDial{\n\t\t\t\tCloseDial: &client.CloseDial{\n\t\t\t\t\tRandom: c.random,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\n\tklog.V(5).InfoS(\"[tracing] send req\", \"type\", req.Type)\n\n\tif err := c.tunnel.Send(req); err != nil {\n\t\treturn err\n\t}\n\n\tselect {\n\tcase errMsg := <-c.closeCh:\n\t\tif errMsg != \"\" {\n\t\t\treturn errors.New(errMsg)\n\t\t}\n\t\treturn nil\n\tcase <-time.After(CloseTimeout):\n\t}\n\n\treturn errConnCloseTimeout\n}", "func BenchmarkDialTCP(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tDialTCP(TEST_HOST, TEST_USER, TEST_PASSWD, TEST_DBNAME)\n\t}\n}", "func sendTcpMsmtStopRequest(addr string, port int, callSize int) {\n\ttcpObj := clientProtos.NewTcpObj(\"TcpThroughputMsmtStopReqConn\", addr, port, callSize)\n\n\treqDataObj := new(shared.DataObj)\n\treqDataObj.Type = shared.MEASUREMENT_STOP_REQUEST\n\n\tif val, ok := idStorage[\"host-uuid\"]; ok {\n\t\treqDataObj.Id = val\n\t} else {\n\t\tfmt.Println(\"\\nFound not the id\")\n\t}\n\n\tseqNo++\n\treqDataObj.Seq = strconv.FormatUint(seqNo, 10)\n\treqDataObj.Secret = \"fancySecret\"\n\n\tif val, ok := msmtIdStorage[\"tcp-throughput1\"]; ok {\n\t\treqDataObj.Measurement_id = val\n\t} else {\n\t\tfmt.Println(\"\\nFound not the measurement id for tcp throughput\")\n\t}\n\n\treqJson := shared.ConvDataStructToJson(reqDataObj)\n\t// debug fmt.Printf(\"\\nmsmt stop request JSON is: % s\", reqJson)\n\n\tmsmtStopRep := tcpObj.StopMeasurement(reqJson)\n\tprepareOutput(msmtStopRep)\n}", "func (s *MockStream) SendRequest(req *envoy_service_discovery.DiscoveryRequest) error {\n\tsubCtx, cancel := context.WithTimeout(s.ctx, s.recvTimeout)\n\n\tselect {\n\tcase <-subCtx.Done():\n\t\tcancel()\n\t\tif errors.Is(subCtx.Err(), context.Canceled) {\n\t\t\treturn 
io.EOF\n\t\t}\n\t\treturn subCtx.Err()\n\tcase s.recv <- req:\n\t\tcancel()\n\t\treturn nil\n\t}\n}", "func (c *Client) Send(data []byte, flush bool) error {\n\tc.instruments.Log(octo.LOGINFO, c.info.UUID, \"udp.Client.Send\", \"Started\")\n\n\tc.instruments.Log(octo.LOGTRANSMISSION, c.info.UUID, \"udp.Client.Send\", \"Started : %q\", string(data))\n\t_, err := c.conn.WriteToUDP(data, c.addr)\n\tc.instruments.Log(octo.LOGTRANSMISSION, c.info.UUID, \"udp.Client.Send\", \"Ended\")\n\n\tc.instruments.NotifyEvent(octo.Event{\n\t\tType: octo.DataWrite,\n\t\tClient: c.info.UUID,\n\t\tServer: c.info.SUUID,\n\t\tLocalAddr: c.info.Local,\n\t\tRemoteAddr: c.info.Remote,\n\t\tData: octo.NewDataInstrument(data, err),\n\t\tDetails: map[string]interface{}{},\n\t})\n\n\tif err != nil {\n\t\tc.instruments.Log(octo.LOGERROR, c.info.UUID, \"udp.Client.Send\", \"Completed : %s\", err.Error())\n\t\treturn err\n\t}\n\n\tc.instruments.Log(octo.LOGINFO, c.info.UUID, \"udp.Client.Send\", \"Completed\")\n\treturn nil\n}", "func (c *queryClient) Send(body interface{}) (io.ReadCloser, error) {\n\n\tjsonBody, err := json.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar reqBody = new(bytes.Buffer)\n\t_, err = reqBody.Write(jsonBody)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := c.client.Post(c.url, \"application/json\", reqBody)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif resp.StatusCode == 200 {\n\t\treturn resp.Body, nil\n\t}\n\n\t// decode and return error if the status isn't 200\n\trespBody, err := io.ReadAll(resp.Body)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn nil, errors.New(string(respBody))\n}", "func (c *Client) Send(method byte, arguments []byte) error {\n\tvar d net.Dialer\n\tctx, cancel := context.WithTimeout(context.Background(), time.Minute)\n\tdefer cancel()\n\n\tconn, err := d.DialContext(ctx, \"tcp\", c.Host)\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to dial: %v\", err)\n\t}\n\n\tdefer conn.Close()\n\n\tpayload := prepend(arguments, method)\n\tresponse, err := conn.Write(payload)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif response != 0 {\n\t\treturn &CommandFailed{Code: response}\n\t}\n\n\treturn nil\n}", "func TestBadTCPClientConnect(t *testing.T) {\n\tclient, err := clients.New(\"321321321\", \"\")\n\tif err == nil {\n\t\tt.Fatalf(\"Client succeeded to connect\")\n\t}\n\tclient, err = clients.New(testAddr, \"hil\")\n\tif err != nil {\n\t\tt.Fatalf(\"Client failed to connect - %s\", err.Error())\n\t\tt.FailNow()\n\t}\n\tdefer client.Close()\n\n\tif err := client.Ping(); err != nil {\n\t\tt.Fatalf(\"ping failed\")\n\t}\n\tif msg := <-client.Messages(); msg.Data != \"pong\" {\n\t\tt.Fatalf(\"Unexpected data: Expecting 'pong' got %s\", msg.Data)\n\t}\n\tclient.Ping()\n}", "func (c *client) Send(p []byte) error {\n\t_, err := c.conn.Write(p)\n\treturn err\n}", "func TestDisconnect1(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"short\")\n\t}\n\tcheckConnStr(t)\n\tSetLogger(testLogger{t})\n\n\t// Revert to the normal dialer after the test is done.\n\tnormalCreateDialer := createDialer\n\tdefer func() {\n\t\tcreateDialer = normalCreateDialer\n\t}()\n\n\twaitDisrupt := make(chan struct{})\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second*2)\n\tdefer cancel()\n\n\tcreateDialer = func(p *connectParams) dialer {\n\t\tnd := tcpDialer{&net.Dialer{Timeout: p.dial_timeout, KeepAlive: p.keepAlive}}\n\t\tdi := &dialerInterrupt{nd: nd}\n\t\tgo func() 
{\n\t\t\t<-waitDisrupt\n\t\t\tdi.Interrupt(true)\n\t\t\tdi.Interrupt(false)\n\t\t}()\n\t\treturn di\n\t}\n\tdb, err := sql.Open(\"sqlserver\", makeConnStr(t).String())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := db.PingContext(ctx); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer db.Close()\n\n\t_, err = db.ExecContext(ctx, `SET LOCK_TIMEOUT 1800;`)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tgo func() {\n\t\ttime.Sleep(time.Second * 1)\n\t\tclose(waitDisrupt)\n\t}()\n\tt.Log(\"prepare for query\")\n\t_, err = db.ExecContext(ctx, `waitfor delay '00:00:3';`)\n\tif err != nil {\n\t\tt.Log(\"expected error after disconnect\", err)\n\t\treturn\n\t}\n\tt.Fatal(\"wanted error after Exec\")\n}", "func (c *Conn) Write(b []byte) (int, error) {\n\tif !c.ok() {\n\t\treturn 0, syscall.EINVAL\n\t}\n\tpayload := b\n\tif len(payload) > mss {\n\t\tpayload = payload[:mss]\n\t}\n\t_, err := c.sendBuf.Push(payload)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tl, err := c.sendDATA(payload)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn l, nil\n}", "func handleConnection(conn net.Conn) {\n\tdebugSession := fakeDebugSession{\n\t\trw: bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)),\n\t\tsendQueue: make(chan dap.Message),\n\t\tstopDebug: make(chan struct{}),\n\t}\n\tgo debugSession.sendFromQueue()\n\n\tfor {\n\t\terr := debugSession.handleRequest()\n\t\t// TODO(polina): check for connection vs decoding error?\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tlog.Println(\"No more data to read:\", err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t// There maybe more messages to process, but\n\t\t\t// we will start with the strict behavior of only accepting\n\t\t\t// expected inputs.\n\t\t\tlog.Fatal(\"Server error: \", err)\n\t\t}\n\t}\n\n\tlog.Println(\"Closing connection from\", conn.RemoteAddr())\n\tclose(debugSession.stopDebug)\n\tdebugSession.sendWg.Wait()\n\tclose(debugSession.sendQueue)\n\tconn.Close()\n}", "func send(conn net.Conn, format string, v ...interface{}) {\n\tdebug(\"> \"+format, v...)\n\n\tfmt.Fprintf(conn, format+\"\\r\\n\", v...)\n\ttime.Sleep(700 * time.Millisecond) // Wait a bit so we don't flood\n}", "func sendQuicMsmtStopRequest(addr string, port int, callSize int) {\n\ttcpObj := clientProtos.NewTcpObj(\"QuicMsmtStopReqConn\", addr, port, callSize)\n\n\treqDataObj := new(shared.DataObj)\n\treqDataObj.Type = shared.MEASUREMENT_STOP_REQUEST\n\n\tif val, ok := idStorage[\"host-uuid\"]; ok {\n\t\treqDataObj.Id = val\n\t} else {\n\t\tfmt.Println(\"\\nFound not the id\")\n\t}\n\n\tseqNo++\n\treqDataObj.Seq = strconv.FormatUint(seqNo, 10)\n\treqDataObj.Secret = \"fancySecret\"\n\n\tif val, ok := msmtIdStorage[\"quic-throughput1\"]; ok {\n\t\treqDataObj.Measurement_id = val\n\t} else {\n\t\tfmt.Println(\"\\nFound not the measurement id for quic throughput\")\n\t}\n\n\treqJson := shared.ConvDataStructToJson(reqDataObj)\n\t// debug fmt.Printf(\"\\nmsmt stop request JSON is: % s\", reqJson)\n\n\tmsmtStopRep := tcpObj.StopMeasurement(reqJson)\n\tprepareOutput(msmtStopRep)\n}", "func (buf replyBuf) send(conn *net.UnixConn, err error) error {\n\treplyToSerialize := reply{\n\t\tSuccess: err == nil,\n\t\tValue: buf.value,\n\t\tPipeID: buf.pipeid,\n\t}\n\tif err != nil {\n\t\treplyToSerialize.Error = err.Error()\n\t}\n\tserializedReply, err := json.Marshal(&replyToSerialize)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// We took ownership of the FD - close it when we're done.\n\tdefer func() {\n\t\tif buf.fd != nil {\n\t\t\tbuf.fd.Close()\n\t\t}\n\t}()\n\t// Copy the FD number 
to the socket ancillary buffer\n\tfds := make([]int, 0)\n\tif buf.fd != nil {\n\t\tfds = append(fds, int(buf.fd.Fd()))\n\t}\n\toob := syscall.UnixRights(fds...)\n\tn, oobn, err := conn.WriteMsgUnix(serializedReply, oob, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Validate that we sent the full packet\n\tif n != len(serializedReply) || oobn != len(oob) {\n\t\treturn io.ErrShortWrite\n\t}\n\treturn nil\n}", "func (c *Controller) callAndWait(p []*Packet, checkError bool, f func(*Packet) bool) error {\n\tc.packetConnLock.Lock()\n\tdefer c.packetConnLock.Unlock()\n\n\tcheckSeqs := map[uint16]bool{}\n\tfor _, packet := range p {\n\t\tif seq, err := packet.Seq(); err == nil {\n\t\t\tcheckSeqs[seq] = true\n\t\t}\n\t}\n\n\tconn, err := NewPacketConn()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\n\tsessInfo := c.getSessionInfo()\n\tif err := conn.Auth(sessInfo.UserID, sessInfo.Authorize, c.timeout); err != nil {\n\t\treturn err\n\t}\n\n\t// Prevent the bg thread from blocking on a\n\t// channel send forever.\n\tdoneChan := make(chan struct{}, 1)\n\tdefer close(doneChan)\n\n\tpackets := make(chan *Packet, 16)\n\terrChan := make(chan error, 1)\n\tgo func() {\n\t\tdefer close(packets)\n\t\tfor {\n\t\t\tpacket, err := conn.Read()\n\t\t\tif err != nil {\n\t\t\t\terrChan <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif checkError && packet.IsResponse {\n\t\t\t\tseq, err := packet.Seq()\n\t\t\t\tif err == nil && checkSeqs[seq] && len(packet.Data) > 0 {\n\t\t\t\t\tif packet.Data[len(packet.Data)-1] != 0 {\n\t\t\t\t\t\terrChan <- RemoteCallError\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase packets <- packet:\n\t\t\tcase <-doneChan:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\tfor _, subPacket := range p {\n\t\tif err := conn.Write(subPacket); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\ttimeout := time.After(c.timeout)\n\tfor {\n\t\tselect {\n\t\tcase packet, ok := <-packets:\n\t\t\tif !ok {\n\t\t\t\t// Could be a race condition between packets and errChan.\n\t\t\t\tselect {\n\t\t\t\tcase err := <-errChan:\n\t\t\t\t\treturn err\n\t\t\t\tdefault:\n\t\t\t\t\treturn errors.New(\"connection closed\")\n\t\t\t\t}\n\t\t\t}\n\t\t\tif f(packet) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\tcase err := <-errChan:\n\t\t\treturn err\n\t\tcase <-timeout:\n\t\t\treturn errors.New(\"timeout waiting for response\")\n\t\t}\n\t}\n}", "func SendCommand(conn *net.TCPConn, address, command string) (resp string, err error) {\n\tdefer color.Unset()\n\n\tresp, err = writeCommand(conn, command)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tcolor.Set(color.FgBlue)\n\tlog.L.Infof(\"Response from device: %s\", resp)\n\treturn resp, nil\n}", "func writeToTCP(sock *net.TCPConn, writeChan chan []byte,\n\tfeedbackFromSocket, feedbackToSocket chan bool) {\n\tloop := 1\n\tfor loop == 1 {\n\t\tselect {\n\t\tcase msg := <-writeChan:\n\t\t\t_, err := sock.Write(msg)\n\t\t\tif err != nil {\n\t\t\t\tselect {\n\t\t\t\tcase feedbackFromSocket <- true:\n\t\t\t\tcase <-feedbackToSocket:\n\t\t\t\t}\n\t\t\t\tloop = 0\n\t\t\t\tcontinue\n\t\t\t}\n\t\tcase <-feedbackToSocket:\n\t\t\tloop = 0\n\t\t\tcontinue\n\t\t}\n\t}\n}", "func ProcessTCPCommands(config *Configuration, connection net.Conn) {\n Log.LogFunctionName()\n defer connection.Close()\n Log.Infof(\"Accepted C&C connection from %s.\", connection.RemoteAddr().String())\n helpString := \">>> Commands: 'status', 'stop', 'restart', 'help', 'version', 'profiles', 'goprocs'.\\n\"\n var error error\n timeout, error := time.ParseDuration(\"30s\")\n if 
error != nil { Log.Errorf(\"Error parsing duration: %v.\", error) }\n\n var commandBuffer string\n buffer := make([]byte, 256)\n for error == nil {\n connection.SetDeadline(time.Now().Add(timeout))\n var n int\n n, error = connection.Read(buffer)\n if n <= 0 && error != nil { break }\n Log.Debugf(\"Read %d characters.\", n)\n\n commandBuffer += string(buffer[:n])\n index := strings.Index(commandBuffer, \"\\n\")\n for index > -1 && error == nil {\n command := strings.ToLower(commandBuffer[:index])\n command = strings.TrimSpace(command)\n if index < len(commandBuffer)-1 {\n commandBuffer = commandBuffer[index+1:]\n } else {\n commandBuffer = \"\"\n }\n index = strings.Index(commandBuffer, \"\\n\")\n\n Log.Infof(\"C&C command '%s'.\", command)\n switch command {\n case \"hello\":\n _, error = connection.Write([]byte(\">>> Hello.\\n\"))\n case \"version\":\n s := fmt.Sprintf(\">>> Software version %s.\\n\", Util.CompileVersion())\n _, error = connection.Write([]byte(s))\n case \"status\":\n s := fmt.Sprintf(\"%s.\\n\", config.ServerStatusString())\n _, error = connection.Write([]byte(s))\n case \"profiles\":\n s := GetProfileNames()\n _, error = connection.Write([]byte(s))\n case \"goprocs\":\n s := GetGoprocs()\n _, error = connection.Write([]byte(s))\n case \"stop\":\n _, error = connection.Write([]byte(\">>> Stopping.\\n\"))\n myself, _ := os.FindProcess(os.Getpid())\n myself.Signal(syscall.SIGHUP)\n case \"\", \" \", \"\\n\":\n case \"help\", \"?\", \"h\":\n _, error = connection.Write([]byte(helpString))\n default:\n message := fmt.Sprintf(\">>> Unknown command '%s'.\\n\", command)\n _, error = connection.Write([]byte(message))\n if error != nil { break; }\n _, error = connection.Write([]byte(helpString))\n }\n }\n }\n if error != nil {\n Log.Debugf(\"Connection closed with error %v.\", error)\n } else {\n Log.Debugf(\"Connection closed without error.\")\n }\n}", "func (d *Device) ServeTCP(in net.Conn) {\n\tNewSession(in, d).Serve() // conn will be Closed inside\n}", "func (tv TV) send(sharpCommand string, sharpParameter string) (string, error) {\n\tcmdString := fmt.Sprintf(\"%4s%-4s\\r\", sharpCommand, sharpParameter)\n\n\tconnectString := fmt.Sprintf(\"%s:%s\", tv.IP, tv.Port)\n\tconn, err := net.DialTimeout(\"tcp\", connectString, time.Duration(100*time.Millisecond))\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Error connecting to TV: %s\", err)\n\t}\n\tdefer conn.Close()\n\n\tfmt.Fprintf(conn, cmdString)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Error sending command to TV: %s, err\")\n\t}\n\n\tapiResult := make([]byte, 32)\n\tbytesRead, err := conn.Read(apiResult)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Error reading response from TV: Only read in %d bytes:\",\n\t\t\tbytesRead)\n\t} else {\n\t\tresultString := parseResult(apiResult)\n\t\tif resultString == \"ERR\" {\n\t\t\treturn resultString, errors.New(\"Error(ERR) returned by TV in response to command.\")\n\t\t}\n\t\treturn resultString, nil\n\t}\n\n\t// Can we even get here? Hmm...\n\treturn \"\", errors.New(\"BUG: Send() in utils.go fell through to the end. 
That's not supposed to happen.\")\n}", "func (c *SodaClient) Write(sendMsg string) {\n\tmsg := strings.TrimSpace(sendMsg)\n\n\tbuf := []byte(msg)\n\n\t_, err := c.conn.Write(buf) // returns string length of write and potential write errors\n\n\tif err != nil {\n\t\tfmt.Println(msg, err)\n\t}\n}", "func (t *transport) sendLoop(addr string, reconnectInterval, retryTimeout time.Duration, log SomeLogger) {\n\tvar (\n\t\tsock net.Conn\n\t\terr error\n\t\treconnectC <-chan time.Time\n\t)\n\n\tif reconnectInterval > 0 {\n\t\treconnectTicker := time.NewTicker(reconnectInterval)\n\t\tdefer reconnectTicker.Stop()\n\t\treconnectC = reconnectTicker.C\n\t}\n\nRECONNECT:\n\t// Attempt to connect\n\tsock, err = net.Dial(\"udp\", addr)\n\tif err != nil {\n\t\tlog.Printf(\"[STATSD] Error connecting to server: %s\", err)\n\t\tgoto WAIT\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase buf, ok := <-t.sendQueue:\n\t\t\t// Get a buffer from the queue\n\t\t\tif !ok {\n\t\t\t\t_ = sock.Close() // nolint: gosec\n\t\t\t\tt.shutdownWg.Done()\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif len(buf) > 0 {\n\t\t\t\t// cut off \\n in the end\n\t\t\t\t_, err := sock.Write(buf[0 : len(buf)-1])\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"[STATSD] Error writing to socket: %s\", err)\n\t\t\t\t\t_ = sock.Close() // nolint: gosec\n\t\t\t\t\tgoto WAIT\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// return buffer to the pool\n\t\t\tselect {\n\t\t\tcase t.bufPool <- buf:\n\t\t\tdefault:\n\t\t\t\t// pool is full, let GC handle the buf\n\t\t\t}\n\t\tcase <-reconnectC:\n\t\t\t_ = sock.Close() // nolint: gosec\n\t\t\tgoto RECONNECT\n\t\t}\n\t}\n\nWAIT:\n\t// Wait for a while\n\ttime.Sleep(retryTimeout)\n\tgoto RECONNECT\n}", "func (s *Stream) write(b []byte) (n int, err error) {\n\tvar flags uint16\n\tvar max uint32\n\tvar body []byte\nSTART:\n\ts.stateLock.Lock()\n\tswitch s.state {\n\tcase streamLocalClose:\n\t\tfallthrough\n\tcase streamClosed:\n\t\ts.stateLock.Unlock()\n\t\treturn 0, ErrStreamClosed\n\tcase streamReset:\n\t\ts.stateLock.Unlock()\n\t\treturn 0, ErrConnectionReset\n\t}\n\ts.stateLock.Unlock()\n\n\t// If there is no data available, block\n\twindow := atomic.LoadUint32(&s.sendWindow)\n\tif window == 0 {\n\t\tgoto WAIT\n\t}\n\n\t// Determine the flags if any\n\tflags = s.sendFlags()\n\n\t// Send up to our send window\n\tmax = min(window, uint32(len(b)))\n\tbody = b[:max]\n\n\t// Send the header\n\ts.sendHdr.encode(typeData, flags, s.id, max)\n\tif err = s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil {\n\t\treturn 0, err\n\t}\n\n\t// Reduce our send window\n\tatomic.AddUint32(&s.sendWindow, ^uint32(max-1))\n\n\t// Unlock\n\treturn int(max), err\n\nWAIT:\n\tvar timeout <-chan time.Time\n\twriteDeadline := s.writeDeadline.Load().(time.Time)\n\tif !writeDeadline.IsZero() {\n\t\tdelay := writeDeadline.Sub(time.Now())\n\t\ttimeout = time.After(delay)\n\t}\n\tselect {\n\tcase <-s.sendNotifyCh:\n\t\tgoto START\n\tcase <-timeout:\n\t\treturn 0, ErrTimeout\n\t}\n\treturn 0, nil\n}", "func fetchData(lockChan chan bool) {\n tcpAddr, _ := net.ResolveTCPAddr(\"tcp4\", \"localhost:1201\")\n conn, _ := net.DialTCP(\"tcp\", nil, tcpAddr)\n\n // Write the address to the stream\n conn.Write([]byte(\"mfgiXnSzJF6mb37FDorWJeeqeP3tFTERpo\"))\n var buf[255]byte\n\n // Read back the response :-)\n n, err := conn.Read(buf[0:])\n if err!=nil {\n fmt.Println(\"Error reading data\")\n return\n }\n\n fmt.Printf(\"Received: %s\\n\", string(buf[:(n-5)]))\n\n // 'Unlock' the lock chan (thread join)\n lockChan<-true\n\n conn.Close()\n}", "func 
handleSmppConnection(smsc *Smsc, conn net.Conn) {\n\tsessionId := rand.Int()\n\tsystemId := \"anonymous\"\n\tstopLoop := false\n\n\tdefer delete(smsc.Sessions, sessionId)\n\tdefer conn.Close()\n\n\tfor {\n\t\t// read PDU header\n\t\tpduHeadBuf := make([]byte, 16)\n\t\tif _, err := io.ReadFull(conn, pduHeadBuf); err != nil {\n\t\t\tlog.Printf(\"closing connection for system_id[%s] due %v\\n\", systemId, err)\n\t\t\treturn\n\t\t}\n\t\tcmdLen := binary.BigEndian.Uint32(pduHeadBuf[0:])\n\t\tcmdId := binary.BigEndian.Uint32(pduHeadBuf[4:])\n\t\t// cmdSts := binary.BigEndian.Uint32(pduHeadBuf[8:])\n\t\tseqNum := binary.BigEndian.Uint32(pduHeadBuf[12:])\n\n\t\tvar respBytes []byte\n\n\t\tswitch cmdId {\n\t\tcase BIND_RECEIVER, BIND_TRANSMITTER, BIND_TRANSCEIVER: // bind requests\n\t\t\t{\n\t\t\t\tpduBody := make([]byte, cmdLen-16)\n\t\t\t\tif _, err := io.ReadFull(conn, pduBody); err != nil {\n\t\t\t\t\tlog.Printf(\"closing connection due %v\\n\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// find first null terminator\n\t\t\t\tidx := bytes.Index(pduBody, []byte(\"\\x00\"))\n\t\t\t\tif idx == -1 {\n\t\t\t\t\tlog.Printf(\"invalid pdu_body. cannot find system_id. closing connection\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tsystemId = string(pduBody[:idx])\n\t\t\t\tsmsc.Sessions[sessionId] = Session{systemId, conn}\n\t\t\t\tlog.Printf(\"bind request from system_id[%s]\\n\", systemId)\n\n\t\t\t\trespCmdId := 2147483648 + cmdId // hack to calc resp cmd id\n\t\t\t\trespBytes = stringBodyPDU(respCmdId, STS_OK, seqNum, \"smscsim\")\n\t\t\t}\n\t\tcase UNBIND: // unbind request\n\t\t\t{\n\t\t\t\tlog.Printf(\"unbind request from system_id[%s]\\n\", systemId)\n\t\t\t\trespBytes = headerPDU(UNBIND_RESP, STS_OK, seqNum)\n\t\t\t\tstopLoop = true\n\t\t\t}\n\t\tcase ENQUIRE_LINK: // enquire_link\n\t\t\t{\n\t\t\t\tlog.Printf(\"enquire_link from system_id[%s]\\n\", systemId)\n\t\t\t\trespBytes = headerPDU(ENQUIRE_LINK_RESP, STS_OK, seqNum)\n\t\t\t}\n\t\tcase SUBMIT_SM: // submit_sm\n\t\t\t{\n\t\t\t\tpduBody := make([]byte, cmdLen-16)\n\t\t\t\tif _, err := io.ReadFull(conn, pduBody); err != nil {\n\t\t\t\t\tlog.Printf(\"error reading submit_sm body for %s due %v. 
closing connection\", systemId, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tlog.Printf(\"submit_sm from system_id[%s]\\n\", systemId)\n\n\t\t\t\tidxCounter := 0\n\t\t\t\tnullTerm := []byte(\"\\x00\")\n\n\t\t\t\tsrvTypeEndIdx := bytes.Index(pduBody, nullTerm)\n\t\t\t\tif srvTypeEndIdx == -1 {\n\t\t\t\t\trespBytes = headerPDU(GENERIC_NACK, STS_INVALID_CMD, seqNum)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tidxCounter = idxCounter + srvTypeEndIdx\n\t\t\t\tidxCounter = idxCounter + 3 // skip src ton and npi\n\n\t\t\t\tsrcAddrEndIdx := bytes.Index(pduBody[idxCounter:], nullTerm)\n\t\t\t\tif srcAddrEndIdx == -1 {\n\t\t\t\t\trespBytes = headerPDU(GENERIC_NACK, STS_INVALID_CMD, seqNum)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tidxCounter = idxCounter + srcAddrEndIdx\n\t\t\t\tidxCounter = idxCounter + 3 // skip dest ton and npi\n\n\t\t\t\tdestAddrEndIdx := bytes.Index(pduBody[idxCounter:], nullTerm)\n\t\t\t\tif destAddrEndIdx == -1 {\n\t\t\t\t\trespBytes = headerPDU(GENERIC_NACK, STS_INVALID_CMD, seqNum)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tidxCounter = idxCounter + destAddrEndIdx\n\t\t\t\tidxCounter = idxCounter + 4 // skip esm_class, protocol_id, priority_flag\n\n\t\t\t\tschedEndIdx := bytes.Index(pduBody[idxCounter:], nullTerm)\n\t\t\t\tif schedEndIdx == -1 {\n\t\t\t\t\trespBytes = headerPDU(GENERIC_NACK, STS_INVALID_CMD, seqNum)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tidxCounter = idxCounter + schedEndIdx\n\t\t\t\tidxCounter = idxCounter + 1 // next is validity period\n\n\t\t\t\tvalidityEndIdx := bytes.Index(pduBody[idxCounter:], nullTerm)\n\t\t\t\tif validityEndIdx == -1 {\n\t\t\t\t\trespBytes = headerPDU(GENERIC_NACK, STS_INVALID_CMD, seqNum)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tidxCounter = idxCounter + validityEndIdx\n\t\t\t\tregisteredDlr := pduBody[idxCounter+1] // registered_delivery is next field after the validity_period\n\n\t\t\t\t// prepare submit_sm_resp\n\t\t\t\tmsgId := strconv.Itoa(rand.Int())\n\t\t\t\trespBytes = stringBodyPDU(SUBMIT_SM_RESP, STS_OK, seqNum, msgId)\n\n\t\t\t\tif registeredDlr != 0 {\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\ttime.Sleep(2000 * time.Millisecond)\n\t\t\t\t\t\tnow := time.Now()\n\t\t\t\t\t\tdlr := deliveryReceiptPDU(msgId, now, now)\n\t\t\t\t\t\tif _, err := conn.Write(dlr); err != nil {\n\t\t\t\t\t\t\tlog.Printf(\"error sending delivery receipt to system_id[%s] due %v.\", systemId, err)\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tlog.Printf(\"delivery receipt for message [%s] was send to system_id[%s]\", msgId, systemId)\n\t\t\t\t\t\t}\n\t\t\t\t\t}()\n\t\t\t\t}\n\t\t\t}\n\t\tcase DELIVER_SM_RESP: // deliver_sm_resp\n\t\t\t{\n\t\t\t\tif cmdLen > 16 {\n\t\t\t\t\tbuf := make([]byte, cmdLen-16)\n\t\t\t\t\tif _, err := io.ReadFull(conn, buf); err != nil {\n\t\t\t\t\t\tlog.Printf(\"error reading deliver_sm_resp for %s due %v. closing connection\", systemId, err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tlog.Println(\"deliver_sm_resp from\", systemId)\n\t\t\t}\n\t\tdefault:\n\t\t\t{\n\t\t\t\tif cmdLen > 16 {\n\t\t\t\t\tbuf := make([]byte, cmdLen-16)\n\t\t\t\t\tif _, err := io.ReadFull(conn, buf); err != nil {\n\t\t\t\t\t\tlog.Printf(\"error reading pdu for %s due %v. 
closing connection\", systemId, err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tlog.Printf(\"unsupported pdu cmd_id(%d) from %s\", cmdId, systemId)\n\t\t\t\t// generic nack packet with status \"Invalid Command ID\"\n\t\t\t\trespBytes = headerPDU(GENERIC_NACK, STS_INVALID_CMD, seqNum)\n\t\t\t}\n\t\t}\n\n\t\tif _, err := conn.Write(respBytes); err != nil {\n\t\t\tlog.Printf(\"error sending response to system_id[%s] due %v. closing connection\", systemId, err)\n\t\t\treturn\n\t\t}\n\n\t\tif stopLoop {\n\t\t\treturn\n\t\t}\n\t}\n}", "func (h *DNSHandler) DoTCP(w dns.ResponseWriter, req *dns.Msg) {\n\th.do(\"tcp\", w, req)\n}", "func sendTTYMsgToClient(cConn *websocket.Conn, sConn *websocket.Conn) {\n\tfor {\n\t\tr := &TTYResponse{}\n\t\terr := sConn.ReadJSON(r)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\t// Server closed connection\n\t\t\tcConn.Close()\n\t\t\treturn\n\t\t}\n\t\tcConn.WriteJSON(r)\n\t}\n}", "func (*TlsTcp) Send(ctx context.Context, requester net.Conn, datastr string) *IPLevel4Response {\n\tdata, err := base64.StdEncoding.DecodeString(datastr)\n\tif err != nil {\n\t\tlog.Fatalf(\"Some error occured during base64 decode. Error %s\", err.Error())\n\t}\n\trequester.SetDeadline(time.Now().Add(60 * time.Second))\n\tsendStart := time.Now()\n\t_, err = requester.Write(data)\n\tsendEnd := time.Now()\n\tsendDuration := stats.D(sendEnd.Sub(sendStart))\n\n\tstate := lib.GetState(ctx)\n\tstats.PushIfNotDone(ctx, state.Samples, stats.ConnectedSamples{\n\t\tSamples: []stats.Sample{\n\t\t\t{Metric: metrics.HTTPReqSending, Time: sendStart, Value: sendDuration},\n\t\t},\n\t\tTime: sendStart,\n\t})\n\tlvl4Response := IPLevel4Response{\n\t\tStatus: 200,\n\t}\n\tif err != nil {\n\t\tfmt.Println(\"send failure\")\n\t\tfmt.Printf(\"send failure: %s\\n\", err.Error())\n\t\tlvl4Response.Status = 300\n\t}\n\treturn &lvl4Response\n}", "func (s *Server) ServeTCP(w ResponseWriter, req *Request) {\n\tctx := s.pool.Get().(*Context)\n\tctx.writer.rebase(w)\n\tctx.Request = req\n\tctx.reset()\n\ts.handle(ctx)\n\ts.pool.Put(ctx)\n}", "func sendMsg(conn *net.UDPConn, raddr net.UDPAddr, query interface{}) {\n\ttotalSent.Add(1)\n\tvar b bytes.Buffer\n\tif err := bencode.Marshal(&b, query); err != nil {\n\t\treturn\n\t}\n\tif n, err := conn.WriteToUDP(b.Bytes(), &raddr); err != nil {\n\t\tlogger.Infof(\"DHT: node write failed to %+v, error=%s\", raddr, err)\n\t} else {\n\t\ttotalWrittenBytes.Add(int64(n))\n\t}\n\treturn\n}", "func GetCommandToTestTCPConnection(host string, port int32) string {\n\treturn fmt.Sprintf(\"if (-Not (Test-NetConnection %s -Port %d).TcpTestSucceeded)\"+\n\t\t\" {Write-Output 'connection failed:'; exit 10}\", host, port)\n}", "func send(msg TSP_msg, dest_ip string) (conn net.Conn) {\n\tconn, err := net.Dial(\"tcp\", dest_ip)\n\tif err != nil {\n\t\tfmt.Println(\"error connecting to \" + dest_ip)\n\t\tos.Exit(1)\n\t}\n\tencoder := gob.NewEncoder(conn)\n\tencoder.Encode(msg)\n\treturn\n}", "func query(object string, server string, tcpport string) (string, error) {\r\n\t// open connnection\r\n\tloggers.Info.Printf(\"whois.query() setup connection\")\r\n\tconn, err := net.DialTimeout(\"tcp\", net.JoinHostPort(server, tcpport), time.Second*30)\r\n\tif err != nil {\r\n\t\treturn \"\", fmt.Errorf(\"whois: connect to whois server failed: %v\", err)\r\n\t}\r\n\tdefer conn.Close()\r\n\t// set connection write timeout\r\n\t_ = conn.SetWriteDeadline(time.Now().Add(time.Second * 30))\r\n\t_, err = conn.Write([]byte(object + \"\\r\\n\"))\r\n\tif err != nil {\r\n\t\treturn \"\", 
fmt.Errorf(\"whois: send to whois server failed: %v\", err)\r\n\t}\r\n\t// set connection read timeout\r\n\t_ = conn.SetReadDeadline(time.Now().Add(time.Second * 30))\r\n\tbuffer, err := ioutil.ReadAll(conn)\r\n\tif err != nil {\r\n\t\treturn \"\", fmt.Errorf(\"whois: read from whois server failed: %v\", err)\r\n\t}\r\n\t// return result\r\n\treturn string(buffer), nil\r\n}", "func Query(addr string, cmd []byte) ([]byte, error) {\n\tudpAddr, err := net.ResolveUDPAddr(\"udp\", addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconn, err := net.DialUDP(\"udp\", nil, udpAddr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer conn.Close()\n\n\t_, err = conn.Write(cmd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar buf [1024]byte\n\tconn.SetReadDeadline(time.Now().Add(5000 * time.Millisecond))\n\tn, err := conn.Read(buf[0:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf[0:n], nil\n}", "func TestSendQStartupShutdown(t *testing.T) {\n\twg := &sync.WaitGroup{}\n\tr, w := io.Pipe()\n\tsq := NewSendQ(w, 100, wg)\n\tsq.Close()\n\tb := make([]byte, 10)\n\tn, err := r.Read(b)\n\tif n != 0 || err != io.EOF {\n\t\tt.Fatalf(\"Expected EOF, got %s\", err)\n\t}\n\twg.Wait()\n}", "func handleConnection(conn net.Conn) (err error) {\n\tdefer conn.Close()\n\n\ttimestamp := time.Now().UnixNano()\n\n\tconn.SetDeadline(time.Now().Add(100 * time.Millisecond))\n\n\tin := bufio.NewReader(conn)\n\n\tbuffer, err := in.Peek(1024)\n\tquery := queryPattern.FindStringSubmatch(string(buffer))\n\tif query == nil {\n\t\terr = fmt.Errorf(\"Failed to parse HTTP query header.\")\n\t\treturn\n\t}\n\n\turi, err := url.ParseRequestURI(query[2])\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar target string\n\tif portPattern.MatchString(uri.Host) {\n\t\ttarget = uri.Host\n\t} else {\n\t\ttarget = fmt.Sprintf(\"%s:80\", uri.Host)\n\t}\n\n\n\tfmt.Printf(\"Handling request for %s.\\n\", uri)\n\n\thost, err := net.Dial(\"tcp\", target)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer host.Close()\n\n\treqLog, err := os.Create(fmt.Sprintf(\"%d_req\", timestamp))\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\treturn\n\t}\n\n\tresLog, err := os.Create(fmt.Sprintf(\"%d_res\", timestamp))\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\treturn\n\t}\n\n\terr = pipe(in, tee { host, reqLog, 1000 }, 100)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\treturn\n\t}\n\n\terr = pipe(host, tee { conn, resLog, 1000 }, 2000)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\treturn\n\t}\n\n\treturn\n}", "func TestDisconnect2(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"short\")\n\t}\n\tcheckConnStr(t)\n\tSetLogger(testLogger{t})\n\n\t// Revert to the normal dialer after the test is done.\n\tnormalCreateDialer := createDialer\n\tdefer func() {\n\t\tcreateDialer = normalCreateDialer\n\t}()\n\n\tend := make(chan error)\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tgo func() {\n\t\twaitDisrupt := make(chan struct{})\n\t\tctx, cancel = context.WithTimeout(ctx, time.Second*2)\n\t\tdefer cancel()\n\n\t\tcreateDialer = func(p *connectParams) dialer {\n\t\t\tnd := tcpDialer{&net.Dialer{Timeout: p.dial_timeout, KeepAlive: p.keepAlive}}\n\t\t\tdi := &dialerInterrupt{nd: nd}\n\t\t\tgo func() {\n\t\t\t\t<-waitDisrupt\n\t\t\t\tdi.Interrupt(false)\n\t\t\t}()\n\t\t\treturn di\n\t\t}\n\t\tdb, err := sql.Open(\"sqlserver\", makeConnStr(t).String())\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tif err := db.PingContext(ctx); err != nil 
{\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer db.Close()\n\n\t\t_, err = db.ExecContext(ctx, `SET LOCK_TIMEOUT 1800;`)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tclose(waitDisrupt)\n\n\t\t_, err = db.ExecContext(ctx, `waitfor delay '00:00:3';`)\n\t\tend <- err\n\t}()\n\n\ttimeout := time.After(10 * time.Second)\n\tselect {\n\tcase err := <-end:\n\t\tif err == nil {\n\t\t\tt.Fatal(\"test err\")\n\t\t}\n\tcase <-timeout:\n\t\tt.Fatal(\"timeout\")\n\t}\n}", "func TestSendQErrorPropagation(t *testing.T) {\n\twg := &sync.WaitGroup{}\n\tr, w := io.Pipe()\n\tsq := NewSendQ(w, 100, wg)\n\texpected := errors.New(\"test\")\n\tr.CloseWithError(expected)\n\tsq.Write([]byte(\"this should fail\"))\n\terr := <-sq.ErrChan()\n\tif err != expected {\n\t\tt.Errorf(\"Expected a known error, got %d\", err)\n\t}\n\tif unknown, done := <-sq.ErrChan(); done {\n\t\tt.Errorf(\"Expected error channel to close, but it didn't. Instead, got '%s'\", unknown)\n\t}\n\twg.Wait()\n}" ]
[ "0.58487767", "0.55408037", "0.54952556", "0.54832953", "0.54605734", "0.5419732", "0.5407065", "0.5375777", "0.53394234", "0.52898824", "0.5242137", "0.52413064", "0.52413064", "0.5237375", "0.5227795", "0.52071977", "0.51766586", "0.5172305", "0.5154059", "0.5132827", "0.51303804", "0.51230603", "0.5110879", "0.5102149", "0.50980747", "0.5091961", "0.50802165", "0.5073268", "0.50730175", "0.5072353", "0.5068036", "0.50608903", "0.5046074", "0.50405526", "0.5031339", "0.5015527", "0.501427", "0.49833578", "0.4976485", "0.4964236", "0.49497762", "0.49479422", "0.494646", "0.4934401", "0.4933158", "0.49267218", "0.49256447", "0.49236792", "0.49016732", "0.4888439", "0.48876625", "0.4886723", "0.48787287", "0.48733586", "0.48722458", "0.48604652", "0.48594654", "0.48588482", "0.48550344", "0.48525825", "0.48412362", "0.48344293", "0.48317882", "0.48262224", "0.48188812", "0.48014134", "0.4801043", "0.47957718", "0.47927466", "0.47926623", "0.47885272", "0.47868523", "0.47758836", "0.476816", "0.47641134", "0.4755584", "0.4752086", "0.4751026", "0.47509387", "0.47457966", "0.47446388", "0.4743019", "0.47413257", "0.47338971", "0.4733376", "0.47332907", "0.47286567", "0.4720802", "0.47191784", "0.47149575", "0.47139078", "0.47126406", "0.47085252", "0.4706369", "0.470587", "0.4703038", "0.47015187", "0.4690218", "0.46870685", "0.46865186" ]
0.58927166
0
Test failure due to a response that is larger than the maximum message size for DNS over TCP (65535).
func TestAcceptOversize(t *testing.T) { doh := newFakeTransport() client, server := makePair() // Start the forwarder running. go Accept(doh, server) lbuf := make([]byte, 2) // Send Query queryData := simpleQueryBytes binary.BigEndian.PutUint16(lbuf, uint16(len(queryData))) client.Write(lbuf) client.Write(queryData) // Read query <-doh.query // Send oversize response doh.response <- make([]byte, 65536) // Accept should have closed the socket because the response // cannot be written. n, _ := client.Read(lbuf) if n != 0 { t.Error("Expected to read 0 bytes") } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func isPacketTooBig(err error) bool {\n\treturn false\n}", "func TestMalformedPacket(t *testing.T) {\n\t// copied as bytes from Wireshark, then modified the RelayMessage option length\n\tbytes := []byte{\n\t\t0x0c, 0x00, 0x24, 0x01, 0xdb, 0x00, 0x30, 0x10, 0xb0, 0x8a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n\t\t0x00, 0x0a, 0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x0b, 0xab, 0xff, 0xfe, 0x8a,\n\t\t0x6d, 0xf2, 0x00, 0x09, 0x00, 0x50 /*was 0x32*/, 0x01, 0x8d, 0x3e, 0x24, 0x00, 0x01, 0x00, 0x0e, 0x00, 0x01,\n\t\t0x00, 0x01, 0x0c, 0x71, 0x3d, 0x0e, 0x00, 0x0b, 0xab, 0x8a, 0x6d, 0xf2, 0x00, 0x08, 0x00, 0x02,\n\t\t0x00, 0x00, 0x00, 0x03, 0x00, 0x0c, 0xee, 0xbf, 0xfb, 0x6e, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0x00, 0x06, 0x00, 0x02, 0x00, 0x17, 0x00, 0x25, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x09,\n\t\t0x00, 0x03, 0x08, 0x00, 0xf0, 0x7f, 0x06, 0xd6, 0x4c, 0x3c, 0x00, 0x12, 0x00, 0x04, 0x09, 0x01,\n\t\t0x08, 0x5a,\n\t}\n\tpacket := Packet6(bytes)\n\t_, err := packet.dhcp6message()\n\tif err == nil {\n\t\tt.Fatalf(\"Should be unable to extract dhcp6message, but did not fail\")\n\t}\n}", "func TestOverflowValidFailureURLError(t *testing.T) {\n\tassert := assert.New(t)\n\n\tvar output bytes.Buffer\n\tlogger := getNewTestOutputLogger(&output)\n\n\ttrans := &transport{}\n\ttrans.fn = func(req *http.Request, count int) (resp *http.Response, err error) {\n\t\tresp = nil\n\t\terr = fmt.Errorf(\"My Error.\")\n\t\treturn\n\t}\n\n\tw := webhook.W{\n\t\tUntil: time.Now(),\n\t\tFailureURL: \"http://localhost:12345/bar\",\n\t\tEvents: []string{\"iot\", \"test\"},\n\t}\n\tw.Config.URL = \"http://localhost:9999/foo\"\n\tw.Config.ContentType = \"application/json\"\n\n\tobs, err := OutboundSenderFactory{\n\t\tListener: w,\n\t\tClient: &http.Client{Transport: trans},\n\t\tCutOffPeriod: time.Second,\n\t\tNumWorkers: 10,\n\t\tQueueSize: 10,\n\t\tLogger: logger,\n\t\tProfilerFactory: testServerProfilerFactory,\n\t}.New()\n\tassert.Nil(err)\n\n\tif _, ok := obs.(*CaduceusOutboundSender); !ok {\n\t\tassert.Fail(\"Interface returned by OutboundSenderFactory.New() must be implemented by a CaduceusOutboundSender.\")\n\t}\n\n\tobs.(*CaduceusOutboundSender).queueOverflow()\n\tassert.NotNil(output.String())\n}", "func expectedResponseLenth(responseCode uint8, responseLength uint8) (byteCount int, err error) {\n\tswitch responseCode {\n\tcase fcReadHoldingRegisters,\n\t fcReadInputRegisters,\n\t fcReadCoils,\n\t fcReadDiscreteInputs: byteCount = int(responseLength)\n\tcase fcWriteSingleRegister,\n\t fcWriteMultipleRegisters,\n\t fcWriteSingleCoil,\n\t fcWriteMultipleCoils: byteCount = 3\n\tcase fcMaskWriteRegister: byteCount = 5\n\tcase fcReadHoldingRegisters | 0x80,\n\t fcReadInputRegisters | 0x80,\n\t fcReadCoils | 0x80,\n\t fcReadDiscreteInputs | 0x80,\n\t fcWriteSingleRegister | 0x80,\n\t fcWriteMultipleRegisters | 0x80,\n\t fcWriteSingleCoil | 0x80,\n\t fcWriteMultipleCoils | 0x80,\n\t fcMaskWriteRegister | 0x80: byteCount = 0\n\tdefault: err = fmt.Errorf(\"unexpected response code (%v)\", responseCode)\n\t}\n\n\treturn\n}", "func TestReqRespTimeoutErr(t *testing.T) {\n\t// Connect to NATS\n\tm := NewMessenger(testConfig)\n\tdefer m.Close()\n\n\t// Use a WaitGroup to wait for the message to arrive\n\twg := sync.WaitGroup{}\n\twg.Add(1)\n\n\t// Subscribe to the source subject with the message processing function\n\ttestSubject := \"test_subject\"\n\ttestMsgContent := []byte(\"Some text to send...\")\n\tm.Response(testSubject, func(content []byte) ([]byte, error) {\n\t\tdefer 
wg.Done()\n\t\trequire.EqualValues(t, content, testMsgContent)\n\t\ttime.Sleep(100 * time.Millisecond)\n\t\treturn []byte(``), nil\n\t})\n\n\t// Send a message\n\t_, err := m.Request(testSubject, testMsgContent, 50*time.Millisecond)\n\tassert.NotNil(t, err)\n\tassert.Equal(t, err, errors.New(\"nats: timeout\"), \"should be equal\")\n\n\t// Wait for the message to come in\n\twg.Wait()\n}", "func dnsSize(isUDP bool, r *dns.Msg) (size int) {\n\tif !isUDP {\n\t\treturn dns.MaxMsgSize\n\t}\n\n\tvar size16 uint16\n\tif o := r.IsEdns0(); o != nil {\n\t\tsize16 = o.UDPSize()\n\t}\n\n\treturn int(mathutil.Max(dns.MinMsgSize, size16))\n}", "func Test_Cluster_Message_Length(t *testing.T) {\n\n\ttotal := 5\n\tvar server [5]cluster.Server\n\tfor i := 0; i < total; i++ {\n\t\tserver[i] = cluster.New(i+1, \"../config.json\")\n\t}\n\n\tmessage := make([]byte, 1000000)\n\tx := &cluster.Envelope{SendTo: -1, SendBy: 1, Msg: message}\n\t//Broadcasting long data\n\tserver[0].Outbox() <- x\n\n\tfor i := 0; i< total; i++ {\n\t\tclose(server[i].Outbox())\n\t}\n\tb, _ := json.Marshal(*x)\n\n\t// Calculating if all servers recieve the data fully...\n\tfor i := 1; i < total; i++ {\n\t\tenvelope := <-server[i].Inbox()\n\t\tc, _ := json.Marshal(*envelope)\n\t\tif len(c) != len(b) {\n\t\t\tpanic(\"Message not recieved fully..\")\n\t\t}\n\t}\n\n\tt.Log(\"Length test passed.\")\n}", "func TestSendDataLength(t *testing.T) {\n\tloadConfig()\n\tfor _, item := range tests {\n\t\tvar buf bytes.Buffer\n\t\tfmt.Fprint(&buf, \"UR001!\")\n\t\tv, err := sendData(&buf)\n\t\tif err != nil {\n\t\t\tt.Error(\"Error\", err.Error())\n\t\t}\n\t\tif len(v) != item.resultSize {\n\t\t\tt.Error(\n\t\t\t\t\"Command Sent\", item.command,\n\t\t\t\t\"Expected Return Size\", item.resultSize,\n\t\t\t\t\"Received\", len(v),\n\t\t\t)\n\t\t}\n\t}\n}", "func TestBigMessage(t *testing.T) {\n\tda := NewFrameOutputBuffer()\n\n\td := &model.Device{DeviceEUI: makeRandomEUI(), DevAddr: makeRandomDevAddr()}\n\tbuffer := make([]byte, 255)\n\tif n, err := rand.Read(buffer); n != len(buffer) || err != nil {\n\t\tt.Fatal(\"Couldn't generate random numbers\")\n\t}\n\n\tda.SetPayload(d.DeviceEUI, buffer, 12, false)\n\tda.AddMACCommand(d.DeviceEUI, &protocol.MACLinkADRReq{})\n\n\t// Retrieve the messages until there's no more\n\tvar returnedMacs []protocol.MACCommand\n\tvar returnedBuffer []byte\n\n\tvar err error\n\tvar ret protocol.PHYPayload\n\titerations := 0\n\tfor err == nil {\n\t\tret, err = da.GetPHYPayloadForDevice(d, &context)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif iterations > 999 {\n\t\t\tt.Errorf(\"Breaking after %d iterations\", iterations)\n\t\t\tbreak\n\t\t}\n\t\titerations++\n\t\treturnedMacs = append(returnedMacs, ret.MACPayload.MACCommands.List()...)\n\t\treturnedMacs = append(returnedMacs, ret.MACPayload.FHDR.FOpts.List()...)\n\t\treturnedBuffer = append(returnedBuffer, ret.MACPayload.FRMPayload...)\n\t}\n\tif iterations == 0 {\n\t\tt.Fatalf(\"Didn't get anything at all, err = %s\", err)\n\t}\n\tif !bytes.Equal(buffer, returnedBuffer) {\n\t\tt.Fatalf(\"Mismatch on returned buffer (orig len=%d, returned len=%d)\", len(buffer), len(returnedBuffer))\n\t}\n}", "func (w *response) requestTooLarge() {\n\tw.closeAfterReply = true\n\tw.requestBodyLimitHit = true\n\tif !w.wroteHeader {\n\t\tw.Header().Set(\"Connection\", \"close\")\n\t}\n}", "func TestTCPProbeTimeout(t *testing.T) {\n\tprobeExpectTimeout(t, 49)\n\tprobeExpectTimeout(t, 50)\n\tprobeExpectTimeout(t, 51)\n}", "func TestIsErrTooLarge(t *testing.T) {\n\tassert.True(t, 
IsErrTooLarge(thrift.NewTTransportException(TRANSPORT_EXCEPTION_REQUEST_TOO_LARGE, \"error\")))\n\tassert.True(t, IsErrTooLarge(thrift.NewTTransportException(TRANSPORT_EXCEPTION_RESPONSE_TOO_LARGE, \"error\")))\n\tassert.False(t, IsErrTooLarge(nil))\n\tassert.False(t, IsErrTooLarge(errors.New(\"error\")))\n\tassert.False(t, IsErrTooLarge(thrift.NewTTransportException(TRANSPORT_EXCEPTION_NOT_OPEN, \"error\")))\n\tassert.False(t, IsErrTooLarge(thrift.NewTApplicationException(0, \"error\")))\n}", "func TestReadDataWithMaxSize(t *testing.T) {\n\ttests := []struct {\n\t\tlines string\n\t\tmaxSize int\n\t\terr error\n\t}{\n\t\t// Maximum size of zero (the default) should not return an error.\n\t\t{\"Test message.\\r\\n.\\r\\n\", 0, nil},\n\n\t\t// Messages below the maximum size should not return an error.\n\t\t{\"Test message.\\r\\n.\\r\\n\", 16, nil},\n\n\t\t// Messages matching the maximum size should not return an error.\n\t\t{\"Test message.\\r\\n.\\r\\n\", 15, nil},\n\n\t\t// Messages above the maximum size should return a maximum size exceeded error.\n\t\t{\"Test message.\\r\\n.\\r\\n\", 14, maxSizeExceeded(14)},\n\t}\n\tvar buf bytes.Buffer\n\ts := &session{}\n\ts.br = bufio.NewReader(&buf)\n\n\tfor _, tt := range tests {\n\t\ts.srv = &Server{MaxSize: tt.maxSize}\n\t\tbuf.Write([]byte(tt.lines))\n\t\t_, err := s.readData()\n\t\tif err != tt.err {\n\t\t\tt.Errorf(\"readData(%v) returned err: %v\", tt.lines, tt.err)\n\t\t}\n\t}\n}", "func SetTTLTooBig(t *testing.T, f func() (mangos.Socket, error)) {\n\ts, err := f()\n\tif err != nil {\n\t\tt.Errorf(\"Failed to make socket: %v\", err)\n\t\treturn\n\t}\n\tdefer s.Close()\n\terr = s.SetOption(mangos.OptionTTL, 256)\n\tswitch err {\n\tcase mangos.ErrBadValue: // expected result\n\tcase nil:\n\t\tt.Errorf(\"Negative test fail, permitted too large TTL\")\n\tdefault:\n\t\tt.Errorf(\"Negative test fail (256), wrong error %v\", err)\n\t}\n}", "func TestTimeoutTooShort(t *testing.T) {\n\t_, err := delayedBatcher.SendRequestWithTimeout(\n\t\t&nonEmptyRequestBody,\n\t\tdelayedBatcher.BatchTimeout,\n\t)\n\tif err == nil {\n\t\tt.Errorf(\n\t\t\t\"Expecting error when timeout too short %v\",\n\t\t\tdelayedBatcher.BatchTimeout,\n\t\t)\n\t}\n}", "func TestLen(t *testing.T) {\n\tconst N = 100000\n\tch := NewSize(100)\n\tvar wg sync.WaitGroup\n\twg.Add(N * 2)\n\tgo func() {\n\t\tfor i := 0; i < N; i++ {\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tv, ok := ch.Recv(true)\n\t\t\t\tif !ok {\n\t\t\t\t\tt.Fatal(\"!ok\")\n\t\t\t\t}\n\t\t\t\tif ch.Len() == -1 {\n\t\t\t\t\tt.Fatalf(\"ch.Len() == -1: %v\", v)\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tfor i := 0; i < N; i++ {\n\t\t\tgo func(i int) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tch.Send(i, true)\n\t\t\t}(i)\n\t\t}\n\t}()\n\n\twg.Wait()\n\tch.Close()\n}", "func TestPacket_DataLength(t *testing.T) {\n\ttearDown := setUp(t)\n\tdefer tearDown(t)\n\n\tassert.Equal(t, len(payload), packet.DataLength())\n}", "func TestTFramedTransportFactoryMaxLength(t *testing.T) {\n\tmockTrFactory := new(mockTTransportFactory)\n\tmaxLength := uint32(1024)\n\ttrFactory := NewTFramedTransportFactoryMaxLength(mockTrFactory, maxLength)\n\tmockTr := new(mockTTransport)\n\tmockTrFactory.On(\"GetTransport\", mockTr).Return(mockTr, nil)\n\n\ttr, err := trFactory.GetTransport(mockTr)\n\tassert.NoError(t, err)\n\tassert.Equal(t, mockTr, tr.(*TFramedTransport).transport)\n\tassert.Equal(t, maxLength, tr.(*TFramedTransport).maxLength)\n\tmockTrFactory.AssertExpectations(t)\n}", "func (res *pbResponse) Size() (n int) 
{\n\tvar size uint64\n\tsize += 11\n\tsize += 11 + uint64(len(res.Error))\n\tsize += 11 + uint64(len(res.Reply))\n\treturn int(size)\n}", "func responseSize(res *http.Response) int64 {\n\tfield, ok := res.Header[\"Content-Length\"]\n\tif !ok {\n\t\treturn -1\n\t}\n\tif len(field) != 1 {\n\t\treturn -1\n\t}\n\tsize, err := strconv.ParseInt(field[0], 0, 64)\n\tif err != nil {\n\t\treturn -1\n\t}\n\treturn size\n}", "func (t HeartbeatResponse) Size(version int16) int32 {\n\tvar sz int32\n\tif version >= 1 {\n\t\tsz += sizeof.Int32 // ThrottleTimeMs\n\t}\n\tsz += sizeof.Int16 // ErrorCode\n\treturn sz\n}", "func TestHalfOpenConnsLimit(t *testing.T) {\n\tcfg := testingConfig()\n\tcfg.DialTimeout = time.Millisecond\n\tcfg.BaseDir = \"./leecher\"\n\tcl, err := NewClient(cfg)\n\ttr, err := cl.AddFromFile(helloWorldTorrentFile)\n\trequire.NoError(t, err)\n\ttr.StartDataTransfer()\n\taddInvalidPeers := func(invalidAddrPrefix string) {\n\t\tpeers := []Peer{}\n\t\tfor i := 0; i <= 255; i++ {\n\t\t\tpeers = append(peers, addrToPeer(invalidAddrPrefix+strconv.Itoa(i)+\":9090\", SourceUser))\n\t\t}\n\t\trequire.NoError(t, tr.AddPeers(peers...))\n\t}\n\t//these are invalid IP addreses (https://stackoverflow.com/questions/10456044/what-is-a-good-invalid-ip-address-to-use-for-unit-tests)\n\taddInvalidPeers(\"192.0.2.\")\n\taddInvalidPeers(\"198.51.100.\")\n\taddInvalidPeers(\"203.0.113.\")\n\t//wait until we have tried to connect to all peers\n\tfailure := time.NewTimer(10 * time.Second)\n\tfor {\n\t\ttime.Sleep(30 * time.Millisecond)\n\t\t//hacky way to get all conns that we tried to dial but failed\n\t\ttried, err := strconv.Atoi(cl.counters.Get(\"could not dial\").String())\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tif tried >= 3*256 {\n\t\t\t//we tried all conns\n\t\t\tbreak\n\t\t}\n\t\tselect {\n\t\tcase <-failure.C:\n\t\t\tt.FailNow()\n\t\tdefault:\n\t\t}\n\t}\n}", "func TestMaxGRPCMessageSize(t *testing.T) {\n\tvar maxmax int\n\n\tfor _, s := range splitter.SupportedAlgorithms() {\n\t\tif max := splitter.GetFactory(s)().MaxSegmentSize(); max > maxmax {\n\t\t\tmaxmax = max\n\t\t}\n\t}\n\n\tif got, want := maxmax, repo.MaxGRPCMessageSize-maxGRPCMessageOverhead; got > want {\n\t\tt.Fatalf(\"invalid constant MaxGRPCMessageSize: %v, want >=%v\", got, want)\n\t}\n}", "func TestUDPReadTimeout(t *testing.T) {\n\tla, err := ResolveUDPAddr(\"udp4\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tc, err := ListenUDP(\"udp4\", la)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer c.Close()\n\n\tc.SetDeadline(time.Now())\n\tb := make([]byte, 1)\n\tn, addr, err := c.ReadFromUDP(b)\n\tif !errors.Is(err, os.ErrDeadlineExceeded) {\n\t\tt.Errorf(\"ReadFromUDP got err %v want os.ErrDeadlineExceeded\", err)\n\t}\n\tif n != 0 {\n\t\tt.Errorf(\"ReadFromUDP got n %d want 0\", n)\n\t}\n\tif addr != nil {\n\t\tt.Errorf(\"ReadFromUDP got addr %+#v want nil\", addr)\n\t}\n}", "func (r *Responder) RequestEntityTooLarge() { r.write(http.StatusRequestEntityTooLarge) }", "func CheckSize(buf []byte, expected int, descrip string) {\n\tif len(buf) != expected {\n\t\tpanic(fmt.Sprintf(\"Incorrect %s buffer size, expected (%d), got (%d).\", descrip, expected, len(buf)))\n\t}\n}", "func (b *Broker) readResponse(conn *net.TCPConn) (uint32, []byte, os.Error) {\n reader := bufio.NewReader(conn)\n length := make([]byte, 4)\n lenRead, err := io.ReadFull(reader, length)\n if err != nil {\n return 0, []byte{}, err\n }\n if lenRead != 4 || lenRead < 0 {\n return 0, []byte{}, os.NewError(\"invalid length of the packet length 
field\")\n }\n\n expectedLength := binary.BigEndian.Uint32(length)\n messages := make([]byte, expectedLength)\n lenRead, err = io.ReadFull(reader, messages)\n if err != nil {\n return 0, []byte{}, err\n }\n\n if uint32(lenRead) != expectedLength {\n return 0, []byte{}, os.NewError(fmt.Sprintf(\"Fatal Error: Unexpected Length: %d expected: %d\", lenRead, expectedLength))\n }\n\n errorCode := binary.BigEndian.Uint16(messages[0:2])\n if errorCode != 0 {\n return 0, []byte{}, os.NewError(strconv.Uitoa(uint(errorCode)))\n }\n return expectedLength, messages[2:], nil\n}", "func (ts *testSystem) serverTryRead(size int, expectedData []byte) {\n\tvar q struct{}\n\tts.t.Logf(\"server starts to read...\")\n\t_, data, err := ts.server.Read()\n\tif err != nil {\n\t\tts.t.Fatalf(\"Server received error during read.\")\n\t\treturn\n\t}\n\n\tswitch size {\n\tcase SHORT:\n\t\t//fmt.Printf(\"WRONG!! Server received short message: %s\\n\", data)\n\t\tfmt.Printf(\"expected data: %s, size: %d\\n\", expectedData, size)\n\t\tts.t.Fatalf(\"Server received short message: %s\\n\", data)\n\t\treturn\n\tcase LONG:\n\t\tts.exitChan <- q\n\t\tif len(data) != len(expectedData) {\n\t\t\tts.t.Fatalf(\"Expecting data %s, server received longer message: %s\",\n\t\t\t\texpectedData, data)\n\t\t}\n\t\treturn\n\tcase NORMAL:\n\t\tts.exitChan <- q\n\t\tif !bytes.Equal(data, expectedData) {\n\t\t\tts.t.Fatalf(\"Expecting %s, server received message: %s\",\n\t\t\t\texpectedData, data)\n\t\t}\n\t\treturn\n\t}\n}", "func (s *HTTPTestSuite) TestMakeRequestFailureResponse() {\n\tcheck := assert.New(s.T())\n\n\t// mock http request\n\thttpmock.RegisterResponder(http.MethodGet, s.url,\n\t\thttpmock.NewStringResponder(http.StatusBadGateway, ``))\n\n\t// make http request\n\tstatusCode, response, _, err := s.requestHandler.MakeRequest(s.requestSpecifications)\n\tif err == nil {\n\t\tcheck.Equal(statusCode, http.StatusBadGateway)\n\t\tcheck.Equal(string(response), ``)\n\t}\n\n\t// get the amount of calls for the registered responder\n\tinfo := httpmock.GetCallCountInfo()\n\tcheck.Equal(1, info[http.MethodGet+\" \"+s.url])\n}", "func get_len(addr, path string) (int64, error) {\n\tcon, err := net.Dial(\"tcp\", addr)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer con.Close()\n\n\tif err = parget.Write(con, parget.Msg{Typ: parget.TypGetLen, Path: path}); err != nil {\n\t\treturn 0, err\n\t}\n\n\tresp, err := parget.Read(con)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif resp.Typ == parget.TypErr {\n\t\treturn 0, errors.New(\"error message from server: \" + string(resp.Data))\n\t}\n\tif resp.Typ != parget.TypLen {\n\t\treturn 0, errors.New(\"invalid response type from server: \" + strconv.Itoa(int(resp.Typ)))\n\t}\n\treturn resp.Len, nil\n}", "func (t systemIntType) MaxTextResponseByteLength(_ *sql.Context) uint32 {\n\t// system types are not sent directly across the wire\n\treturn 0\n}", "func (s) TestResolverMaxStreamDuration(t *testing.T) {\n\tmgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer mgmtServer.Stop()\n\n\t// Create a bootstrap configuration specifying the above management server.\n\tnodeID := uuid.New().String()\n\tcleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{\n\t\tNodeID: nodeID,\n\t\tServerURI: mgmtServer.Address,\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer cleanup()\n\n\tconst serviceName = \"my-service-client-side-xds\"\n\ttcc, rClose := buildResolverForTarget(t, resolver.Target{URL: 
*testutils.MustParseURL(\"xds:///\" + serviceName)})\n\tdefer rClose()\n\n\t// Configure the management server with a listener resource that specifies a\n\t// max stream duration as part of its HTTP connection manager. Also\n\t// configure a route configuration resource, which has multiple routes with\n\t// different values of max stream duration.\n\tldsName := serviceName\n\trdsName := \"route-\" + serviceName\n\thcm := testutils.MarshalAny(&v3httppb.HttpConnectionManager{\n\t\tRouteSpecifier: &v3httppb.HttpConnectionManager_Rds{Rds: &v3httppb.Rds{\n\t\t\tConfigSource: &v3corepb.ConfigSource{\n\t\t\t\tConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}},\n\t\t\t},\n\t\t\tRouteConfigName: rdsName,\n\t\t}},\n\t\tHttpFilters: []*v3httppb.HttpFilter{e2e.RouterHTTPFilter},\n\t\tCommonHttpProtocolOptions: &v3corepb.HttpProtocolOptions{\n\t\t\tMaxStreamDuration: durationpb.New(1 * time.Second),\n\t\t},\n\t})\n\tresources := e2e.UpdateOptions{\n\t\tNodeID: nodeID,\n\t\tListeners: []*v3listenerpb.Listener{{\n\t\t\tName: ldsName,\n\t\t\tApiListener: &v3listenerpb.ApiListener{ApiListener: hcm},\n\t\t\tFilterChains: []*v3listenerpb.FilterChain{{\n\t\t\t\tName: \"filter-chain-name\",\n\t\t\t\tFilters: []*v3listenerpb.Filter{{\n\t\t\t\t\tName: wellknown.HTTPConnectionManager,\n\t\t\t\t\tConfigType: &v3listenerpb.Filter_TypedConfig{TypedConfig: hcm},\n\t\t\t\t}},\n\t\t\t}},\n\t\t}},\n\t\tRoutes: []*v3routepb.RouteConfiguration{{\n\t\t\tName: rdsName,\n\t\t\tVirtualHosts: []*v3routepb.VirtualHost{{\n\t\t\t\tDomains: []string{ldsName},\n\t\t\t\tRoutes: []*v3routepb.Route{\n\t\t\t\t\t{\n\t\t\t\t\t\tMatch: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: \"/foo\"}},\n\t\t\t\t\t\tAction: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{\n\t\t\t\t\t\t\tClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{WeightedClusters: &v3routepb.WeightedCluster{\n\t\t\t\t\t\t\t\tClusters: []*v3routepb.WeightedCluster_ClusterWeight{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName: \"A\",\n\t\t\t\t\t\t\t\t\t\tWeight: &wrapperspb.UInt32Value{Value: 100},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t}},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tMaxStreamDuration: &v3routepb.RouteAction_MaxStreamDuration{\n\t\t\t\t\t\t\t\tMaxStreamDuration: durationpb.New(5 * time.Second),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tMatch: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: \"/bar\"}},\n\t\t\t\t\t\tAction: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{\n\t\t\t\t\t\t\tClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{WeightedClusters: &v3routepb.WeightedCluster{\n\t\t\t\t\t\t\t\tClusters: []*v3routepb.WeightedCluster_ClusterWeight{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName: \"B\",\n\t\t\t\t\t\t\t\t\t\tWeight: &wrapperspb.UInt32Value{Value: 100},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t}},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tMaxStreamDuration: &v3routepb.RouteAction_MaxStreamDuration{\n\t\t\t\t\t\t\t\tMaxStreamDuration: durationpb.New(0 * time.Second),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tMatch: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: \"/\"}},\n\t\t\t\t\t\tAction: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{\n\t\t\t\t\t\t\tClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{WeightedClusters: &v3routepb.WeightedCluster{\n\t\t\t\t\t\t\t\tClusters: 
[]*v3routepb.WeightedCluster_ClusterWeight{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName: \"C\",\n\t\t\t\t\t\t\t\t\t\tWeight: &wrapperspb.UInt32Value{Value: 100},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t}},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}},\n\t\t}},\n\t\tSkipValidation: true,\n\t}\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tif err := mgmtServer.Update(ctx, resources); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Read the update pushed by the resolver to the ClientConn.\n\tgotState, err := tcc.stateCh.Receive(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"Timeout waiting for an update from the resolver: %v\", err)\n\t}\n\trState := gotState.(resolver.State)\n\tif err := rState.ServiceConfig.Err; err != nil {\n\t\tt.Fatalf(\"Received error in service config: %v\", rState.ServiceConfig.Err)\n\t}\n\tcs := iresolver.GetConfigSelector(rState)\n\tif cs == nil {\n\t\tt.Fatal(\"Received nil config selector in update from resolver\")\n\t}\n\n\ttestCases := []struct {\n\t\tname string\n\t\tmethod string\n\t\twant *time.Duration\n\t}{{\n\t\tname: \"RDS setting\",\n\t\tmethod: \"/foo/method\",\n\t\twant: newDurationP(5 * time.Second),\n\t}, {\n\t\tname: \"explicit zero in RDS; ignore LDS\",\n\t\tmethod: \"/bar/method\",\n\t\twant: nil,\n\t}, {\n\t\tname: \"no config in RDS; fallback to LDS\",\n\t\tmethod: \"/baz/method\",\n\t\twant: newDurationP(time.Second),\n\t}}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\treq := iresolver.RPCInfo{\n\t\t\t\tMethod: tc.method,\n\t\t\t\tContext: ctx,\n\t\t\t}\n\t\t\tres, err := cs.SelectConfig(req)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"cs.SelectConfig(%v): %v\", req, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tres.OnCommitted()\n\t\t\tgot := res.MethodConfig.Timeout\n\t\t\tif !cmp.Equal(got, tc.want) {\n\t\t\t\tt.Errorf(\"For method %q: res.MethodConfig.Timeout = %v; want %v\", tc.method, got, tc.want)\n\t\t\t}\n\t\t})\n\t}\n}", "func ReplyLineTooLong() *Reply { return &Reply{500, []string{\"Line too long\"}, nil} }", "func main () {\n\n if len(os.Args) < 2 {\n fmt.Println(\"To few args\")\n os.Exit(1)\n }\n rem := os.Args[1]\n port := \"1075\"\n if len(os.Args) == 3 {\n port = os.Args[2]\n }\n\n address, err := exec.Command(\"hostname\", \"-I\").Output()\n checkError(err, 901)\n adres := strings.Fields(string(address))\n addr := \"\"\n for i:=0; i < len(adres); i++ {\n if adres[i][0:3] == \"172\" {\n addr = adres[i]\n }\n }\nfmt.Println(\"My IP: \", addr)\n\n service := addr + \":12500\"\n src, err := net.ResolveUDPAddr(\"udp\", service)\n checkError(err, 902)\n\n service = rem + \":\" + port\n nod, err := net.ResolveUDPAddr(\"udp\", service)\n checkError(err, 903)\n\n var msg Message\n msg.Idx = 04\n msg.Key = \"\"\n msg.Src = src\n msg.Dst = nod\n\n conn, err := net.DialUDP(\"udp\", nil, nod)\n checkError(err, 904)\n\n defer conn.Close()\n buffer, err := json.Marshal(msg)\n checkError(err, 905)\n\n// fmt.Println(\"Sending: \", buffer[0:])\n _, err = conn.Write(buffer)\n checkError(err, 906)\n\n localAddr, err := net.ResolveUDPAddr(\"udp\", \":12500\")\n checkError(err, 907)\n conn, err = net.ListenUDP(\"udp\", localAddr)\n checkError(err, 908)\n for {\n answ := waitForRec(conn)\n if answ.Key == \"\" {\n conn.Close()\n break\n }\n nBigInt := big.Int{}\n nBigInt.SetString(answ.Key, 16)\n// fmt.Printf(\"%s %s\\n\", answ.Key, nBigInt.String())\n fmt.Printf(\"%s \", answ.Key)\n fmt.Println(\" \", answ.Src)\n }\n conn.Close()\n}", "func testMdns(t 
*testing.T) {\n\tservice := \"_liqo._tcp\"\n\tdomain := \"local.\"\n\n\tgo clientCluster.discoveryCtrl.Register()\n\n\ttime.Sleep(1 * time.Second)\n\n\ttxts := []*discovery.TxtData{}\n\tclientCluster.discoveryCtrl.Resolve(service, domain, 3, &txts)\n\n\ttime.Sleep(1 * time.Second)\n\n\t// TODO: find better way to test mDNS, local IP is not always detected\n\tassert.Assert(t, len(txts) >= 0, \"If this line is reached test would be successful, no foreign packet can reach our testing environment at the moment\")\n}", "func TestFlushFrameSizeError(t *testing.T) {\n\tmockTr := new(mockTTransport)\n\ttr := NewTFramedTransport(mockTr)\n\tbuff := make([]byte, 10)\n\t_, err := tr.Write(buff)\n\tassert.Nil(t, err)\n\tmockTr.On(\"Write\", []byte{0, 0, 0, 10}).Return(0, errors.New(\"error\"))\n\n\tassert.Error(t, tr.Flush(context.TODO()))\n\tmockTr.AssertExpectations(t)\n}", "func (s *IntegrationTestSuite) TestGRPCServerInvalidHeaderHeights() {\n\tt := s.T()\n\n\t// We should reject connections with invalid block heights off the bat.\n\tinvalidHeightStrs := []struct {\n\t\tvalue string\n\t\twantErr string\n\t}{\n\t\t{\"-1\", \"height < 0\"},\n\t\t{\"9223372036854775808\", \"value out of range\"}, // > max(int64) by 1\n\t\t{\"-10\", \"height < 0\"},\n\t\t{\"18446744073709551615\", \"value out of range\"}, // max uint64, which is > max(int64)\n\t\t{\"-9223372036854775809\", \"value out of range\"}, // Out of the range of for negative int64\n\t}\n\tfor _, tt := range invalidHeightStrs {\n\t\tt.Run(tt.value, func(t *testing.T) {\n\t\t\ttestClient := testdata.NewQueryClient(s.conn)\n\t\t\tctx := metadata.AppendToOutgoingContext(context.Background(), grpctypes.GRPCBlockHeightHeader, tt.value)\n\t\t\ttestRes, err := testClient.Echo(ctx, &testdata.EchoRequest{Message: \"hello\"})\n\t\t\trequire.Error(t, err)\n\t\t\trequire.Nil(t, testRes)\n\t\t\trequire.Contains(t, err.Error(), tt.wantErr)\n\t\t})\n\t}\n}", "func TestServerStdoutRespectsMaxPacketSize(t *testing.T) {\n\tconn := dial(largeSendHandler, t)\n\tdefer conn.Close()\n\tsession, err := conn.NewSession()\n\tif err != nil {\n\t\tt.Fatalf(\"Unable to request new session: %v\", err)\n\t}\n\tdefer session.Close()\n\tout, err := session.StdoutPipe()\n\tif err != nil {\n\t\tt.Fatalf(\"Unable to connect to Stdout: %v\", err)\n\t}\n\tif err := session.Shell(); err != nil {\n\t\tt.Fatalf(\"Unable to execute command: %v\", err)\n\t}\n\tif _, err := ioutil.ReadAll(out); err != nil {\n\t\tt.Fatalf(\"failed to read: %v\", err)\n\t}\n}", "func badSequence(text string) *smtpResponse {\n\treturn response(503, text, telnet.REQUEST)\n}", "func mockErr(mockErrOpts *MockErrOptions, n apns.Packet) error {\n\ti := rand.Intn(101-1) + 1\n\tif i < mockErrOpts.fail {\n\t\tif en, isEN := n.(*apns.EnhancedNotification); isEN {\n\t\t\tresp := &apns.ErrorResponse{\n\t\t\t\tStatus: apns.InvalidTokenStatus,\n\t\t\t\tIdentifier: en.Identifier,\n\t\t\t}\n\t\t\treturn resp\n\t\t}\n\t\treturn io.EOF\n\t}\n\treturn nil\n}", "func FuzzClampEDNSSize(f *testing.F) {\n\t// Empty DNS packet\n\tf.Add([]byte{\n\t\t// query id\n\t\t0x12, 0x34,\n\t\t// flags: standard query, recurse\n\t\t0x01, 0x20,\n\t\t// num questions\n\t\t0x00, 0x00,\n\t\t// num answers\n\t\t0x00, 0x00,\n\t\t// num authority RRs\n\t\t0x00, 0x00,\n\t\t// num additional RRs\n\t\t0x00, 0x00,\n\t})\n\n\t// Empty OPT\n\tf.Add([]byte{\n\t\t// header\n\t\t0xaf, 0x66, 0x01, 0x20, 0x00, 0x01, 0x00, 0x00,\n\t\t0x00, 0x00, 0x00, 0x01,\n\t\t// query\n\t\t0x06, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x03, 0x63, 0x6f,\n\t\t0x6d, 0x00, 0x00, 
0x01, 0x00, 0x01,\n\t\t// OPT\n\t\t0x00, // name: <root>\n\t\t0x00, 0x29, // type: OPT\n\t\t0x10, 0x00, // UDP payload size\n\t\t0x00, // higher bits in extended RCODE\n\t\t0x00, // EDNS0 version\n\t\t0x80, 0x00, // \"Z\" field\n\t\t0x00, 0x00, // data length\n\t})\n\n\t// Query for \"google.com\"\n\tf.Add([]byte{\n\t\t// header\n\t\t0xaf, 0x66, 0x01, 0x20, 0x00, 0x01, 0x00, 0x00,\n\t\t0x00, 0x00, 0x00, 0x01,\n\t\t// query\n\t\t0x06, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x03, 0x63, 0x6f,\n\t\t0x6d, 0x00, 0x00, 0x01, 0x00, 0x01,\n\t\t// OPT\n\t\t0x00, 0x00, 0x29, 0x10, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00,\n\t\t0x0c, 0x00, 0x0a, 0x00, 0x08, 0x62, 0x18, 0x1a, 0xcb, 0x19,\n\t\t0xd7, 0xee, 0x23,\n\t})\n\n\tf.Fuzz(func(t *testing.T, data []byte) {\n\t\tclampEDNSSize(data, maxResponseBytes)\n\t})\n}", "func TestReqRespServerErr(t *testing.T) {\n\t// Connect to NATS\n\tm := NewMessenger(testConfig)\n\tdefer m.Close()\n\n\t// Use a WaitGroup to wait for the message to arrive\n\twg := sync.WaitGroup{}\n\twg.Add(1)\n\n\t// Subscribe to the source subject with the message processing function\n\ttestSubject := \"test_subject\"\n\ttestMsgContent := []byte(\"Some text to send...\")\n\ttestRespErr := errors.New(\"Server error\")\n\tm.Response(testSubject, func(content []byte) ([]byte, error) {\n\t\tdefer wg.Done()\n\t\trequire.EqualValues(t, content, testMsgContent)\n\t\treturn nil, testRespErr\n\t})\n\n\t// Send a message\n\tresp, err := m.Request(testSubject, testMsgContent, 50*time.Millisecond)\n\tassert.Nil(t, err)\n\trequire.EqualValues(t, resp, testRespErr.Error())\n\n\t// Wait for the message to come in\n\twg.Wait()\n}", "func TestParseSenderToReceiverWrongLength(t *testing.T) {\n\tvar line = \"{7072}SwiftSwift Line One Swift Line Two Swift Line Three Swift Line Four Swift Line Five Swift Line Six \"\n\tr := NewReader(strings.NewReader(line))\n\tr.line = line\n\n\terr := r.parseSenderToReceiver()\n\n\trequire.EqualError(t, err, r.parseError(fieldError(\"SwiftLineSix\", ErrValidLength)).Error())\n}", "func TestDnsMessageUncompressedQueryConfidenceCheck(t *testing.T) {\n\tm := mustUnpack(uncompressedQueryBytes)\n\tpackedBytes := mustPack(m)\n\tif len(packedBytes) >= len(uncompressedQueryBytes) {\n\t\tt.Errorf(\"Compressed query is not smaller than uncompressed query\")\n\t}\n}", "func (r *Responder) RequestHeaderFieldsTooLarge() { r.write(http.StatusRequestHeaderFieldsTooLarge) }", "func (s *TestSuite) TestSetTooBig() {\n\ts.Run(\"TestSetTooBig PROTO\", func() {\n\t\ts.testSetTooBig(gnmiapi.Encoding_PROTO)\n\t})\n}", "func TestConnBodyLen(t *testing.T) {\n\tn, _ := openConn(t)\n\tconn_headers := check11(TEST_HEADERS)\n\tc, e := Connect(n, conn_headers)\n\tif e != nil {\n\t\tt.Errorf(\"Expected no connect error, got [%v]\\n\", e)\n\t}\n\tif len(c.ConnectResponse.Body) != 0 {\n\t\tt.Errorf(\"Expected body length 0, got [%v]\\n\", len(c.ConnectResponse.Body))\n\t}\n\t_ = c.Disconnect(empty_headers)\n\t_ = closeConn(t, n)\n}", "func checkContentLength(res *http.Response) (int64, string, error) {\n\tif res.ContentLength > 0 {\n\t\treturn res.ContentLength, \"Good\", nil\n\t}\n\tif res.ContentLength == 0 {\n\t\t//Means exactly none\n\t\tif res.Body != nil {\n\t\t\treturn res.ContentLength, \"Artifact content is empty\", nil\n\t\t}\n\t\treturn res.ContentLength, \"None\", nil\n\t}\n\tif res.ContentLength < 0 {\n\t\t//Means Unknown\n\t\treturn res.ContentLength, \"Chunked\", nil\n\t}\n\treturn 0, \"\", nil\n}", "func TestSendFailed(t *testing.T) {\n\tdoh, _ := NewTransport(testURL, ips, nil, nil, 
nil)\n\ttransport := doh.(*transport)\n\trt := makeTestRoundTripper()\n\ttransport.client.Transport = rt\n\n\trt.err = errors.New(\"test\")\n\t_, err := doh.Query(simpleQueryBytes)\n\tvar qerr *queryError\n\tif err == nil {\n\t\tt.Error(\"Send failure should be reported\")\n\t} else if !errors.As(err, &qerr) {\n\t\tt.Errorf(\"Wrong error type: %v\", err)\n\t} else if qerr.status != SendFailed {\n\t\tt.Errorf(\"Wrong error status: %d\", qerr.status)\n\t} else if !errors.Is(qerr, rt.err) {\n\t\tt.Errorf(\"Underlying error is not retained\")\n\t}\n}", "func TestRecordTooBig(t *testing.T) {\n\tvar r Record\n\tkey := randomString(10)\n\n\t// set a big value for random key, expect error\n\tstr := msg.String(randomString(SizeLimit))\n\tr.Set(WithEntry(key, &str))\n\tif err := signTest([]byte{5}, &r); err != errTooBig {\n\t\tt.Fatalf(\"expected to get errTooBig, got %#v\", err)\n\t}\n\tstr2 := msg.String(randomString(100))\n\t// set an acceptable value for random key, expect no error\n\tr.Set(WithEntry(key, &str2))\n\trequire.NoError(t, signTest([]byte{5}, &r))\n}", "func TestResponse(t *testing.T) {\n\tdoh, _ := NewTransport(testURL, ips, nil, nil, nil)\n\ttransport := doh.(*transport)\n\trt := makeTestRoundTripper()\n\ttransport.client.Transport = rt\n\n\t// Fake server.\n\tgo func() {\n\t\t<-rt.req\n\t\tr, w := io.Pipe()\n\t\trt.resp <- &http.Response{\n\t\t\tStatusCode: 200,\n\t\t\tBody: r,\n\t\t\tRequest: &http.Request{URL: parsedURL},\n\t\t}\n\t\t// The DOH response should have a zero query ID.\n\t\tvar modifiedQuery dnsmessage.Message = simpleQuery\n\t\tmodifiedQuery.Header.ID = 0\n\t\tw.Write(mustPack(&modifiedQuery))\n\t\tw.Close()\n\t}()\n\n\tresp, err := doh.Query(simpleQueryBytes)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// Parse the response as a DNS message.\n\trespParsed := mustUnpack(resp)\n\n\t// Query() should reconstitute the query ID in the response.\n\tif respParsed.Header.ID != simpleQuery.Header.ID ||\n\t\t!queriesMostlyEqual(*respParsed, simpleQuery) {\n\t\tt.Errorf(\"Unexpected response %v\", resp)\n\t}\n}", "func (*ResponseTimeoutError) Timeout() bool { return true }", "func GetErrorFromResponse(r *http.Response) error {\n\t// If the content length is not set, limit reading to 4K worth of data.\n\t// It is probably way more than needed because an error that long is\n\t// very unusual. 
Plus it will only cut it off rather than show nothing.\n\ts, err := getResponse(r, errMax)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts = strings.TrimSpace(s)\n\tif len(s) == 0 {\n\t\treturn fmt.Errorf(\"server did not provide a message (status %v: %v)\", r.StatusCode, http.StatusText(r.StatusCode))\n\t}\n\treturn errors.New(s)\n}", "func TestReadLargeBuffer(t *testing.T) {\n\tmockTr := new(mockTTransport)\n\treads := make(chan []byte, 2)\n\treads <- frame[0:4]\n\treads <- frame[4:]\n\tclose(reads)\n\tmockTr.reads = reads\n\ttr := NewTFramedTransport(mockTr)\n\n\tbuff := make([]byte, len(frame)+10)\n\tn, err := tr.Read(buff)\n\n\tassert.Error(t, err)\n\tassert.Equal(t, \"frugal: not enough frame (size 70) to read 84 bytes\", err.Error())\n\tassert.Equal(t, len(frame)-4, n)\n\tassert.Equal(t, append(frame[4:], make([]byte, 14)...), buff)\n}", "func (t FindCoordinatorResponse) Size(version int16) int32 {\n\tvar sz int32\n\tif version >= 1 {\n\t\tsz += sizeof.Int32 // ThrottleTimeMs\n\t}\n\tsz += sizeof.Int16 // ErrorCode\n\tif version >= 1 {\n\t\tsz += sizeof.String(t.ErrorMessage) // ErrorMessage\n\t}\n\tsz += sizeof.Int32 // NodeId\n\tsz += sizeof.String(t.Host) // Host\n\tsz += sizeof.Int32 // Port\n\treturn sz\n}", "func TestOverflowValidFailureURLWithSecret(t *testing.T) {\n\tassert := assert.New(t)\n\n\tvar output bytes.Buffer\n\tlogger := getNewTestOutputLogger(&output)\n\n\ttrans := &transport{}\n\ttrans.fn = func(req *http.Request, count int) (resp *http.Response, err error) {\n\t\tassert.Equal(\"POST\", req.Method)\n\t\tassert.Equal([]string{\"application/json\"}, req.Header[\"Content-Type\"])\n\t\t// There is a timestamp in the body, so it's not worth trying to do a string comparison\n\t\tassert.NotNil(req.Header[\"X-Webpa-Signature\"])\n\t\tpayload, _ := ioutil.ReadAll(req.Body)\n\t\tassert.NotNil(payload)\n\n\t\tresp = &http.Response{Status: \"200 OK\",\n\t\t\tStatusCode: 200,\n\t\t}\n\t\treturn\n\t}\n\n\tw := webhook.W{\n\t\tUntil: time.Now(),\n\t\tFailureURL: \"http://localhost:12345/bar\",\n\t\tEvents: []string{\"iot\", \"test\"},\n\t}\n\tw.Config.URL = \"http://localhost:9999/foo\"\n\tw.Config.ContentType = \"application/json\"\n\tw.Config.Secret = \"123456\"\n\n\tobs, err := OutboundSenderFactory{\n\t\tListener: w,\n\t\tClient: &http.Client{Transport: trans},\n\t\tCutOffPeriod: time.Second,\n\t\tNumWorkers: 10,\n\t\tQueueSize: 10,\n\t\tProfilerFactory: testServerProfilerFactory,\n\t\tLogger: logger,\n\t}.New()\n\tassert.Nil(err)\n\n\tif _, ok := obs.(*CaduceusOutboundSender); !ok {\n\t\tassert.Fail(\"Interface returned by OutboundSenderFactory.New() must be implemented by a CaduceusOutboundSender.\")\n\t}\n\n\tobs.(*CaduceusOutboundSender).queueOverflow()\n\tassert.NotNil(output.String())\n}", "func CheckUnlimitedSize(size int) error {\n\treturn nil\n}", "func (msg *MsgFetchSmartContractInfo) MaxPayloadLength(pver uint32) uint32 {\n\t// 10k. 
In theory this message is very small.\n\treturn 10240\n}", "func checkResponse(r io.Reader) error {\n\tresponse, err := ParseResponse(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif response.IsFailure() {\n\t\treturn errors.New(response.GetMessage())\n\t}\n\n\treturn nil\n\n}", "func (*testObject) MTU() uint32 {\n\treturn 65536\n}", "func TestLongLineErr(t *testing.T) {\n\t// create a reader with a buffer size of only 10 characters\n\t// lines longer than that will be error\n\tinput := \"key1=an entry longer than 10 chars\"\n\tbr := bufio.NewReaderSize(strings.NewReader(input), 10)\n\tp := newParser(br)\n\t_, err := p.NextValue()\n\tif err.(*ParseError).Code() != ErrLineTooLong {\n\t\tt.Fatalf(\"expected err=ErrLineTooLong actual=%s\", err)\n\t}\n}", "func SendPayloadTooLarge(w http.ResponseWriter, opts ...ErrorOpts) {\n\tres := errorResponse{\n\t\tCode: CodePayloadTooLarge,\n\t\tMessage: \"Payload too large\",\n\t}\n\tres.apply(opts)\n\tSendJSON(w, 413, &res)\n}", "func TestStringSenderToReceiverVariableLength(t *testing.T) {\n\tvar line = \"{7072}\"\n\tr := NewReader(strings.NewReader(line))\n\tr.line = line\n\n\terr := r.parseSenderToReceiver()\n\trequire.Nil(t, err)\n\n\tline = \"{7072} NNN\"\n\tr = NewReader(strings.NewReader(line))\n\tr.line = line\n\n\terr = r.parseSenderToReceiver()\n\trequire.ErrorContains(t, err, r.parseError(NewTagMaxLengthErr(errors.New(\"\"))).Error())\n\n\tline = \"{7072}**************\"\n\tr = NewReader(strings.NewReader(line))\n\tr.line = line\n\n\terr = r.parseSenderToReceiver()\n\trequire.ErrorContains(t, err, r.parseError(NewTagMaxLengthErr(errors.New(\"\"))).Error())\n\n\tline = \"{7072}*\"\n\tr = NewReader(strings.NewReader(line))\n\tr.line = line\n\n\terr = r.parseSenderToReceiver()\n\trequire.Equal(t, err, nil)\n}", "func (t JsonType) MaxTextResponseByteLength(_ *sql.Context) uint32 {\n\treturn uint32(MaxJsonFieldByteLength*sql.Collation_Default.CharacterSet().MaxLength()) - 1\n}", "func (msg *MsgPing) MaxPayloadLength(pver uint32) uint32 {\n\tplen := uint32(0)\n\treturn plen\n}", "func SafeByteSize(sizeStr string) uint64 {\n\tsize, _ := ToByteSize(sizeStr)\n\treturn size\n}", "func CheckResponse(r *http.Response) error {\n\tif r.StatusCode == 200 {\n\t\treader := bufio.NewReader(r.Body)\n\t\tfirstByte, err := reader.ReadByte()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treader.UnreadByte()\n\n\t\tif string(firstByte) == \"-\" {\n\t\t\terrorString, _ := reader.ReadString('\\n')\n\t\t\tif matched, _ := regexp.MatchString(\"-\\\\d+,.+\", errorString); matched == false {\n\t\t\t\treturn fmt.Errorf(\"invalid message format\")\n\t\t\t}\n\t\t\terrors := strings.Split(errorString, \",\")\n\t\t\terrorCode, _ := strconv.Atoi(errors[0])\n\n\t\t\treturn &ErrorResponse{\n\t\t\t\tResponse: r,\n\t\t\t\tMessage: strings.TrimSpace(errors[1]),\n\t\t\t\tErrorCode: StatusCode(errorCode),\n\t\t\t}\n\t\t}\n\n\t\t// reset the response body to the original unread state\n\t\tr.Body = ioutil.NopCloser(reader)\n\n\t\treturn nil\n\t}\n\n\t// EVERY8D API always return status code 200\n\treturn fmt.Errorf(\"unexpected status code: %d\", r.StatusCode)\n}", "func TestHTTPError(t *testing.T) {\n\tdoh, _ := NewTransport(testURL, ips, nil, nil, nil)\n\ttransport := doh.(*transport)\n\trt := makeTestRoundTripper()\n\ttransport.client.Transport = rt\n\n\tgo func() {\n\t\t<-rt.req\n\t\tr, w := io.Pipe()\n\t\trt.resp <- &http.Response{\n\t\t\tStatusCode: 500,\n\t\t\tBody: r,\n\t\t\tRequest: &http.Request{URL: parsedURL},\n\t\t}\n\t\tw.Write([]byte{0, 0, 8, 9, 
10})\n\t\tw.Close()\n\t}()\n\n\t_, err := doh.Query(simpleQueryBytes)\n\tvar qerr *queryError\n\tif err == nil {\n\t\tt.Error(\"Empty body should cause an error\")\n\t} else if !errors.As(err, &qerr) {\n\t\tt.Errorf(\"Wrong error type: %v\", err)\n\t} else if qerr.status != HTTPError {\n\t\tt.Errorf(\"Wrong error status: %d\", qerr.status)\n\t}\n}", "func TestMakeEHLOResponse(t *testing.T) {\n\ts := &session{}\n\ts.srv = &Server{}\n\n\t// Greeting should be returned without trailing newlines.\n\tgreeting := s.makeEHLOResponse()\n\tif len(greeting) != len(strings.TrimSpace(greeting)) {\n\t\tt.Errorf(\"EHLO greeting string has leading or trailing whitespace\")\n\t}\n\n\t// By default, TLS is not configured, so STARTTLS should not appear.\n\textensions := parseExtensions(t, s.makeEHLOResponse())\n\tif _, ok := extensions[\"STARTTLS\"]; ok {\n\t\tt.Errorf(\"STARTTLS appears in the extension list when TLS is not configured\")\n\t}\n\n\t// If TLS is configured, but not already in use, STARTTLS should appear.\n\ts.srv.TLSConfig = &tls.Config{}\n\textensions = parseExtensions(t, s.makeEHLOResponse())\n\tif _, ok := extensions[\"STARTTLS\"]; !ok {\n\t\tt.Errorf(\"STARTTLS does not appear in the extension list when TLS is configured\")\n\t}\n\n\t// If TLS is already used on the connection, STARTTLS should not appear.\n\ts.tls = true\n\textensions = parseExtensions(t, s.makeEHLOResponse())\n\tif _, ok := extensions[\"STARTTLS\"]; ok {\n\t\tt.Errorf(\"STARTTLS appears in the extension list when TLS is already in use\")\n\t}\n\n\t// Verify default SIZE extension is zero.\n\ts.srv = &Server{}\n\textensions = parseExtensions(t, s.makeEHLOResponse())\n\tif _, ok := extensions[\"SIZE\"]; !ok {\n\t\tt.Errorf(\"SIZE does not appear in the extension list\")\n\t} else if extensions[\"SIZE\"] != \"0\" {\n\t\tt.Errorf(\"SIZE appears in the extension list with incorrect parameter %s, want %s\", extensions[\"SIZE\"], \"0\")\n\t}\n\n\t// Verify configured maximum message size is listed correctly.\n\t// Any integer will suffice, as long as it's not hardcoded.\n\tmaxSize := 10 + time.Now().Minute()\n\tmaxSizeStr := fmt.Sprintf(\"%d\", maxSize)\n\ts.srv = &Server{MaxSize: maxSize}\n\textensions = parseExtensions(t, s.makeEHLOResponse())\n\tif _, ok := extensions[\"SIZE\"]; !ok {\n\t\tt.Errorf(\"SIZE does not appear in the extension list\")\n\t} else if extensions[\"SIZE\"] != maxSizeStr {\n\t\tt.Errorf(\"SIZE appears in the extension list with incorrect parameter %s, want %s\", extensions[\"SIZE\"], maxSizeStr)\n\t}\n}", "func TestManagedStream_Receiver(t *testing.T) {\n\n\tvar customErr = fmt.Errorf(\"foo\")\n\n\ttestCases := []struct {\n\t\tdescription string\n\t\trecvResp []*testRecvResponse\n\t\twantFinalErr error\n\t\twantTotalAttempts int\n\t}{\n\t\t{\n\t\t\tdescription: \"no errors\",\n\t\t\trecvResp: []*testRecvResponse{\n\t\t\t\t{\n\t\t\t\t\tresp: &storagepb.AppendRowsResponse{},\n\t\t\t\t\terr: nil,\n\t\t\t\t},\n\t\t\t},\n\t\t\twantTotalAttempts: 1,\n\t\t},\n\t\t{\n\t\t\tdescription: \"recv err w/io.EOF\",\n\t\t\trecvResp: []*testRecvResponse{\n\t\t\t\t{\n\t\t\t\t\tresp: nil,\n\t\t\t\t\terr: io.EOF,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tresp: &storagepb.AppendRowsResponse{},\n\t\t\t\t\terr: nil,\n\t\t\t\t},\n\t\t\t},\n\t\t\twantTotalAttempts: 2,\n\t\t},\n\t\t{\n\t\t\tdescription: \"recv err retried and then failed\",\n\t\t\trecvResp: []*testRecvResponse{\n\t\t\t\t{\n\t\t\t\t\tresp: nil,\n\t\t\t\t\terr: io.EOF,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tresp: nil,\n\t\t\t\t\terr: 
customErr,\n\t\t\t\t},\n\t\t\t},\n\t\t\twantTotalAttempts: 2,\n\t\t\twantFinalErr: customErr,\n\t\t},\n\t\t{\n\t\t\tdescription: \"recv err w/ custom error\",\n\t\t\trecvResp: []*testRecvResponse{\n\t\t\t\t{\n\t\t\t\t\tresp: nil,\n\t\t\t\t\terr: customErr,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tresp: &storagepb.AppendRowsResponse{},\n\t\t\t\t\terr: nil,\n\t\t\t\t},\n\t\t\t},\n\t\t\twantTotalAttempts: 1,\n\t\t\twantFinalErr: customErr,\n\t\t},\n\n\t\t{\n\t\t\tdescription: \"resp embeds Unavailable\",\n\t\t\trecvResp: []*testRecvResponse{\n\t\t\t\t{\n\t\t\t\t\tresp: &storagepb.AppendRowsResponse{\n\t\t\t\t\t\tResponse: &storagepb.AppendRowsResponse_Error{\n\t\t\t\t\t\t\tError: &statuspb.Status{\n\t\t\t\t\t\t\t\tCode: int32(codes.Unavailable),\n\t\t\t\t\t\t\t\tMessage: \"foo\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\terr: nil,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tresp: &storagepb.AppendRowsResponse{},\n\t\t\t\t\terr: nil,\n\t\t\t\t},\n\t\t\t},\n\t\t\twantTotalAttempts: 2,\n\t\t},\n\t\t{\n\t\t\tdescription: \"resp embeds generic ResourceExhausted\",\n\t\t\trecvResp: []*testRecvResponse{\n\t\t\t\t{\n\t\t\t\t\tresp: &storagepb.AppendRowsResponse{\n\t\t\t\t\t\tResponse: &storagepb.AppendRowsResponse_Error{\n\t\t\t\t\t\t\tError: &statuspb.Status{\n\t\t\t\t\t\t\t\tCode: int32(codes.ResourceExhausted),\n\t\t\t\t\t\t\t\tMessage: \"foo\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\terr: nil,\n\t\t\t\t},\n\t\t\t},\n\t\t\twantTotalAttempts: 1,\n\t\t},\n\t\t{\n\t\t\tdescription: \"resp embeds throughput ResourceExhausted\",\n\t\t\trecvResp: []*testRecvResponse{\n\t\t\t\t{\n\t\t\t\t\tresp: &storagepb.AppendRowsResponse{\n\t\t\t\t\t\tResponse: &storagepb.AppendRowsResponse_Error{\n\t\t\t\t\t\t\tError: &statuspb.Status{\n\t\t\t\t\t\t\t\tCode: int32(codes.ResourceExhausted),\n\t\t\t\t\t\t\t\tMessage: \"Exceeds 'AppendRows throughput' quota for stream blah\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\terr: nil,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tresp: &storagepb.AppendRowsResponse{},\n\t\t\t\t\terr: nil,\n\t\t\t\t},\n\t\t\t},\n\t\t\twantTotalAttempts: 2,\n\t\t},\n\t\t{\n\t\t\tdescription: \"retriable failures until max attempts\",\n\t\t\trecvResp: []*testRecvResponse{\n\t\t\t\t{\n\t\t\t\t\terr: io.EOF,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\terr: io.EOF,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\terr: io.EOF,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\terr: io.EOF,\n\t\t\t\t},\n\t\t\t},\n\t\t\twantTotalAttempts: 4,\n\t\t\twantFinalErr: io.EOF,\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tctx, cancel := context.WithCancel(context.Background())\n\n\t\ttestArc := &testAppendRowsClient{\n\t\t\tresponses: tc.recvResp,\n\t\t}\n\n\t\tms := &ManagedStream{\n\t\t\tctx: ctx,\n\t\t\topen: openTestArc(testArc, nil,\n\t\t\t\tfunc() (*storagepb.AppendRowsResponse, error) {\n\t\t\t\t\tif len(testArc.responses) == 0 {\n\t\t\t\t\t\tpanic(\"out of responses\")\n\t\t\t\t\t}\n\t\t\t\t\tcurResp := testArc.responses[0]\n\t\t\t\t\ttestArc.responses = testArc.responses[1:]\n\t\t\t\t\treturn curResp.resp, curResp.err\n\t\t\t\t},\n\t\t\t),\n\t\t\tstreamSettings: defaultStreamSettings(),\n\t\t\tfc: newFlowController(0, 0),\n\t\t\tretry: newStatelessRetryer(),\n\t\t}\n\t\t// use openWithRetry to get the reference to the channel and add our test pending write.\n\t\t_, ch, _ := ms.openWithRetry()\n\t\tpw := newPendingWrite(ctx, [][]byte{[]byte(\"foo\")})\n\t\tpw.attemptCount = 1 // we're injecting directly, but attribute this as a single attempt.\n\t\tch <- pw\n\n\t\t// Wait until the write is marked 
done.\n\t\t<-pw.result.Ready()\n\n\t\t// Check retry count is as expected.\n\t\tgotTotalAttempts, err := pw.result.TotalAttempts(ctx)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"failed to get total appends: %v\", err)\n\t\t}\n\t\tif gotTotalAttempts != tc.wantTotalAttempts {\n\t\t\tt.Errorf(\"%s: got %d total attempts, want %d attempts\", tc.description, gotTotalAttempts, tc.wantTotalAttempts)\n\t\t}\n\n\t\t// Check that the write got the expected final result.\n\t\tif gotFinalErr := pw.result.err; !errors.Is(gotFinalErr, tc.wantFinalErr) {\n\t\t\tt.Errorf(\"%s: got final error %v, wanted final error %v\", tc.description, gotFinalErr, tc.wantFinalErr)\n\t\t}\n\t\tms.Close()\n\t\tcancel()\n\t}\n}", "func TestLimitListenerError(t *testing.T) {\n\tdonec := make(chan bool, 1)\n\tgo func() {\n\t\tconst n = 2\n\t\tll := LimitListener(errorListener{}, n)\n\t\tfor i := 0; i < n+1; i++ {\n\t\t\t_, err := ll.Accept()\n\t\t\tif err != errFake {\n\t\t\t\tt.Fatalf(\"Accept error = %v; want errFake\", err)\n\t\t\t}\n\t\t}\n\t\tdonec <- true\n\t}()\n\tselect {\n\tcase <-donec:\n\tcase <-time.After(5 * time.Second):\n\t\tt.Fatal(\"timeout. deadlock?\")\n\t}\n}", "func TestAfPacketConfigureHostAvailError(t *testing.T) {\n\tctx, plugin, _ := afPacketTestSetup(t)\n\tdefer afPacketTestTeardown(ctx)\n\n\t// Reply set\n\tctx.MockVpp.MockReply(&ap_api.AfPacketCreateReply{\n\t\tRetval: 1,\n\t\tSwIfIndex: 2,\n\t})\n\t// Data\n\tdata := getTestAfPacketData(\"if1\", []string{\"10.0.0.1/24\"}, \"host1\")\n\n\t// Test configure af packet with return value != 0\n\tplugin.ResolveCreatedLinuxInterface(\"host1\", \"host1\", 1)\n\tswIfIdx, pending, err := plugin.ConfigureAfPacketInterface(data)\n\tExpect(err).ToNot(BeNil())\n\tExpect(swIfIdx).To(BeZero())\n\tExpect(pending).To(BeTrue())\n\texists, pending, cachedData := plugin.GetAfPacketStatusByName(\"if1\")\n\tExpect(exists).To(BeTrue())\n\tExpect(cachedData).ToNot(BeNil())\n\texists, pending, cachedData = plugin.GetAfPacketStatusByHost(\"host1\")\n\tExpect(exists).To(BeTrue())\n\tExpect(cachedData).ToNot(BeNil())\n}", "func (r *Responder) TooManyRequests() { r.write(http.StatusTooManyRequests) }", "func TestNonFatalDispatch(t *testing.T) {\n\tin, out := net.Pipe()\n\n\tm := NewMux(Config{\n\t\tConn: out,\n\t\tLoggerFactory: logging.NewDefaultLoggerFactory(),\n\t\tBufferSize: 1500,\n\t})\n\n\te := m.NewEndpoint(MatchSRTP)\n\te.buffer.SetLimitSize(1)\n\n\tfor i := 0; i <= 25; i++ {\n\t\tsrtpPacket := []byte{128, 1, 2, 3, 4}\n\t\t_, err := in.Write(srtpPacket)\n\t\trequire.NoError(t, err)\n\t}\n\n\trequire.NoError(t, m.Close())\n\trequire.NoError(t, in.Close())\n\trequire.NoError(t, out.Close())\n}", "func (t LeaveGroupResponse) Size(version int16) int32 {\n\tvar sz int32\n\tif version >= 1 {\n\t\tsz += sizeof.Int32 // ThrottleTimeMs\n\t}\n\tsz += sizeof.Int16 // ErrorCode\n\tif version >= 3 {\n\t\tsz += sizeof.ArrayLength // Members\n\t\tfor i := len(t.Members) - 1; i >= 0; i-- {\n\t\t\tsz += t.Members[i].Size(version)\n\t\t}\n\t}\n\treturn sz\n}", "func TestICMPInclusionSize(t *testing.T) {\n\tconst (\n\t\treplyHeaderLength4 = header.IPv4MinimumSize + header.IPv4MinimumSize + header.ICMPv4MinimumSize\n\t\treplyHeaderLength6 = header.IPv6MinimumSize + header.IPv6MinimumSize + header.ICMPv6MinimumSize\n\t\ttargetSize4 = header.IPv4MinimumProcessableDatagramSize\n\t\ttargetSize6 = header.IPv6MinimumMTU\n\t\t// A protocol number that will cause an error response.\n\t\treservedProtocol = 254\n\t)\n\n\t// IPv4 function to create a IP packet and send it to the stack.\n\t// The 
packet should generate an error response. We can do that by using an\n\t// unknown transport protocol (254).\n\trxIPv4Bad := func(e *channel.Endpoint, src tcpip.Address, payload []byte) []byte {\n\t\ttotalLen := header.IPv4MinimumSize + len(payload)\n\t\thdr := prependable.New(header.IPv4MinimumSize)\n\t\tip := header.IPv4(hdr.Prepend(header.IPv4MinimumSize))\n\t\tip.Encode(&header.IPv4Fields{\n\t\t\tTotalLength: uint16(totalLen),\n\t\t\tProtocol: reservedProtocol,\n\t\t\tTTL: ipv4.DefaultTTL,\n\t\t\tSrcAddr: src,\n\t\t\tDstAddr: localIPv4Addr,\n\t\t})\n\t\tip.SetChecksum(^ip.CalculateChecksum())\n\t\tbuf := buffer.MakeWithData(hdr.View())\n\t\tbuf.Append(buffer.NewViewWithData(payload))\n\t\t// Take a copy before InjectInbound takes ownership of vv\n\t\t// as vv may be changed during the call.\n\t\tv := buf.Flatten()\n\t\tpkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\n\t\t\tPayload: buf,\n\t\t})\n\t\te.InjectInbound(header.IPv4ProtocolNumber, pkt)\n\t\tpkt.DecRef()\n\t\treturn v\n\t}\n\n\t// IPv6 function to create a packet and send it to the stack.\n\t// The packet should be errant in a way that causes the stack to send an\n\t// ICMP error response and have enough data to allow the testing of the\n\t// inclusion of the errant packet. Use `unknown next header' to generate\n\t// the error.\n\trxIPv6Bad := func(e *channel.Endpoint, src tcpip.Address, payload []byte) []byte {\n\t\thdr := prependable.New(header.IPv6MinimumSize)\n\t\tip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))\n\t\tip.Encode(&header.IPv6Fields{\n\t\t\tPayloadLength: uint16(len(payload)),\n\t\t\tTransportProtocol: reservedProtocol,\n\t\t\tHopLimit: ipv6.DefaultTTL,\n\t\t\tSrcAddr: src,\n\t\t\tDstAddr: localIPv6Addr,\n\t\t})\n\t\tbuf := buffer.MakeWithData(hdr.View())\n\t\tbuf.Append(buffer.NewViewWithData(payload))\n\t\t// Take a copy before InjectInbound takes ownership of vv\n\t\t// as vv may be changed during the call.\n\t\tv := buf.Flatten()\n\n\t\tpkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\n\t\t\tPayload: buf,\n\t\t})\n\t\te.InjectInbound(header.IPv6ProtocolNumber, pkt)\n\t\tpkt.DecRef()\n\t\treturn v\n\t}\n\n\tv4Checker := func(t *testing.T, pkt stack.PacketBufferPtr, payload []byte) {\n\t\t// We already know the entire packet is the right size so we can use its\n\t\t// length to calculate the right payload size to check.\n\t\texpectedPayloadLength := pkt.Size() - header.IPv4MinimumSize - header.ICMPv4MinimumSize\n\t\tp := stack.PayloadSince(pkt.NetworkHeader())\n\t\tdefer p.Release()\n\t\tchecker.IPv4(t, p,\n\t\t\tchecker.SrcAddr(localIPv4Addr),\n\t\t\tchecker.DstAddr(remoteIPv4Addr),\n\t\t\tchecker.IPv4HeaderLength(header.IPv4MinimumSize),\n\t\t\tchecker.IPFullLength(uint16(header.IPv4MinimumSize+header.ICMPv4MinimumSize+expectedPayloadLength)),\n\t\t\tchecker.ICMPv4(\n\t\t\t\tchecker.ICMPv4Checksum(),\n\t\t\t\tchecker.ICMPv4Type(header.ICMPv4DstUnreachable),\n\t\t\t\tchecker.ICMPv4Code(header.ICMPv4ProtoUnreachable),\n\t\t\t\tchecker.ICMPv4Payload(payload[:expectedPayloadLength]),\n\t\t\t),\n\t\t)\n\t}\n\n\tv6Checker := func(t *testing.T, pkt stack.PacketBufferPtr, payload []byte) {\n\t\t// We already know the entire packet is the right size so we can use its\n\t\t// length to calculate the right payload size to check.\n\t\texpectedPayloadLength := pkt.Size() - header.IPv6MinimumSize - header.ICMPv6MinimumSize\n\t\tp := stack.PayloadSince(pkt.NetworkHeader())\n\t\tdefer p.Release()\n\t\tchecker.IPv6(t, 
p,\n\t\t\tchecker.SrcAddr(localIPv6Addr),\n\t\t\tchecker.DstAddr(remoteIPv6Addr),\n\t\t\tchecker.IPFullLength(uint16(header.IPv6MinimumSize+header.ICMPv6MinimumSize+expectedPayloadLength)),\n\t\t\tchecker.ICMPv6(\n\t\t\t\tchecker.ICMPv6Type(header.ICMPv6ParamProblem),\n\t\t\t\tchecker.ICMPv6Code(header.ICMPv6UnknownHeader),\n\t\t\t\tchecker.ICMPv6Payload(payload[:expectedPayloadLength]),\n\t\t\t),\n\t\t)\n\t}\n\ttests := []struct {\n\t\tname string\n\t\tsrcAddress tcpip.Address\n\t\tinjector func(*channel.Endpoint, tcpip.Address, []byte) []byte\n\t\tchecker func(*testing.T, stack.PacketBufferPtr, []byte)\n\t\tpayloadLength int // Not including IP header.\n\t\tlinkMTU uint32 // Largest IP packet that the link can send as payload.\n\t\treplyLength int // Total size of IP/ICMP packet expected back.\n\t}{\n\t\t{\n\t\t\tname: \"IPv4 exact match\",\n\t\t\tsrcAddress: remoteIPv4Addr,\n\t\t\tinjector: rxIPv4Bad,\n\t\t\tchecker: v4Checker,\n\t\t\tpayloadLength: targetSize4 - replyHeaderLength4,\n\t\t\tlinkMTU: targetSize4,\n\t\t\treplyLength: targetSize4,\n\t\t},\n\t\t{\n\t\t\tname: \"IPv4 larger MTU\",\n\t\t\tsrcAddress: remoteIPv4Addr,\n\t\t\tinjector: rxIPv4Bad,\n\t\t\tchecker: v4Checker,\n\t\t\tpayloadLength: targetSize4,\n\t\t\tlinkMTU: targetSize4 + 1000,\n\t\t\treplyLength: targetSize4,\n\t\t},\n\t\t{\n\t\t\tname: \"IPv4 smaller MTU\",\n\t\t\tsrcAddress: remoteIPv4Addr,\n\t\t\tinjector: rxIPv4Bad,\n\t\t\tchecker: v4Checker,\n\t\t\tpayloadLength: targetSize4,\n\t\t\tlinkMTU: targetSize4 - 50,\n\t\t\treplyLength: targetSize4 - 50,\n\t\t},\n\t\t{\n\t\t\tname: \"IPv4 payload exceeds\",\n\t\t\tsrcAddress: remoteIPv4Addr,\n\t\t\tinjector: rxIPv4Bad,\n\t\t\tchecker: v4Checker,\n\t\t\tpayloadLength: targetSize4 + 10,\n\t\t\tlinkMTU: targetSize4,\n\t\t\treplyLength: targetSize4,\n\t\t},\n\t\t{\n\t\t\tname: \"IPv4 1 byte less\",\n\t\t\tsrcAddress: remoteIPv4Addr,\n\t\t\tinjector: rxIPv4Bad,\n\t\t\tchecker: v4Checker,\n\t\t\tpayloadLength: targetSize4 - replyHeaderLength4 - 1,\n\t\t\tlinkMTU: targetSize4,\n\t\t\treplyLength: targetSize4 - 1,\n\t\t},\n\t\t{\n\t\t\tname: \"IPv4 No payload\",\n\t\t\tsrcAddress: remoteIPv4Addr,\n\t\t\tinjector: rxIPv4Bad,\n\t\t\tchecker: v4Checker,\n\t\t\tpayloadLength: 0,\n\t\t\tlinkMTU: targetSize4,\n\t\t\treplyLength: replyHeaderLength4,\n\t\t},\n\t\t{\n\t\t\tname: \"IPv6 exact match\",\n\t\t\tsrcAddress: remoteIPv6Addr,\n\t\t\tinjector: rxIPv6Bad,\n\t\t\tchecker: v6Checker,\n\t\t\tpayloadLength: targetSize6 - replyHeaderLength6,\n\t\t\tlinkMTU: targetSize6,\n\t\t\treplyLength: targetSize6,\n\t\t},\n\t\t{\n\t\t\tname: \"IPv6 larger MTU\",\n\t\t\tsrcAddress: remoteIPv6Addr,\n\t\t\tinjector: rxIPv6Bad,\n\t\t\tchecker: v6Checker,\n\t\t\tpayloadLength: targetSize6,\n\t\t\tlinkMTU: targetSize6 + 400,\n\t\t\treplyLength: targetSize6,\n\t\t},\n\t\t// NB. 
No \"smaller MTU\" test here as less than 1280 is not permitted\n\t\t// in IPv6.\n\t\t{\n\t\t\tname: \"IPv6 payload exceeds\",\n\t\t\tsrcAddress: remoteIPv6Addr,\n\t\t\tinjector: rxIPv6Bad,\n\t\t\tchecker: v6Checker,\n\t\t\tpayloadLength: targetSize6,\n\t\t\tlinkMTU: targetSize6,\n\t\t\treplyLength: targetSize6,\n\t\t},\n\t\t{\n\t\t\tname: \"IPv6 1 byte less\",\n\t\t\tsrcAddress: remoteIPv6Addr,\n\t\t\tinjector: rxIPv6Bad,\n\t\t\tchecker: v6Checker,\n\t\t\tpayloadLength: targetSize6 - replyHeaderLength6 - 1,\n\t\t\tlinkMTU: targetSize6,\n\t\t\treplyLength: targetSize6 - 1,\n\t\t},\n\t\t{\n\t\t\tname: \"IPv6 no payload\",\n\t\t\tsrcAddress: remoteIPv6Addr,\n\t\t\tinjector: rxIPv6Bad,\n\t\t\tchecker: v6Checker,\n\t\t\tpayloadLength: 0,\n\t\t\tlinkMTU: targetSize6,\n\t\t\treplyLength: replyHeaderLength6,\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tctx := newTestContext()\n\t\t\tdefer ctx.cleanup()\n\t\t\ts := ctx.s\n\n\t\t\te := addLinkEndpointToStackWithMTU(t, s, test.linkMTU)\n\t\t\tdefer e.Close()\n\t\t\t// Allocate and initialize the payload view.\n\t\t\tpayload := make([]byte, test.payloadLength)\n\t\t\tfor i := 0; i < len(payload); i++ {\n\t\t\t\tpayload[i] = uint8(i)\n\t\t\t}\n\t\t\t// Default routes for IPv4&6 so ICMP can find a route to the remote\n\t\t\t// node when attempting to send the ICMP error Reply.\n\t\t\ts.SetRouteTable([]tcpip.Route{\n\t\t\t\t{\n\t\t\t\t\tDestination: header.IPv4EmptySubnet,\n\t\t\t\t\tNIC: nicID,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tDestination: header.IPv6EmptySubnet,\n\t\t\t\t\tNIC: nicID,\n\t\t\t\t},\n\t\t\t})\n\t\t\tv := test.injector(e, test.srcAddress, payload)\n\t\t\tpkt := e.Read()\n\t\t\tif pkt.IsNil() {\n\t\t\t\tt.Fatal(\"expected a packet to be written\")\n\t\t\t}\n\t\t\tif got, want := pkt.Size(), test.replyLength; got != want {\n\t\t\t\tt.Fatalf(\"got %d bytes of icmp error packet, want %d\", got, want)\n\t\t\t}\n\t\t\ttest.checker(t, pkt, v)\n\t\t\tpkt.DecRef()\n\t\t})\n\t}\n}", "func ValidateResponse(res *http.Response) (err error) {\n\tvar resLength int\n\t// non 200 errors\n\tif res.StatusCode != 200 {\n\t\terr = fmt.Errorf(\"Received %d status code\\n\", res.StatusCode)\n\t} else if res.Header[\"Content-Type\"][0] != \"application/json\" {\n\t\terr = fmt.Errorf(\"Content type not spplication/json. 
Received => %s\\n\", res.Header[\"Content-Type\"][0])\n\t} else {\n\t\tif len(res.Header[\"Content-Length\"]) > 0 {\n\t\t\tresLength, err = strconv.Atoi(res.Header[\"Content-Length\"][0])\n\t\t\tif err == nil && resLength < (CONTENT_LENGTH-100) || resLength > (CONTENT_LENGTH+100) {\n\t\t\t\terr = fmt.Errorf(\"content-Length mismatch 905 vs %d\\n\", resLength)\n\t\t\t}\n\t\t}\n\t}\n\treturn err\n}", "func TestReverseProxyMaxConnLimit(t *testing.T) {\n\tlog.SetOutput(ioutil.Discard)\n\tdefer log.SetOutput(os.Stderr)\n\n\tconst MaxTestConns = 2\n\tconnReceived := make(chan bool, MaxTestConns)\n\tconnContinue := make(chan bool)\n\tbackend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tconnReceived <- true\n\t\t<-connContinue\n\t}))\n\tdefer backend.Close()\n\n\tsu, err := NewStaticUpstreams(caddyfile.NewDispenser(\"Testfile\", strings.NewReader(`\n\t\tproxy / `+backend.URL+` {\n\t\t\tmax_conns `+fmt.Sprint(MaxTestConns)+`\n\t\t}\n\t`)), \"\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// set up proxy\n\tp := &Proxy{\n\t\tNext: httpserver.EmptyNext, // prevents panic in some cases when test fails\n\t\tUpstreams: su,\n\t}\n\n\tvar jobs sync.WaitGroup\n\n\tfor i := 0; i < MaxTestConns; i++ {\n\t\tjobs.Add(1)\n\t\tgo func(i int) {\n\t\t\tdefer jobs.Done()\n\t\t\tw := httptest.NewRecorder()\n\t\t\tcode, err := p.ServeHTTP(w, httptest.NewRequest(\"GET\", \"/\", nil))\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"Request %d failed: %v\", i, err)\n\t\t\t} else if code != 0 {\n\t\t\t\tt.Errorf(\"Bad return code for request %d: %d\", i, code)\n\t\t\t} else if w.Code != 200 {\n\t\t\t\tt.Errorf(\"Bad statuc code for request %d: %d\", i, w.Code)\n\t\t\t}\n\t\t}(i)\n\t}\n\t// Wait for all the requests to hit the backend.\n\tfor i := 0; i < MaxTestConns; i++ {\n\t\t<-connReceived\n\t}\n\n\t// Now we should have MaxTestConns requests connected and sitting on the backend\n\t// server. 
Verify that the next request is rejected.\n\tw := httptest.NewRecorder()\n\tcode, err := p.ServeHTTP(w, httptest.NewRequest(\"GET\", \"/\", nil))\n\tif code != http.StatusBadGateway {\n\t\tt.Errorf(\"Expected request to be rejected, but got: %d [%v]\\nStatus code: %d\",\n\t\t\tcode, err, w.Code)\n\t}\n\n\t// Now let all the requests complete and verify the status codes for those:\n\tclose(connContinue)\n\n\t// Wait for the initial requests to finish and check their results.\n\tjobs.Wait()\n}", "func (t SyncGroupResponse) Size(version int16) int32 {\n\tvar sz int32\n\tif version >= 1 {\n\t\tsz += sizeof.Int32 // ThrottleTimeMs\n\t}\n\tsz += sizeof.Int16 // ErrorCode\n\tsz += sizeof.Bytes(t.Assignment) // Assignment\n\treturn sz\n}", "func TestDustLimitForSize(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tname string\n\t\tsize int\n\t\texpectedLimit btcutil.Amount\n\t}{\n\t\t{\n\t\t\tname: \"p2pkh dust limit\",\n\t\t\tsize: input.P2PKHSize,\n\t\t\texpectedLimit: btcutil.Amount(546),\n\t\t},\n\t\t{\n\t\t\tname: \"p2sh dust limit\",\n\t\t\tsize: input.P2SHSize,\n\t\t\texpectedLimit: btcutil.Amount(540),\n\t\t},\n\t\t{\n\t\t\tname: \"p2wpkh dust limit\",\n\t\t\tsize: input.P2WPKHSize,\n\t\t\texpectedLimit: btcutil.Amount(294),\n\t\t},\n\t\t{\n\t\t\tname: \"p2wsh dust limit\",\n\t\t\tsize: input.P2WSHSize,\n\t\t\texpectedLimit: btcutil.Amount(330),\n\t\t},\n\t\t{\n\t\t\tname: \"unknown witness limit\",\n\t\t\tsize: input.UnknownWitnessSize,\n\t\t\texpectedLimit: btcutil.Amount(354),\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\ttest := test\n\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tdustlimit := DustLimitForSize(test.size)\n\t\t\trequire.Equal(t, test.expectedLimit, dustlimit)\n\t\t})\n\t}\n}", "func (t StopReplicaResponse) Size(version int16) int32 {\n\tvar sz int32\n\tsz += sizeof.Int16 // ErrorCode\n\tsz += sizeof.ArrayLength // PartitionErrors\n\tfor i := len(t.PartitionErrors) - 1; i >= 0; i-- {\n\t\tsz += t.PartitionErrors[i].Size(version)\n\t}\n\treturn sz\n}", "func TestV4UnknownDestination(t *testing.T) {\n\tc := newDualTestContext(t, defaultMTU)\n\tdefer c.cleanup()\n\n\ttestCases := []struct {\n\t\tflow testFlow\n\t\ticmpRequired bool\n\t\t// largePayload if true, will result in a payload large enough\n\t\t// so that the final generated IPv4 packet is larger than\n\t\t// header.IPv4MinimumProcessableDatagramSize.\n\t\tlargePayload bool\n\t}{\n\t\t{unicastV4, true, false},\n\t\t{unicastV4, true, true},\n\t\t{multicastV4, false, false},\n\t\t{multicastV4, false, true},\n\t\t{broadcast, false, false},\n\t\t{broadcast, false, true},\n\t}\n\tfor _, tc := range testCases {\n\t\tt.Run(fmt.Sprintf(\"flow:%s icmpRequired:%t largePayload:%t\", tc.flow, tc.icmpRequired, tc.largePayload), func(t *testing.T) {\n\t\t\tpayload := newPayload()\n\t\t\tif tc.largePayload {\n\t\t\t\tpayload = newMinPayload(576)\n\t\t\t}\n\t\t\tc.injectPacket(tc.flow, payload)\n\t\t\tif !tc.icmpRequired {\n\t\t\t\tselect {\n\t\t\t\tcase p := <-c.linkEP.C:\n\t\t\t\t\tt.Fatalf(\"unexpected packet received: %+v\", p)\n\t\t\t\tcase <-time.After(1 * time.Second):\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tselect {\n\t\t\tcase p := <-c.linkEP.C:\n\t\t\t\tvar pkt []byte\n\t\t\t\tpkt = append(pkt, p.Header...)\n\t\t\t\tpkt = append(pkt, p.Payload...)\n\t\t\t\tif got, want := len(pkt), header.IPv4MinimumProcessableDatagramSize; got > want {\n\t\t\t\t\tt.Fatalf(\"got an ICMP packet of size: %d, want: sz <= %d\", got, want)\n\t\t\t\t}\n\n\t\t\t\thdr := header.IPv4(pkt)\n\t\t\t\tchecker.IPv4(t, 
hdr, checker.ICMPv4(\n\t\t\t\t\tchecker.ICMPv4Type(header.ICMPv4DstUnreachable),\n\t\t\t\t\tchecker.ICMPv4Code(header.ICMPv4PortUnreachable)))\n\n\t\t\t\ticmpPkt := header.ICMPv4(hdr.Payload())\n\t\t\t\tpayloadIPHeader := header.IPv4(icmpPkt.Payload())\n\t\t\t\twantLen := len(payload)\n\t\t\t\tif tc.largePayload {\n\t\t\t\t\twantLen = header.IPv4MinimumProcessableDatagramSize - header.IPv4MinimumSize*2 - header.ICMPv4MinimumSize - header.UDPMinimumSize\n\t\t\t\t}\n\n\t\t\t\t// In case of large payloads the IP packet may be truncated. Update\n\t\t\t\t// the length field before retrieving the udp datagram payload.\n\t\t\t\tpayloadIPHeader.SetTotalLength(uint16(wantLen + header.UDPMinimumSize + header.IPv4MinimumSize))\n\n\t\t\t\torigDgram := header.UDP(payloadIPHeader.Payload())\n\t\t\t\tif got, want := len(origDgram.Payload()), wantLen; got != want {\n\t\t\t\t\tt.Fatalf(\"unexpected payload length got: %d, want: %d\", got, want)\n\t\t\t\t}\n\t\t\t\tif got, want := origDgram.Payload(), payload[:wantLen]; !bytes.Equal(got, want) {\n\t\t\t\t\tt.Fatalf(\"unexpected payload got: %d, want: %d\", got, want)\n\t\t\t\t}\n\t\t\tcase <-time.After(1 * time.Second):\n\t\t\t\tt.Fatalf(\"packet wasn't written out\")\n\t\t\t}\n\t\t})\n\t}\n}", "func (*testObject) MaxHeaderLength() uint16 {\n\treturn 0\n}", "func Exhausted(w http.ResponseWriter, err error) {\n\t(Response{Error: err.Error()}).json(w, http.StatusTooManyRequests)\n}", "func (queue *Queue) GetFailedLength() int64 {\n\treturn queue.redisClient.LLen(queueFailedKey(queue.Name)).Val()\n}", "func testFailingWrite(c *testContext, flow testFlow, wantErr *tcpip.Error) {\n\tc.t.Helper()\n\n\th := flow.header4Tuple(outgoing)\n\twriteDstAddr := flow.mapAddrIfApplicable(h.dstAddr.Addr)\n\n\tpayload := buffer.View(newPayload())\n\t_, _, gotErr := c.ep.Write(tcpip.SlicePayload(payload), tcpip.WriteOptions{\n\t\tTo: &tcpip.FullAddress{Addr: writeDstAddr, Port: h.dstAddr.Port},\n\t})\n\tif gotErr != wantErr {\n\t\tc.t.Fatalf(\"Write returned unexpected error: got %v, want %v\", gotErr, wantErr)\n\t}\n}", "func (t AlterPartitionReassignmentsResponse) Size(version int16) int32 {\n\tvar sz int32\n\tsz += sizeof.Int32 // ThrottleTimeMs\n\tsz += sizeof.Int16 // ErrorCode\n\tsz += sizeof.String(t.ErrorMessage) // ErrorMessage\n\tsz += sizeof.ArrayLength // Responses\n\tfor i := len(t.Responses) - 1; i >= 0; i-- {\n\t\tsz += t.Responses[i].Size(version)\n\t}\n\treturn sz\n}", "func (t ExpireDelegationTokenResponse) Size(version int16) int32 {\n\tvar sz int32\n\tsz += sizeof.Int16 // ErrorCode\n\tsz += sizeof.Int64 // ExpiryTimestampMs\n\tsz += sizeof.Int32 // ThrottleTimeMs\n\treturn sz\n}", "func (msg *MsgReturnTxs) MaxPayloadLength(pver uint32) uint32 {\n\treturn MaxReturnedMsgsPayload\n}", "func TestRequest(t *testing.T) {\n\tdoh, _ := NewTransport(testURL, ips, nil, nil, nil)\n\ttransport := doh.(*transport)\n\trt := makeTestRoundTripper()\n\ttransport.client.Transport = rt\n\tgo doh.Query(simpleQueryBytes)\n\treq := <-rt.req\n\tif req.URL.String() != testURL {\n\t\tt.Errorf(\"URL mismatch: %s != %s\", req.URL.String(), testURL)\n\t}\n\treqBody, err := ioutil.ReadAll(req.Body)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif len(reqBody)%PaddingBlockSize != 0 {\n\t\tt.Errorf(\"reqBody has unexpected length: %d\", len(reqBody))\n\t}\n\t// Parse reqBody into a Message.\n\tnewQuery := mustUnpack(reqBody)\n\t// Ensure the converted request has an ID of zero.\n\tif newQuery.Header.ID != 0 {\n\t\tt.Errorf(\"Unexpected request header id: %v\", 
newQuery.Header.ID)\n\t}\n\t// Check that all fields except for Header.ID and Additionals\n\t// are the same as the original. Additionals may differ if\n\t// padding was added.\n\tif !queriesMostlyEqual(simpleQuery, *newQuery) {\n\t\tt.Errorf(\"Unexpected query body:\\n\\t%v\\nExpected:\\n\\t%v\", newQuery, simpleQuery)\n\t}\n\tcontentType := req.Header.Get(\"Content-Type\")\n\tif contentType != \"application/dns-message\" {\n\t\tt.Errorf(\"Wrong content type: %s\", contentType)\n\t}\n\taccept := req.Header.Get(\"Accept\")\n\tif accept != \"application/dns-message\" {\n\t\tt.Errorf(\"Wrong Accept header: %s\", accept)\n\t}\n}", "func (r Response) PayloadTooLarge(code string, payload Payload, header ...ResponseHeader) {\n\tr.Response(code, http.PayloadTooLarge, payload, header...)\n}", "func CheckSizeInRange(buf []byte, min int, max int, descrip string) {\n\tif len(buf) < min || len(buf) > max {\n\t\tpanic(fmt.Sprintf(\"Incorrect %s buffer size, expected (%d - %d), got (%d).\", descrip, min, max, len(buf)))\n\t}\n}", "func ErrLengthRequiredf(format string, arguments ...interface{}) *Status {\n\treturn &Status{Code: http.StatusLengthRequired, Text: fmt.Sprintf(format, arguments...)}\n}", "func readResponse(p packetType) (response responseType, err error) {\n\t// The calls to bencode.Unmarshal() can be fragile.\n\tdefer func() {\n\t\tif x := recover(); x != nil {\n\t\t\tlogger.Infof(\"DHT: !!! Recovering from panic() after bencode.Unmarshal %q, %v\", string(p.b), x)\n\t\t}\n\t}()\n\tif e2 := bencode.Unmarshal(bytes.NewBuffer(p.b), &response); e2 == nil {\n\t\terr = nil\n\t\treturn\n\t} else {\n\t\tlogger.Infof(\"DHT: unmarshal error, odd or partial data during UDP read? %v, err=%s\", string(p.b), e2)\n\t\treturn response, e2\n\t}\n\treturn\n}", "func verifyPacketComms(t *testing.T, cConn, sConn *Conn, data []byte) {\n\t// All three writes, with ReadPacket.\n\tverifyPacketCommsSpecific(t, cConn, data, useWritePacket, sConn.ReadPacket)\n\tverifyPacketCommsSpecific(t, cConn, data, useWriteEphemeralPacketBuffered, sConn.ReadPacket)\n\tverifyPacketCommsSpecific(t, cConn, data, useWriteEphemeralPacketDirect, sConn.ReadPacket)\n\n\t// All three writes, with readEphemeralPacket.\n\tverifyPacketCommsSpecific(t, cConn, data, useWritePacket, sConn.readEphemeralPacket)\n\tsConn.recycleReadPacket()\n\tverifyPacketCommsSpecific(t, cConn, data, useWriteEphemeralPacketBuffered, sConn.readEphemeralPacket)\n\tsConn.recycleReadPacket()\n\tverifyPacketCommsSpecific(t, cConn, data, useWriteEphemeralPacketDirect, sConn.readEphemeralPacket)\n\tsConn.recycleReadPacket()\n\n\t// All three writes, with readEphemeralPacketDirect, if size allows it.\n\tif len(data) < MaxPacketSize {\n\t\tverifyPacketCommsSpecific(t, cConn, data, useWritePacket, sConn.readEphemeralPacketDirect)\n\t\tsConn.recycleReadPacket()\n\t\tverifyPacketCommsSpecific(t, cConn, data, useWriteEphemeralPacketBuffered, sConn.readEphemeralPacketDirect)\n\t\tsConn.recycleReadPacket()\n\t\tverifyPacketCommsSpecific(t, cConn, data, useWriteEphemeralPacketDirect, sConn.readEphemeralPacketDirect)\n\t\tsConn.recycleReadPacket()\n\t}\n}", "func (q *queue) checkDataSize() error {\n\tif q.dataPageFct.Size()+q.indexPageFct.Size() > q.dataSizeLimit {\n\t\treturn ErrExceedingTotalSizeLimit\n\t}\n\treturn nil\n}", "func TestUnresponsiveDest(test *testing.T) {\n\tt := NewGomegaWithT(test)\n\n\t// Options that do not require administrative privileges.\n\topts := []nettrace.TraceOpt{\n\t\t&nettrace.WithLogging{},\n\t\t&nettrace.WithHTTPReqTrace{\n\t\t\tHeaderFields: 
nettrace.HdrFieldsOptWithValues,\n\t\t},\n\t\t&nettrace.WithSockTrace{},\n\t\t&nettrace.WithDNSQueryTrace{},\n\t}\n\tclient, err := nettrace.NewHTTPClient(nettrace.HTTPClientCfg{\n\t\tReqTimeout: 5 * time.Second,\n\t}, opts...)\n\tt.Expect(err).ToNot(HaveOccurred())\n\n\treq, err := http.NewRequest(\"GET\", \"https://198.51.100.100\", nil)\n\tt.Expect(err).ToNot(HaveOccurred())\n\tresp, err := client.Do(req)\n\tt.Expect(err).To(HaveOccurred())\n\tt.Expect(resp).To(BeNil())\n\ttime.Sleep(time.Second)\n\ttrace, _, err := client.GetTrace(\"unresponsive dest\")\n\tt.Expect(err).ToNot(HaveOccurred())\n\ttraceBeginAsRel := nettrace.Timestamp{IsRel: true, Rel: 0}\n\n\t// Dial trace\n\tt.Expect(trace.Dials).To(HaveLen(1)) // one failed Dial (DNS failed)\n\tdial := trace.Dials[0]\n\tt.Expect(dial.TraceID).ToNot(BeZero())\n\trelTimeIsInBetween(t, dial.DialBeginAt, traceBeginAsRel, trace.TraceEndAt)\n\trelTimeIsInBetween(t, dial.DialEndAt, dial.DialBeginAt, trace.TraceEndAt)\n\trelTimeIsInBetween(t, dial.CtxCloseAt, dial.DialBeginAt, trace.TraceEndAt)\n\tt.Expect(dial.DstAddress).To(Equal(\"198.51.100.100:443\"))\n\tt.Expect(dial.ResolverDials).To(BeEmpty())\n\tt.Expect(dial.DialErr).ToNot(BeZero())\n\tt.Expect(dial.EstablishedConn).To(BeZero())\n\n\t// DNS trace\n\tt.Expect(trace.DNSQueries).To(BeEmpty())\n\n\t// UDP connection trace\n\tt.Expect(trace.UDPConns).To(BeEmpty())\n\n\t// TCP connection trace\n\tt.Expect(trace.TCPConns).To(HaveLen(1))\n\ttcpConn := trace.TCPConns[0]\n\tt.Expect(tcpConn.TraceID).ToNot(BeZero())\n\tt.Expect(tcpConn.FromDial == dial.TraceID).To(BeTrue())\n\tt.Expect(tcpConn.Reused).To(BeFalse())\n\trelTimeIsInBetween(t, tcpConn.HandshakeBeginAt, dial.DialBeginAt, dial.DialEndAt)\n\t// killed from outside of Dial\n\trelTimeIsInBetween(t, tcpConn.HandshakeEndAt, tcpConn.HandshakeBeginAt, trace.TraceEndAt)\n\tt.Expect(tcpConn.ConnCloseAt.Undefined()).To(BeTrue())\n\tt.Expect(net.ParseIP(tcpConn.AddrTuple.SrcIP)).ToNot(BeNil())\n\tt.Expect(net.ParseIP(tcpConn.AddrTuple.DstIP)).ToNot(BeNil())\n\tt.Expect(tcpConn.AddrTuple.SrcPort).ToNot(BeZero()) // btw. 
not easy to get when TLS handshake fails\n\tt.Expect(tcpConn.AddrTuple.DstPort).ToNot(BeZero())\n\tt.Expect(tcpConn.SocketTrace).To(BeZero())\n\tt.Expect(tcpConn.Conntract).To(BeNil())\n\tt.Expect(tcpConn.TotalRecvBytes).To(BeZero())\n\tt.Expect(tcpConn.TotalSentBytes).To(BeZero())\n\n\t// TLS tunnel trace\n\tt.Expect(trace.TLSTunnels).To(BeEmpty())\n\n\t// HTTP request trace\n\tt.Expect(trace.HTTPRequests).To(HaveLen(1))\n\thttpReq := trace.HTTPRequests[0]\n\tt.Expect(httpReq.TraceID).ToNot(BeZero())\n\tt.Expect(httpReq.TCPConn).To(BeZero())\n\tt.Expect(httpReq.ProtoMajor).To(BeEquivalentTo(1))\n\tt.Expect(httpReq.ProtoMinor).To(BeEquivalentTo(1))\n\trelTimeIsInBetween(t, httpReq.ReqSentAt, traceBeginAsRel, trace.TraceEndAt)\n\tt.Expect(httpReq.ReqError).ToNot(BeZero())\n\tt.Expect(httpReq.ReqMethod).To(Equal(\"GET\"))\n\tt.Expect(httpReq.ReqURL).To(Equal(\"https://198.51.100.100\"))\n\tt.Expect(httpReq.ReqHeader).To(BeEmpty())\n\tt.Expect(httpReq.ReqContentLen).To(BeZero())\n\tt.Expect(httpReq.RespRecvAt.Undefined()).To(BeTrue())\n\tt.Expect(httpReq.RespStatusCode).To(BeZero())\n\tt.Expect(httpReq.RespHeader).To(BeEmpty())\n\tt.Expect(httpReq.RespContentLen).To(BeZero())\n\n\terr = client.Close()\n\tt.Expect(err).ToNot(HaveOccurred())\n}", "func truncated(state request.Request, ret *dns.Msg, err error) (*dns.Msg, error) {\n\t// If you query for instance ANY isc.org; you get a truncated query back which miekg/dns fails to unpack\n\t// because the RRs are not finished. The returned message can be useful or useless. Return the original\n\t// query with some header bits set that they should retry with TCP.\n\tif err != dns.ErrTruncated {\n\t\treturn ret, err\n\t}\n\n\t// We may or may not have something sensible... if not reassemble something to send to the client.\n\tm := ret\n\tif ret == nil {\n\t\tm = new(dns.Msg)\n\t\tm.SetReply(state.Req)\n\t\tm.Truncated = true\n\t\tm.Authoritative = true\n\t\tm.Rcode = dns.RcodeSuccess\n\t}\n\treturn m, nil\n}", "func isENOBUFS(err error) bool {\n\treturn errors.Is(err, unix.ENOBUFS)\n}", "func TestConnectInvalidAddr(t *testing.T) {\n\t// connect\n\tctx := createContext(t, time.Second*20)\n\n\t_, errConnect := base.NewMilvusClient(ctx, client.Config{Address: \"aa\"})\n\tcommon.CheckErr(t, errConnect, false, \"context deadline exceeded\")\n}" ]
[ "0.61080605", "0.57464087", "0.5693387", "0.5656182", "0.5621133", "0.5612451", "0.56082", "0.55885124", "0.5567672", "0.5557804", "0.5530482", "0.5529525", "0.54606605", "0.54481703", "0.54177463", "0.54153514", "0.540712", "0.539885", "0.5369703", "0.5310614", "0.5308976", "0.53025156", "0.52552795", "0.5251431", "0.5244154", "0.5223376", "0.51941717", "0.5173942", "0.51629543", "0.51608366", "0.5160277", "0.5156324", "0.5155385", "0.51442087", "0.51438653", "0.51437503", "0.5136319", "0.5131482", "0.5126528", "0.5121159", "0.508618", "0.5077151", "0.5064504", "0.5053237", "0.5048364", "0.5041778", "0.50391155", "0.50370955", "0.5034613", "0.50289685", "0.50041616", "0.50019425", "0.5001042", "0.49980265", "0.49919277", "0.4948573", "0.49461368", "0.49458367", "0.49411285", "0.49309275", "0.49271584", "0.49260008", "0.49247175", "0.49217844", "0.49160254", "0.49057788", "0.4904231", "0.48867053", "0.48847803", "0.48748946", "0.48697898", "0.48697275", "0.48696223", "0.4861715", "0.4855696", "0.48553348", "0.48442906", "0.48426223", "0.48376167", "0.48341727", "0.48335862", "0.48325709", "0.48247707", "0.48068148", "0.48034427", "0.47900552", "0.47874585", "0.47786427", "0.47678953", "0.47639474", "0.47631532", "0.4762536", "0.47619247", "0.4759406", "0.4753512", "0.47491544", "0.47486094", "0.4748172", "0.47481182", "0.47478154" ]
0.54559296
13
Check that packing |compressedQueryBytes| constructs the same query byte-for-byte.
func TestDnsMessageCompressedQueryConfidenceCheck(t *testing.T) {
	m := mustUnpack(compressedQueryBytes)
	packedBytes := mustPack(m)
	if len(packedBytes) != len(compressedQueryBytes) {
		t.Errorf("Packed query has different size than original:\n %v\n %v", packedBytes, compressedQueryBytes)
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestDnsMessageUncompressedQueryConfidenceCheck(t *testing.T) {\n\tm := mustUnpack(uncompressedQueryBytes)\n\tpackedBytes := mustPack(m)\n\tif len(packedBytes) >= len(uncompressedQueryBytes) {\n\t\tt.Errorf(\"Compressed query is not smaller than uncompressed query\")\n\t}\n}", "func (q *CompoundQuery) GetCompressedQuery() []byte {\n\treturn flateCompressor.MustCompressString(\n\t\tojson.MarshalJSON(q),\n\t)\n}", "func TestAddEdnsPaddingCompressedQuery(t *testing.T) {\n\tif len(compressedQueryBytes)%PaddingBlockSize == 0 {\n\t\tt.Errorf(\"compressedQueryBytes does not require padding, so this test is invalid\")\n\t}\n\tpadded, err := AddEdnsPadding(compressedQueryBytes)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif len(padded)%PaddingBlockSize != 0 {\n\t\tt.Errorf(\"AddEdnsPadding failed to correctly pad compressed query\")\n\t}\n}", "func TestAddEdnsPaddingCompressedPaddedQuery(t *testing.T) {\n\tpaddedQuery := simpleQuery\n\tpaddedQuery.Additionals = make([]dnsmessage.Resource, len(simpleQuery.Additionals))\n\tcopy(paddedQuery.Additionals, simpleQuery.Additionals)\n\n\tpaddedQuery.Additionals = append(paddedQuery.Additionals,\n\t\tdnsmessage.Resource{\n\t\t\tHeader: dnsmessage.ResourceHeader{\n\t\t\t\tName: dnsmessage.MustNewName(\".\"),\n\t\t\t\tClass: dnsmessage.ClassINET,\n\t\t\t\tTTL: 0,\n\t\t\t},\n\t\t\tBody: &dnsmessage.OPTResource{\n\t\t\t\tOptions: []dnsmessage.Option{\n\t\t\t\t\t{\n\t\t\t\t\t\tCode: OptResourcePaddingCode,\n\t\t\t\t\t\tData: make([]byte, 5),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\toriginalOnWire := mustPack(&paddedQuery)\n\n\tpaddedOnWire, err := AddEdnsPadding(mustPack(&paddedQuery))\n\tif err != nil {\n\t\tt.Errorf(\"Failed to pad padded query: %v\", err)\n\t}\n\n\tif !bytes.Equal(originalOnWire, paddedOnWire) {\n\t\tt.Errorf(\"AddEdnsPadding tampered with a query that was already padded\")\n\t}\n}", "func TestAddEdnsPaddingUncompressedQuery(t *testing.T) {\n\tif len(uncompressedQueryBytes)%PaddingBlockSize == 0 {\n\t\tt.Errorf(\"uncompressedQueryBytes does not require padding, so this test is invalid\")\n\t}\n\tpadded, err := AddEdnsPadding(uncompressedQueryBytes)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif len(padded)%PaddingBlockSize != 0 {\n\t\tt.Errorf(\"AddEdnsPadding failed to correctly pad uncompressed query\")\n\t}\n}", "func (p *Packet) PackQuery() []byte {\n\tout := new(bytes.Buffer)\n\n\tp.packHeader(out)\n\tp.Question.pack(out)\n\n\tp.Bytes = out.Bytes() // swap in\n\treturn p.Bytes\n}", "func (v Document) QueryBytes(query string) []byte {\n\tr, ok := v.QueryOne(query).([]byte)\n\tif !ok {\n\t\treturn nil\n\t}\n\n\treturn r\n}", "func CheckQueryPattern(b []byte) bool {\n\n\ttheQuery := string(b)\n\ttheQuery = strings.ToLower(theQuery)\n\ttheQuery = strings.TrimSpace(theQuery)\n\n\t// проверка на первый key_word\n\tif !strings.HasPrefix(theQuery, \"select\") {\n\t\treturn false\n\t}\n\n\tfor _, patt := range QueryPatterns {\n\t\tmatched, _ := regexp.Match(patt, []byte(theQuery))\n\t\tif matched {\n\t\t\treturn true // также надо запомнить, какой паттерн подошел\n\t\t}\n\t}\n\treturn false\n}", "func TestAddEdnsPaddingCompressedOptQuery(t *testing.T) {\n\toptQuery := simpleQuery\n\toptQuery.Additionals = make([]dnsmessage.Resource, len(simpleQuery.Additionals))\n\tcopy(optQuery.Additionals, simpleQuery.Additionals)\n\n\toptQuery.Additionals = append(optQuery.Additionals,\n\t\tdnsmessage.Resource{\n\t\t\tHeader: dnsmessage.ResourceHeader{\n\t\t\t\tName: dnsmessage.MustNewName(\".\"),\n\t\t\t\tClass: 
dnsmessage.ClassINET,\n\t\t\t\tTTL: 0,\n\t\t\t},\n\t\t\tBody: &dnsmessage.OPTResource{\n\t\t\t\tOptions: []dnsmessage.Option{},\n\t\t\t},\n\t\t},\n\t)\n\tpaddedOnWire, err := AddEdnsPadding(mustPack(&optQuery))\n\tif err != nil {\n\t\tt.Errorf(\"Failed to pad query with OPT but no padding: %v\", err)\n\t}\n\tif len(paddedOnWire)%PaddingBlockSize != 0 {\n\t\tt.Errorf(\"AddEdnsPadding failed to correctly pad query with OPT but no padding\")\n\t}\n}", "func IsCompressed(id uint8) bool {\n\treturn id >= 20\n}", "func (v Document) QueryAll(query string) []interface{} {\n\tvar results []interface{}\n\tvar err error\n\tif v.table != nil && v.table.keyToCompressed != nil {\n\t\tresults, err = msgpack.NewDecoder(bytes.NewReader(v.data)).\n\t\t\tQueryCompressed(v.table.keyToC, query)\n\t} else {\n\t\tresults, err = msgpack.NewDecoder(bytes.NewReader(v.data)).Query(query)\n\t}\n\n\tif err != nil || len(results) == 0 {\n\t\treturn nil\n\t}\n\treturn results\n}", "func IsCompressed(msg []byte) bool {\n\treturn msg[0]&compressionMask != 0\n}", "func (b *BloomFilter) ContainsBytes(value []byte) bool {\n\tres := true\n\tfor h := 0; h < b.k; h++ {\n\t\tindex := b.kiMiHash(value, h)\n\t\tif b.bucket.Bit(index) == 0 {\n\t\t\tres = false\n\t\t}\n\t}\n\treturn res\n}", "func IsCompressed(proof *CommitmentProof) bool {\n\treturn proof.GetCompressed() != nil\n}", "func DeSerializeQuery(bytes []byte) Query {\n if len(bytes) != 32 {\n fmt.Println(\"Error : bytes length is not 32. Its \", len(bytes))\n }\n\n return Query {\n action : bytes[0],\n empty : 0,\n replyIp : binary.BigEndian.Uint32(bytes[2:6]),\n replyPort : binary.BigEndian.Uint16(bytes[6:8]),\n key : binary.BigEndian.Uint64(bytes[8:16]),\n value : binary.BigEndian.Uint64(bytes[16:24]),\n timeToLive: binary.BigEndian.Uint32(bytes[24:28]),\n requestId : binary.BigEndian.Uint32(bytes[28:32]),\n }\n}", "func (ms *memoryStorer) IsCompressed() bool {\n\treturn true\n}", "func TestShortQuery(t *testing.T) {\n\tvar qerr *queryError\n\tdoh, _ := NewTransport(testURL, ips, nil, nil, nil)\n\t_, err := doh.Query([]byte{})\n\tif err == nil {\n\t\tt.Error(\"Empty query should fail\")\n\t} else if !errors.As(err, &qerr) {\n\t\tt.Errorf(\"Wrong error type: %v\", err)\n\t} else if qerr.status != BadQuery {\n\t\tt.Errorf(\"Wrong error status: %d\", qerr.status)\n\t}\n\n\t_, err = doh.Query([]byte{1})\n\tif err == nil {\n\t\tt.Error(\"One byte query should fail\")\n\t} else if !errors.As(err, &qerr) {\n\t\tt.Errorf(\"Wrong error type: %v\", err)\n\t} else if qerr.status != BadQuery {\n\t\tt.Errorf(\"Wrong error status: %d\", qerr.status)\n\t}\n}", "func (s IntegrationSuite) TestCheckSchemaCompression(t *testing.T) {\n\tdir := getDir(t, \"testdata/validcfg\")\n\n\t// Ignore all linters except for the compression one\n\tforceOnlyRulesWarning(dir.Config, \"compression\")\n\topts, err := OptionsForDir(dir)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error from OptionsForDir: %v\", err)\n\t}\n\tlogicalSchema := dir.LogicalSchemas[0]\n\twsOpts, err := workspace.OptionsForDir(dir, s.d.Instance)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error from workspace.OptionsForDir: %v\", err)\n\t}\n\twsSchema, err := workspace.ExecLogicalSchema(logicalSchema, wsOpts)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error from workspace.ExecLogicalSchema: %v\", err)\n\t}\n\n\t// Count the InnoDB tables in the dir, for use in computing the expected\n\t// warning annotation count below\n\tvar innoTableCount int\n\tfor _, tbl := range wsSchema.Tables {\n\t\tif tbl.Engine == \"InnoDB\" 
{\n\t\t\tinnoTableCount++\n\t\t}\n\t}\n\n\t// Perform tests with various permutations of allow-list and flavor, and\n\t// confirm the number of annotations matches expectations. Note that the only\n\t// compressed tables in the dir are the two in testdata/validcfg/compression.sql;\n\t// one uses KEY_BLOCK_SIZE=2, and the other effectively uses 8 by way of\n\t// defaulting to half the page size.\n\tcases := []struct {\n\t\tallowList []string\n\t\tflavor tengo.Flavor\n\t\texpectedWarningCount int\n\t}{\n\t\t{[]string{\"8kb\"}, s.d.Flavor(), innoTableCount - 1},\n\t\t{[]string{\"page\", \"8kb\"}, tengo.FlavorMySQL57, innoTableCount - 1},\n\t\t{[]string{\"page\"}, tengo.FlavorMariaDB103, innoTableCount},\n\t\t{[]string{\"none\"}, s.d.Flavor(), 2},\n\t\t{[]string{\"none\", \"4kb\"}, s.d.Flavor(), 2},\n\t\t{[]string{\"none\", \"4kb\", \"page\"}, s.d.Flavor(), 2},\n\t\t{[]string{\"none\", \"invalid-value\"}, s.d.Flavor(), 2},\n\t\t{[]string{\"invalid-value\"}, s.d.Flavor(), innoTableCount},\n\t}\n\tfor n, c := range cases {\n\t\topts.RuleConfig[\"compression\"] = c.allowList\n\t\topts.Flavor = c.flavor\n\t\tresult := CheckSchema(wsSchema, opts)\n\t\tif result.WarningCount != c.expectedWarningCount {\n\t\t\tt.Errorf(\"cases[%d] expected warning count %d, instead found %d\", n, c.expectedWarningCount, result.WarningCount)\n\t\t}\n\t}\n\n\t// If the Dockerized test instance's Flavor supports page compression, verify\n\t// that the regexp used by tableCompressionMode() works properly.\n\t// Store a mapping of table name -> expected 2nd return value of tableCompressionMode().\n\tvar tableExpectedClause map[string]string\n\tif s.d.Flavor().Min(tengo.FlavorMySQL57) {\n\t\tdir = getDir(t, \"testdata/pagecomprmysql\")\n\t\ttableExpectedClause = map[string]string{\n\t\t\t\"page_comp_zlib\": \"COMPRESSION='zlib'\",\n\t\t\t\"page_comp_lz4\": \"COMPRESSION='lz4'\",\n\t\t\t\"page_comp_none\": \"\",\n\t\t}\n\t} else if s.d.Flavor().Min(tengo.FlavorMariaDB102) {\n\t\tdir = getDir(t, \"testdata/pagecomprmaria\")\n\t\ttableExpectedClause = map[string]string{\n\t\t\t\"page_comp_1\": \"`PAGE_COMPRESSED`=1\",\n\t\t\t\"page_comp_on\": \"`PAGE_COMPRESSED`='on'\",\n\t\t\t\"page_comp_0\": \"\",\n\t\t\t\"page_comp_off\": \"\",\n\t\t}\n\t}\n\tif tableExpectedClause != nil {\n\t\tlogicalSchema := dir.LogicalSchemas[0]\n\t\twsOpts, err := workspace.OptionsForDir(dir, s.d.Instance)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Unexpected error from workspace.OptionsForDir: %v\", err)\n\t\t}\n\t\twsSchema, err := workspace.ExecLogicalSchema(logicalSchema, wsOpts)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Unexpected error from workspace.ExecLogicalSchema: %v\", err)\n\t\t}\n\t\tif len(wsSchema.Failures) > 0 {\n\t\t\tt.Fatalf(\"%d of the CREATEs in %s unexpectedly failed: %+v\", len(wsSchema.Failures), dir, wsSchema.Failures)\n\t\t}\n\t\tfor _, tbl := range wsSchema.Tables {\n\t\t\texpectedClause, ok := tableExpectedClause[tbl.Name]\n\t\t\tif !ok {\n\t\t\t\tt.Fatalf(\"Unexpectedly found table %s in dir %s, not present in tableExpectedClause mapping for flavor %s\", tbl.Name, dir, s.d.Flavor())\n\t\t\t}\n\t\t\tvar expectedMode string\n\t\t\tif expectedClause == \"\" {\n\t\t\t\texpectedMode = \"none\"\n\t\t\t} else {\n\t\t\t\texpectedMode = \"page\"\n\t\t\t}\n\t\t\tactualMode, actualClause := tableCompressionMode(tbl)\n\t\t\tif actualMode != expectedMode || actualClause != expectedClause {\n\t\t\t\tt.Errorf(\"Unexpected return value from tableCompressionMode(%s): got %q,%q; expected %q,%q\", tbl.Name, actualMode, actualClause, expectedMode, 
expectedClause)\n\t\t\t}\n\t\t}\n\t}\n}", "func (g G1) BytesCompressed() []byte { return g.encodeBytes(true) }", "func canMakePaliQueries(s string, queries [][]int) []bool {\n\tn := len(queries)\n\n\tcnt := make([]int, 1, n+1)\n\tc := 0\n\tfor _, l := range s {\n\t\tc ^= 1 << uint(l-'a')\n\t\tcnt = append(cnt, c)\n\t}\n\n\tres := make([]bool, n)\n\tfor i, q := range queries {\n\t\tlo, hi, k := q[0], q[1], q[2]\n\t\tif k >= 13 {\n\t\t\tres[i] = true\n\t\t\tcontinue\n\t\t}\n\t\tremains := bits(cnt[hi+1] ^ cnt[lo])\n\t\tres[i] = remains/2 <= k\n\t}\n\n\treturn res\n}", "func (o *StorageHyperFlexStorageContainer) HasUnCompressedUsedBytes() bool {\n\tif o != nil && o.UnCompressedUsedBytes != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func didCompress(input []byte) bool {\n\tvar output bytes.Buffer\n\n\tw := zlib.NewWriter(&output)\n\t_, err := w.Write(input)\n\tw.Close()\n\n\treturn err == nil && len(input) > output.Len()\n}", "func contains(shorter, longer *TrieKey, prematchedBits uint) (matches, exact bool, common, child uint) {\n\t// Two variables important in finding which child to descend into\n\tvar pivotByte, numBytes uint\n\tpivotMask := byte(0x80)\n\n\t// calculate `exact`, `common`, and `child` at the end with defer\n\tdefer func() {\n\t\tif !matches {\n\t\t\tvar s, l byte\n\n\t\t\t// We know both of these slices are large enough to index with\n\t\t\t// `numBytes` because `matches` is false and therefore it must have\n\t\t\t// been a previous comparison of these bytes that got us here.\n\t\t\tfor i := prematchedBits / 8; i <= numBytes; i++ {\n\t\t\t\ts, l = shorter.Bits[i], longer.Bits[i]\n\t\t\t\tif s == l {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tcommon = 8*i + uint(bits.LeadingZeros8(s^l))\n\n\t\t\t\t// Whether `longer` goes on the left (0) or right (1)\n\t\t\t\tif longer.Bits[i] < shorter.Bits[i] {\n\t\t\t\t\tchild = 0\n\t\t\t\t} else {\n\t\t\t\t\tchild = 1\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tcommon = shorter.Length\n\t\texact = shorter.Length == longer.Length\n\t\tif !exact {\n\t\t\t// Whether `longer` goes on the left (0) or right (1)\n\t\t\tif longer.Bits[pivotByte]&pivotMask == 0 {\n\t\t\t\tchild = 0\n\t\t\t} else {\n\t\t\t\tchild = 1\n\t\t\t}\n\t\t}\n\t}()\n\n\t// Prefix length of 0 matches everything!\n\tif shorter.Length == 0 {\n\t\tmatches = true\n\t\treturn\n\t}\n\n\t// The bits to compare in the two keys always follows the following pattern:\n\t// 1. any number of leading \"full\" bytes which must match exactly\n\t// 2. 
0 or 1 \"partial\" byte in the least significant (last) position which\n\t// must match up to the number of partial bits (1-7 bits).\n\t//\n\t// The strategy here is to compare the bytes from the least significant\n\t// (last) to the most significant (first) to avoid redundantly comparing\n\t// bytes that might have already matched higher in the tree.\n\n\t// Calculate number of bytes (including possible least-significant partial)\n\t// Decrement this as we compare bytes up to the most significant.\n\tnumBytes = bitsToBytes(shorter.Length)\n\n\t// Figure out how many bits are in the partial byte (0 means no partial)\n\tmaskLen := shorter.Length % 8\n\n\t// If the last byte is partial, compare using a bitmask\n\tif maskLen > 0 {\n\t\tvar mask byte\n\t\tmask = 0xff << (8 - maskLen)\n\n\t\t// decrement before comparing since the slices are indexed from 0\n\t\tnumBytes--\n\t\tif shorter.Bits[numBytes]&mask != longer.Bits[numBytes]&mask {\n\t\t\tmatches = false\n\t\t\treturn\n\t\t}\n\n\t\tpivotMask >>= maskLen\n\t}\n\n\tpivotByte = numBytes\n\n\t// The other bytes are all full and can be compared simply\n\tfor numBytes > (prematchedBits / 8) {\n\t\t// decrement before comparing since the slices are indexed from 0\n\t\tnumBytes--\n\t\tif shorter.Bits[numBytes] != longer.Bits[numBytes] {\n\t\t\tmatches = false\n\t\t\treturn\n\t\t}\n\t}\n\n\tmatches = true\n\treturn\n}", "func (info TrackInfo) IsCompressed() bool {\n\t// bit 31 - data is compressed\n\treturn info&0x80000000 != 0\n}", "func CompareBytes(bs []byte) bool {\n\tcbs := dummyBytes()\n\tfor i, b := range bs {\n\t\tif cbs[i] != b {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n\n\t// We don't use this as we want to be able to test partial Byte slices\n\t//return bytes.Compare(b, DummyBytes()) == 0\n}", "func compareBytes(a []byte, b []byte) bool {\n\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i, j := range a {\n\t\tif b[i] != j {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func isDiffVarintSnappyEncodedPostings(input []byte) bool {\n\treturn bytes.HasPrefix(input, []byte(codecHeaderSnappy))\n}", "func TestBasic(t *testing.T) {\n\n\tq := \"This is the time for all good men to come to the aid of their country...\"\n\t//qq := []byte{\"xThis is the time for all good men to come to the aid of their country...\"}\n\t//qqq := []byte{\"xxThis is the time for all good men to come to the aid of their country...\"}\n\t//qqqq[] := []byte{\"xxxThis is the time for all good men to come to the aid of their country...\"}\n\n\tu := stu(q)\n\th1 := jenkins3.HashWordsLen(u, 13)\n\tfmt.Printf(\"%08x, %0x8, %08x\\n\", h1)\n\n\tb, c := uint32(0), uint32(0)\n\tc, b = jenkins3.HashString(\"\", c, b)\n\t//fmt.Printf(\"%08x, %08x\\n\", c, b)\n\tif c != 0xdeadbeef || b != 0xdeadbeef {\n\t\tt.Logf(\"c=0x%x != 0xdeadbeef || b=0x%x != 0xdeadbeef\\n\", c, b)\n\t\tt.FailNow()\n\t}\n\n\tb, c = 0xdeadbeef, 0\n\tc, b = jenkins3.HashString(\"\", c, b)\n\t//fmt.Printf(\"%08x, %08x\\n\", c, b)\t// bd5b7dde deadbeef\n\tif c != 0xbd5b7dde || b != 0xdeadbeef {\n\t\tt.Logf(\"c=0x%x != 0xbd5b7dde || b=0x%x != 0xdeadbeef\\n\", c, b)\n\t\tt.FailNow()\n\t}\n\n\tb, c = 0xdeadbeef, 0xdeadbeef\n\tc, b = jenkins3.HashString(\"\", c, b)\n\t//fmt.Printf(\"%08x, %08x\\n\", c, b)\t// 9c093ccd bd5b7dde\n\tif c != 0x9c093ccd || b != 0xbd5b7dde {\n\t\tt.Logf(\"c=0x%x != 0x9c093ccd || b=0x%x != 0xbd5b7dde\\n\", c, b)\n\t\tt.FailNow()\n\t}\n\n\tb, c = 0, 0\n\tc, b = jenkins3.HashString(\"Four score and seven years ago\", c, b)\n\t//fmt.Printf(\"%08x, %08x\\n\", c, b)\t// 
17770551 ce7226e6\n\tif c != 0x17770551 || b != 0xce7226e6 {\n\t\tt.Logf(\"c=0x%x != 0x17770551 || b=0x%x != 0xce7226e6\\n\", c, b)\n\t\tt.FailNow()\n\t}\n\n\tb, c = 1, 0\n\tc, b = jenkins3.HashString(\"Four score and seven years ago\", c, b)\n\t//fmt.Printf(\"%08x, %08x\\n\", c, b)\t// e3607cae bd371de4\n\tif c != 0xe3607cae || b != 0xbd371de4 {\n\t\tt.Logf(\"c=0x%x != 0xe3607cae || b=0x%x != 0xbd371de4\\n\", c, b)\n\t\tt.FailNow()\n\t}\n\n\tb, c = 0, 1\n\tc, b = jenkins3.HashString(\"Four score and seven years ago\", c, b)\n\t//fmt.Printf(\"%08x, %08x\\n\", c, b)\t// cd628161 6cbea4b3\n\tif c != 0xcd628161 || b != 0x6cbea4b3 {\n\t\tt.Logf(\"c=0x%x != 0xcd628161 || b=0x%x != 0x6cbea4b3\\n\", c, b)\n\t\tt.FailNow()\n\t}\n\n}", "func ShouldBeCompressed(recoveryFlag int) bool {\n\treturn recoveryFlag >= 31\n}", "func TestCheckSignatureEncoding(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tname string\n\t\tsig []byte\n\t\tisValid bool\n\t}{\n\t\t{\n\t\t\tname: \"valid signature\",\n\t\t\tsig: decodeHex(\"304402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"empty.\",\n\t\t\tsig: nil,\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"bad magic\",\n\t\t\tsig: decodeHex(\"314402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"bad 1st int marker magic\",\n\t\t\tsig: decodeHex(\"304403204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"bad 2nd int marker\",\n\t\t\tsig: decodeHex(\"304402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41032018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"short len\",\n\t\t\tsig: decodeHex(\"304302204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"long len\",\n\t\t\tsig: decodeHex(\"304502204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"long X\",\n\t\t\tsig: decodeHex(\"304402424e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"long Y\",\n\t\t\tsig: decodeHex(\"304402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022118152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"short Y\",\n\t\t\tsig: decodeHex(\"304402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41021918152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" 
+\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"trailing crap\",\n\t\t\tsig: decodeHex(\"304402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d0901\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"X == N \",\n\t\t\tsig: decodeHex(\"30440220fffffffffffffffffffffffffffff\" +\n\t\t\t\t\"ffebaaedce6af48a03bbfd25e8cd0364141022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"X == N \",\n\t\t\tsig: decodeHex(\"30440220fffffffffffffffffffffffffffff\" +\n\t\t\t\t\"ffebaaedce6af48a03bbfd25e8cd0364142022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Y == N\",\n\t\t\tsig: decodeHex(\"304402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd410220fffff\" +\n\t\t\t\t\"ffffffffffffffffffffffffffebaaedce6af48a03bb\" +\n\t\t\t\t\"fd25e8cd0364141\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Y > N\",\n\t\t\tsig: decodeHex(\"304402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd410220fffff\" +\n\t\t\t\t\"ffffffffffffffffffffffffffebaaedce6af48a03bb\" +\n\t\t\t\t\"fd25e8cd0364142\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"0 len X\",\n\t\t\tsig: decodeHex(\"302402000220181522ec8eca07de4860a4acd\" +\n\t\t\t\t\"d12909d831cc56cbbac4622082221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"0 len Y\",\n\t\t\tsig: decodeHex(\"302402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd410200\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"extra R padding\",\n\t\t\tsig: decodeHex(\"30450221004e45e16932b8af514961a1d3a1a\" +\n\t\t\t\t\"25fdf3f4f7732e9d624c6c61548ab5fb8cd410220181\" +\n\t\t\t\t\"522ec8eca07de4860a4acdd12909d831cc56cbbac462\" +\n\t\t\t\t\"2082221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"extra S padding\",\n\t\t\tsig: decodeHex(\"304502204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022100181\" +\n\t\t\t\t\"522ec8eca07de4860a4acdd12909d831cc56cbbac462\" +\n\t\t\t\t\"2082221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t}\n\n\t// flags := ScriptVerifyStrictEncoding\n\tflags := StandardVerifyFlags\n\tfor _, test := range tests {\n\t\terr := TstCheckSignatureEncoding(test.sig, flags)\n\t\tif err != nil && test.isValid {\n\t\t\tt.Errorf(\"checkSignatureEncoding test '%s' failed \"+\n\t\t\t\t\"when it should have succeeded: %v\", test.name,\n\t\t\t\terr)\n\t\t} else if err == nil && !test.isValid {\n\t\t\tt.Errorf(\"checkSignatureEncooding test '%s' succeeded \"+\n\t\t\t\t\"when it should have failed\", test.name)\n\t\t}\n\t}\n}", "func TestBytes(a, b []byte) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i := range a {\n\t\tif a[i] != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (bf *Bloom) Query (element []byte) (found bool) {\n hashFunc := sha1.New()\n var truncHash uint64\n var byteBuffer *bytes.Buffer\n found = true\n for i := 0; i < seedCount; i++ {\n hashCode := seededHash(hashFunc, element, bf.seeds[i])\n byteBuffer = bytes.NewBuffer(hashCode)\n binary.Read(byteBuffer, defaultEndianness, &truncHash)\n \n mappedBit := truncHash % uint64(bf.size)\n found = checkFilter(bf.filter, 
mappedBit)\n if !found {\n break\n }\n }\n return found\n}", "func CheckBigIntInField(a *big.Int) bool {\n\treturn a.Cmp(constants.Q) == -1\n}", "func GetMsgCompressedFlag(header uint64) bool {\n return (header & (1 << msgCompressedOffset)) != 0\n}", "func (me TxsdRecordPatternSimpleContentExtensionOffsetunit) IsByte() bool {\n\treturn me.String() == \"byte\"\n}", "func canCompress(value value) bool { return value&0x7fffffff == value }", "func PrefixBytesDetector(prefix []byte, handler Handler) Detector {\n\treturn Detector{\n\t\tNeeded: len(prefix),\n\t\tTest: func(b []byte) bool {\n\t\t\tfor i, v := range prefix {\n\t\t\t\tif b[i] != v {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn true\n\t\t},\n\t\tHandler: handler,\n\t}\n}", "func TestQuoteBytes(t *testing.T) {\n\tfor i, tc := range quoteTestcases {\n\t\tresult, err := QuoteBytes([]byte(tc.Input))\n\t\tif err != tc.ExpectedError {\n\t\t\tt.Errorf(\"Expected error '%s' in testcase %d, got '%s'\", tc.ExpectedError,\n\t\t\t\ti, err)\n\t\t}\n\t\tif string(result) != tc.Output {\n\t\t\tt.Errorf(\"Expected '%s' in testcase %d, got '%s'\", tc.Output, i, result)\n\t\t}\n\t}\n}", "func VPCOMPRESSQ(ops ...operand.Op) { ctx.VPCOMPRESSQ(ops...) }", "func PTESTm128byte(X1 []byte, X2 []byte)", "func (a *scriptAddress) Compressed() bool {\n\treturn false\n}", "func (encryptor *QueryDataEncryptor) encryptInsertQuery(insert *sqlparser.Insert) (bool, error) {\n\ttableName := sqlparser.String(insert.Table.Name)\n\tschema := encryptor.schemaStore.GetTableSchema(tableName)\n\tif schema == nil {\n\t\t// unsupported table, we have not schema and query hasn't columns description\n\t\tlogrus.Debugf(\"Hasn't schema for table %s\", tableName)\n\t\treturn false, nil\n\t}\n\n\tvar columnsName []string\n\tif len(insert.Columns) > 0 {\n\t\tcolumnsName = make([]string, 0, len(insert.Columns))\n\t\tfor _, col := range insert.Columns {\n\t\t\tcolumnsName = append(columnsName, sqlparser.String(col))\n\t\t}\n\t} else if len(schema.Columns) > 0 {\n\t\tcolumnsName = schema.Columns\n\t}\n\n\tchanged := false\n\n\tif len(columnsName) > 0 {\n\t\tswitch rows := insert.Rows.(type) {\n\t\tcase sqlparser.Values:\n\t\t\tfor _, valTuple := range rows {\n\t\t\t\t// collect values per column\n\t\t\t\tfor j, value := range valTuple {\n\t\t\t\t\tcolumnName := columnsName[j]\n\t\t\t\t\tif changedValue, err := encryptor.encryptExpression(value, schema, columnName); err != nil {\n\t\t\t\t\t\tlogrus.WithError(err).Errorln(\"Can't encrypt expression\")\n\t\t\t\t\t\treturn changed, err\n\t\t\t\t\t} else if changedValue {\n\t\t\t\t\t\tchanged = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(insert.OnDup) > 0 {\n\t\tonDupChanged, err := encryptor.encryptUpdateExpressions(sqlparser.UpdateExprs(insert.OnDup), insert.Table, qualifierToTableMap{insert.Table.Name.String(): insert.Table.Name.String()})\n\t\tif err != nil {\n\t\t\treturn changed, err\n\t\t}\n\t\tchanged = changed || onDupChanged\n\t}\n\n\treturn changed, nil\n}", "func TestAssertEqualBytes(t *testing.T) {\n\tdata := []byte{9, 9, 1, 1, 1, 9, 9}\n\tassertBytesEqual(t, data, data, \"Self\")\n\tassertBytesEqual(t, data[1:4], data[1:4], \"Self\")\n\tassertBytesEqual(t, []byte{1, 1}, []byte{1, 1}, \"Simple match\")\n\tassertBytesEqual(t, []byte{1, 2, 3}, []byte{1, 2, 3}, \"content mismatch\")\n\tassertBytesEqual(t, []byte{1, 1, 1}, data[2:5], \"slice match\")\n}", "func Q(a, b, c, d, t byte) {\n\tq[a][GETBYTE(t,0)] ^ (q[b][GETBYTE(t,1)] << 8) ^ (q[c][GETBYTE(t,2)] << 16) ^ (q[d][GETBYTE(t,3)] << 24)\n}", "func 
IsObjectCompressed(path string) bool {\n\treturn path[len(path)-len(lz4.Extension):] == lz4.Extension\n}", "func buffersDistinct(buffers ...[]byte) bool {\n\tfor i := 0; i < len(buffers); i++ {\n\t\tfor j := i + 1; j < len(buffers); j++ {\n\t\t\tif bytes.Equal(buffers[i], buffers[j]) {\n\t\t\t\t// Different entry, but equal arrays\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}", "func (p *CassandraClient) PrepareCqlQuery(query []byte, compression Compression) (r *CqlPreparedResult_, err error) {\n\tif err = p.sendPrepareCqlQuery(query, compression); err != nil {\n\t\treturn\n\t}\n\treturn p.recvPrepareCqlQuery()\n}", "func TestCompletePackUnpack(t *testing.T) {\n\n\tif testing.Short() {\n\t\tt.Skip(\"skipping testing in short mode\")\n\t}\n\n\tmaxCategory := categoryBitMask >> 24\n\tmaxLocation := locationBitMask >> 15\n\tmaxResult := resultBitMask\n\n\tfor c := 0; c < maxCategory; c++ {\n\t\tfor l := 0; l < maxLocation; l++ {\n\t\t\tfor r := 0; r < maxResult; r++ {\n\t\t\t\tpacked := pack(uint32(c), uint32(l), uint32(r))\n\t\t\t\tunpackedC, unpackedL, unpackedR := unpack(packed)\n\t\t\t\tif uint32(c) != unpackedC || uint32(l) != unpackedL || uint32(r) != unpackedR {\n\t\t\t\t\tt.Errorf(\"pack/unpack does not work properly\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (o *StorageHyperFlexStorageContainer) GetUnCompressedUsedBytesOk() (*int64, bool) {\n\tif o == nil || o.UnCompressedUsedBytes == nil {\n\t\treturn nil, false\n\t}\n\treturn o.UnCompressedUsedBytes, true\n}", "func (m *DigestHolderMock) MinimockAsBytesInspect() {\n\tfor _, e := range m.AsBytesMock.expectations {\n\t\tif mm_atomic.LoadUint64(&e.Counter) < 1 {\n\t\t\tm.t.Error(\"Expected call to DigestHolderMock.AsBytes\")\n\t\t}\n\t}\n\n\t// if default expectation was set then invocations count should be greater than zero\n\tif m.AsBytesMock.defaultExpectation != nil && mm_atomic.LoadUint64(&m.afterAsBytesCounter) < 1 {\n\t\tm.t.Error(\"Expected call to DigestHolderMock.AsBytes\")\n\t}\n\t// if func was set then invocations count should be greater than zero\n\tif m.funcAsBytes != nil && mm_atomic.LoadUint64(&m.afterAsBytesCounter) < 1 {\n\t\tm.t.Error(\"Expected call to DigestHolderMock.AsBytes\")\n\t}\n}", "func (c parser) IsQueryReport(command []byte) bool {\n\tif len(command) == 0 {\n\t\treturn false\n\t}\n\n\tloginByte := command[25:27]\n\thexReportNumber, err := convert.FromByteToHex(loginByte)\n\n\tif (strings.ToUpper(hexReportNumber) == \"0xA002\") && err == nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func TestCompression(t *testing.T) {\n\tvec := NewVector()\n\tfor i := 0; i < 1e5; i++ {\n\t\tv := int(rand.Uint32())\n\t\tvec.Add(v)\n\t}\n\n\tsizeofUint := int(unsafe.Sizeof(uint(0)))\n\n\trawsize := float64(sizeofUint * 1e5)\n\tvecsize := float64(vec.Size())\n\n\tpercentage := ((rawsize - vecsize) / rawsize) * 100\n\tfmt.Printf(\"=== COMPRESSION: %.2f%%\\n\", percentage)\n}", "func digestToBytes(t *testing.T, digest types.Digest) []byte {\n\tbytes, err := sql.DigestToBytes(digest)\n\trequire.NoError(t, err)\n\treturn bytes\n}", "func assertBytesEqual(t *testing.T, expected, actual []byte, format string, args ...interface{}) {\n\tmatch := true\n\tmismatchIndex := 0\n\tif len(expected) == len(actual) {\n\t\tfor i := 0; i < len(expected); i++ {\n\t\t\tif expected[i] != actual[i] {\n\t\t\t\tmatch = false\n\t\t\t\tmismatchIndex = i\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t} else {\n\t\tmatch = false\n\t\tt.Errorf(\"Lengths don't match Expected=%d Actual=%d\", len(expected), len(actual))\n\t}\n\tif !match 
{\n\t\tt.Errorf(\"Mismatch at index %d \", mismatchIndex)\n\t\tt.Errorf(\"\\tActual String = %s\", string(actual))\n\t\tt.Errorf(\"\\tExpected String = %s\", string(expected))\n\t\tt.Errorf(\"\\tActual = %v\", actual)\n\t\tt.Errorf(\"\\tExpected = %v\", expected)\n\t\tt.Errorf(format, args)\n\t}\n}", "func TestBenchmarkCompressed(t *testing.T) {\n\tstartFakeBookingApp()\n\tresp := httptest.NewRecorder()\n\tc := NewController(NewRequest(showRequest), NewResponse(resp))\n\tc.SetAction(\"Hotels\", \"Show\")\n\tConfig.SetOption(\"results.compressed\", \"true\")\n\tresult := Hotels{c}.Show(3)\n\tresult.Apply(c.Request, c.Response)\n\tif !strings.Contains(resp.Body.String(), \"300 Main St.\") {\n\t\tt.Errorf(\"Failed to find hotel address in action response:\\n%s\", resp.Body)\n\t}\n}", "func QueryVerify(name string) bool {\n\tcount, err := dbmap.SelectInt(\"select count(1) from cr_image where Image_name = ?\", name)\n\tif err != nil {\n\t\tlog.Fatalln(\"Verify failed\", err)\n\t\treturn false\n\t}\n\tif count < 1 {\n\t\treturn true\n\t}\n\treturn false\n}", "func (me TxsdCounterSimpleContentExtensionType) IsByte() bool { return me.String() == \"byte\" }", "func (m *SignatureKeyHolderMock) AsBytesFinished() bool {\n\t// if expectation series were set then invocations count should be equal to expectations count\n\tif len(m.AsBytesMock.expectationSeries) > 0 {\n\t\treturn atomic.LoadUint64(&m.AsBytesCounter) == uint64(len(m.AsBytesMock.expectationSeries))\n\t}\n\n\t// if main expectation was set then invocations count should be greater than zero\n\tif m.AsBytesMock.mainExpectation != nil {\n\t\treturn atomic.LoadUint64(&m.AsBytesCounter) > 0\n\t}\n\n\t// if func was set then invocations count should be greater than zero\n\tif m.AsBytesFunc != nil {\n\t\treturn atomic.LoadUint64(&m.AsBytesCounter) > 0\n\t}\n\n\treturn true\n}", "func TestCheckPubKeyEncoding(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tname string\n\t\tkey []byte\n\t\tisValid bool\n\t}{\n\t\t{\n\t\t\tname: \"uncompressed ok\",\n\t\t\tkey: decodeHex(\"0411db93e1dcdb8a016b49840f8c53bc1eb68\" +\n\t\t\t\t\"a382e97b1482ecad7b148a6909a5cb2e0eaddfb84ccf\" +\n\t\t\t\t\"9744464f82e160bfa9b8b64f9d4c03f999b8643f656b\" +\n\t\t\t\t\"412a3\"),\n\t\t\tisValid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"compressed ok\",\n\t\t\tkey: decodeHex(\"02ce0b14fb842b1ba549fdd675c98075f12e9\" +\n\t\t\t\t\"c510f8ef52bd021a9a1f4809d3b4d\"),\n\t\t\tisValid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"compressed ok\",\n\t\t\tkey: decodeHex(\"032689c7c2dab13309fb143e0e8fe39634252\" +\n\t\t\t\t\"1887e976690b6b47f5b2a4b7d448e\"),\n\t\t\tisValid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"hybrid\",\n\t\t\tkey: decodeHex(\"0679be667ef9dcbbac55a06295ce870b07029\" +\n\t\t\t\t\"bfcdb2dce28d959f2815b16f81798483ada7726a3c46\" +\n\t\t\t\t\"55da4fbfc0e1108a8fd17b448a68554199c47d08ffb1\" +\n\t\t\t\t\"0d4b8\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"empty\",\n\t\t\tkey: nil,\n\t\t\tisValid: false,\n\t\t},\n\t}\n\n\t// flags := ScriptVerifyWitnessPubKeyType | ScriptVerifyStrictEncoding\n\tflags := StandardVerifyFlags\n\tfor _, test := range tests {\n\t\terr := TstCheckPubKeyEncoding(test.key, flags)\n\t\tif err != nil && test.isValid {\n\t\t\tt.Errorf(\"checkPubkeyEncoding test '%s' failed \"+\n\t\t\t\t\"when it should have succeeded: %v\", test.name,\n\t\t\t\terr)\n\t\t} else if err == nil && !test.isValid {\n\t\t\tt.Errorf(\"checkPubkeyEncooding test '%s' succeeded \"+\n\t\t\t\t\"when it should have failed\", test.name)\n\t\t}\n\t}\n\n}", "func (encryptor 
*QueryDataEncryptor) encryptUpdateQuery(update *sqlparser.Update) (bool, error) {\n\ttables := encryptor.getTablesFromUpdate(update.TableExprs)\n\tif !encryptor.hasTablesToEncrypt(tables) {\n\t\treturn false, nil\n\t}\n\tif len(tables) == 0 {\n\t\treturn false, nil\n\t}\n\tqualifierMap := qualifierToTableMap{}\n\tfor _, table := range tables {\n\t\tif table.As.IsEmpty() {\n\t\t\tqualifierMap[table.TableName.Name.String()] = table.TableName.Name.String()\n\t\t} else {\n\t\t\tqualifierMap[table.As.String()] = table.TableName.Name.String()\n\t\t}\n\t}\n\tfirstTable := tables[0].TableName\n\treturn encryptor.encryptUpdateExpressions(update.Exprs, firstTable, qualifierMap)\n}", "func (ma *MixedcaseAddress) ValidChecksum() bool {\n\treturn ma.original == ma.addr.Hex()\n}", "func (q queryManager) checkQueryNeedsTransaction(qp dbquery.QueryParsed) (bool, error) {\n\n\tif qp.IsSelect() {\n\t\treturn false, nil\n\t}\n\t// transaction for any update\n\treturn true, nil\n}", "func queryCore(r *bitrow, bits []bitrow, hashes []uint32)", "func PACKUSDWm128byte(X1 []byte, X2 []byte)", "func TestHardCoded(t *testing.T) {\n\tc1, err := SigCompress(sig1big)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tt.Logf(\"big1:\\n%x\\ncom1:\\n%x\\n\", sig1big, c1)\n\n\tc2, err := SigCompress(sig2big)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tt.Logf(\"big2:\\n%x\\ncom2:\\n%x\\n\", sig2big, c2)\n\n\tc3, err := SigCompress(sig3big)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tt.Logf(\"big3:\\n%x\\ncom3:\\n%x\\n\", sig3big, c3)\n\n\tr1 := SigDecompress(c1)\n\tt.Logf(\"dec1:\\n%x\\n\", r1)\n\n\tr2 := SigDecompress(c2)\n\tt.Logf(\"dec1:\\n%x\\n\", r2)\n\n\tr3 := SigDecompress(c3)\n\tt.Logf(\"dec1:\\n%x\\n\", r3)\n\n}", "func (d *digest) compress() {\n\tvar m, v [16]uint32\n\tfor i := 0; i < 16; i++ {\n\t\tm[i] = binary.LittleEndian.Uint32(d.buf[i*4:])\n\t}\n\tfor i := 0; i < 8; i++ {\n\t\tv[i] = d.h[i]\n\t}\n\tv[8] = iv[0]\n\tv[9] = iv[1]\n\tv[10] = iv[2]\n\tv[11] = iv[3]\n\tv[12] = d.t[0] ^ iv[4]\n\tv[13] = d.t[1] ^ iv[5]\n\tv[14] = d.f[0] ^ iv[6]\n\tv[15] = d.f[1] ^ iv[7]\n\n\trotr32 := func (w uint32, c uint32) uint32 {\n\t\treturn (w>>c) | (w<<(32-c))\n\t}\n\tG := func(r, i, a, b, c, d int) {\n\t\tv[a] = v[a] + v[b] + m[sigma[r][2*i+0]]\n\t\tv[d] = rotr32(v[d] ^ v[a], 16)\n\t\tv[c] = v[c] + v[d]\n\t\tv[b] = rotr32(v[b] ^ v[c], 12)\n\t\tv[a] = v[a] + v[b] + m[sigma[r][2*i+1]]\n\t\tv[d] = rotr32(v[d] ^ v[a], 8)\n\t\tv[c] = v[c] + v[d]\n\t\tv[b] = rotr32(v[b] ^ v[c], 7)\n\t}\n\tfor i := 0; i < 10; i++ {\n\t\tG(i, 0, 0, 4, 8, 12);\n\t\tG(i, 1, 1, 5, 9, 13);\n\t\tG(i, 2, 2, 6, 10, 14);\n\t\tG(i, 3, 3, 7, 11, 15);\n\t\tG(i, 4, 0, 5, 10, 15);\n\t\tG(i, 5, 1, 6, 11, 12);\n\t\tG(i, 6, 2, 7, 8, 13);\n\t\tG(i, 7, 3, 4, 9, 14);\n\t}\n\tfor i := 0; i < 8; i++ {\n\t\td.h[i] = d.h[i] ^ v[i] ^ v[i+8]\n\t}\n}", "func hasValidDataExtractionQuery(query interface{}) error {\n\tqueryConverted := query.(map[string]interface{})\n\tif val, ok := queryConverted[\"columns\"]; ok {\n\t\tcolumns := reflect.ValueOf(val)\n\t\tif columns.Len() > 10 {\n\t\t\treturn errors.New(\"Data Extraction Validator: The key 'columns' in data extraction result must have up to 10 columns.\")\n\t\t}\n\t}\n\treturn nil\n}", "func TestGenerateQuery(t *testing.T) {\n\tfor _, test := range []struct {\n\t\tname, q, want string\n\t}{\n\t\t{\"querySearchSymbol\", SymbolQuery(SearchTypeSymbol), querySearchSymbol},\n\t\t{\"querySearchPackageDotSymbol\", SymbolQuery(SearchTypePackageDotSymbol), querySearchPackageDotSymbol},\n\t\t{\"querySearchMultiWordExact\", 
SymbolQuery(SearchTypeMultiWordExact), querySearchMultiWordExact},\n\t} {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tif diff := cmp.Diff(test.want, test.q); diff != \"\" {\n\t\t\t\tt.Errorf(\"mismatch (-want, +got):\\n%s\", diff)\n\t\t\t}\n\t\t})\n\t}\n}", "func CompareBytes(b1, b2 []byte) int8 {\n\tif b1 == nil && b2 == nil {\n\t\tpanic(\"cannot compare with nil array\")\n\t}\n\tvar i, j int\n\tif len(b1) < len(b2) {\n\t\ti = len(b1) - len(b2)\n\t} else if len(b1) > len(b2) {\n\t\tj = len(b2) - len(b1)\n\t}\n\tfor {\n\t\tif i < 0 && j >= 0 && b2[j] > 0 {\n\t\t\treturn -1\n\t\t} else if j < 0 && i >= 0 && b1[i] > 0 {\n\t\t\treturn 1\n\t\t} else if j >= len(b1) && i >= len(b2) {\n\t\t\treturn 0\n\t\t} else if b1[i] < b2[j] {\n\t\t\treturn -1\n\t\t} else if b1[i] > b2[j] {\n\t\t\treturn 1\n\t\t}\n\t\ti++\n\t\tj++\n\t}\n}", "func ParsePromCompressedRequest(r *http.Request) ([]byte, *handler.ParseError) {\n\tbody := r.Body\n\tif r.Body == nil {\n\t\terr := fmt.Errorf(\"empty request body\")\n\t\treturn nil, handler.NewParseError(err, http.StatusBadRequest)\n\t}\n\tdefer body.Close()\n\tcompressed, err := ioutil.ReadAll(body)\n\n\tif err != nil {\n\t\treturn nil, handler.NewParseError(err, http.StatusInternalServerError)\n\t}\n\n\tif len(compressed) == 0 {\n\t\treturn nil, handler.NewParseError(fmt.Errorf(\"empty request body\"), http.StatusBadRequest)\n\t}\n\n\treqBuf, err := snappy.Decode(nil, compressed)\n\tif err != nil {\n\t\treturn nil, handler.NewParseError(err, http.StatusBadRequest)\n\t}\n\n\treturn reqBuf, nil\n}", "func (x *Big) isCompact() bool { return x.compact != c.Inflated }", "func queryBlockEnc(blockStr string, prefix int64, blockId int) QueryBlockCipher {\n\tvar ret QueryBlockCipher\n\texp := getHashedValue(blockStr, prefix, blockId) // get the hash value in power part of ciphertext\n\tsubIndex, _ := strconv.Atoi(new(big.Int).Mod(exp, big.NewInt(subIndexSize)).String()) // calculate the sub-index value (G_k mod subIndexSize)\n\tret.subIndex = uint8(subIndex)\n\n\t// generate the ciphertext\n\tret.cipher = exp.Bytes()\n\treturn ret\n}", "func (q *Quantity) AsCanonicalBytes(out []byte) (result []byte, exponent int32) {\n\tif q.d.Dec != nil {\n\t\treturn q.d.AsCanonicalBytes(out)\n\t}\n\treturn q.i.AsCanonicalBytes(out)\n}", "func queriesMostlyEqual(m1 dnsmessage.Message, m2 dnsmessage.Message) bool {\n\t// Make fields we don't care about match, so that equality check is easy.\n\tm1.Header.ID = m2.Header.ID\n\tm1.Additionals = m2.Additionals\n\treturn reflect.DeepEqual(m1, m2)\n}", "func isMsgPackString(b byte) bool {\n\treturn (0xbf&b) == b || b == 0xd9 || b == 0xda || b == 0xdb\n}", "func QueryObject(stub shim.ChaincodeStubInterface, objectType string, keys []string) ([]byte, error) {\n\n // Check how many keys\n\n err := VerifyAtLeastOneKeyIsPresent(objectType, keys )\n if err != nil {\n return nil, err\n }\n\n compoundKey, _ := stub.CreateCompositeKey(objectType, keys)\n fmt.Println(\"QueryObject() : Compound Key : \", compoundKey)\n\n Avalbytes, err := stub.GetState(compoundKey)\n if err != nil {\n return nil, err\n }\n\n return Avalbytes, nil\n}", "func (m *DigestHolderMock) MinimockAsBytesDone() bool {\n\tfor _, e := range m.AsBytesMock.expectations {\n\t\tif mm_atomic.LoadUint64(&e.Counter) < 1 {\n\t\t\treturn false\n\t\t}\n\t}\n\n\t// if default expectation was set then invocations count should be greater than zero\n\tif m.AsBytesMock.defaultExpectation != nil && mm_atomic.LoadUint64(&m.afterAsBytesCounter) < 1 {\n\t\treturn false\n\t}\n\t// if func was set then 
invocations count should be greater than zero\n\tif m.funcAsBytes != nil && mm_atomic.LoadUint64(&m.afterAsBytesCounter) < 1 {\n\t\treturn false\n\t}\n\treturn true\n}", "func isDiffVarintSnappyStreamedEncodedPostings(input []byte) bool {\n\treturn bytes.HasPrefix(input, []byte(codecHeaderStreamedSnappy))\n}", "func TestResultBlockResults_regression8583(t *testing.T) {\n\tconst keyData = \"0123456789abcdef0123456789abcdef\" // 32 bytes\n\twantKey := base64.StdEncoding.EncodeToString([]byte(keyData))\n\n\trsp := &ResultBlockResults{\n\t\tValidatorUpdates: []abci.ValidatorUpdate{{\n\t\t\tPubKey: pbcrypto.PublicKey{\n\t\t\t\tSum: &pbcrypto.PublicKey_Ed25519{Ed25519: []byte(keyData)},\n\t\t\t},\n\t\t\tPower: 400,\n\t\t}},\n\t}\n\n\t// Use compact here so the test data remain legible. The output from the\n\t// marshaler will have whitespace folded out so we need to do that too for\n\t// the comparison to be valid.\n\tvar buf bytes.Buffer\n\trequire.NoError(t, json.Compact(&buf, []byte(fmt.Sprintf(`\n{\n \"height\": \"0\",\n \"txs_results\": null,\n \"total_gas_used\": \"0\",\n \"finalize_block_events\": null,\n \"validator_updates\": [\n {\n \"pub_key\":{\"type\": \"tendermint/PubKeyEd25519\", \"value\": \"%s\"},\n \"power\": \"400\"\n }\n ],\n \"consensus_param_updates\": null\n}`, wantKey))))\n\n\tbits, err := json.Marshal(rsp)\n\tif err != nil {\n\t\tt.Fatalf(\"Encoding block result: %v\", err)\n\t}\n\tif diff := cmp.Diff(buf.String(), string(bits)); diff != \"\" {\n\t\tt.Errorf(\"Marshaled result (-want, +got):\\n%s\", diff)\n\t}\n\n\tback := new(ResultBlockResults)\n\tif err := json.Unmarshal(bits, back); err != nil {\n\t\tt.Fatalf(\"Unmarshaling: %v\", err)\n\t}\n\tif diff := cmp.Diff(rsp, back); diff != \"\" {\n\t\tt.Errorf(\"Unmarshaled result (-want, +got):\\n%s\", diff)\n\t}\n}", "func (e ChecksumMismatch) IsChecksumMismatch() {}", "func (*BlockQuery) Descriptor() ([]byte, []int) {\n\treturn file_protos_query_proto_rawDescGZIP(), []int{0}\n}", "func compactSigCheck(t *testing.T, sig []byte) {\n\tt.Helper()\n\tb := int(sig[32])\n\tif b < 0 {\n\t\tt.Errorf(\"highest bit is negative: %d\", b)\n\t}\n\tif ((b >> 7) == 1) != ((b & 0x80) == 0x80) {\n\t\tt.Errorf(\"highest bit: %d bit >> 7: %d\", b, b>>7)\n\t}\n\tif (b & 0x80) == 0x80 {\n\t\tt.Errorf(\"highest bit: %d bit & 0x80: %d\", b, b&0x80)\n\t}\n}", "func (query *Query) Serialize() []byte {\n bytes := make([]byte, 32, 32)\n\n bytes[0] = query.action\n bytes[1] = query.empty\n binary.BigEndian.PutUint32(bytes[2:6], query.replyIp)\n binary.BigEndian.PutUint16(bytes[6:8], query.replyPort)\n binary.BigEndian.PutUint64(bytes[8:16], query.key)\n binary.BigEndian.PutUint64(bytes[16:24], query.value)\n binary.BigEndian.PutUint32(bytes[24:28], query.timeToLive)\n binary.BigEndian.PutUint32(bytes[28:32], query.requestId)\n\n return bytes\n}", "func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) {\n\tif q.IsZero() {\n\t\treturn zeroBytes, nil\n\t}\n\n\tvar rounded CanonicalValue\n\tformat := q.Format\n\tswitch format {\n\tcase DecimalExponent, DecimalSI:\n\tcase BinarySI:\n\t\tif q.CmpInt64(-1024) > 0 && q.CmpInt64(1024) < 0 {\n\t\t\t// This avoids rounding and hopefully confusion, too.\n\t\t\tformat = DecimalSI\n\t\t} else {\n\t\t\tvar exact bool\n\t\t\tif rounded, exact = q.AsScale(0); !exact {\n\t\t\t\t// Don't lose precision-- show as DecimalSI\n\t\t\t\tformat = DecimalSI\n\t\t\t}\n\t\t}\n\tdefault:\n\t\tformat = DecimalExponent\n\t}\n\n\t// TODO: If BinarySI formatting is requested but would cause rounding, upgrade 
to\n\t// one of the other formats.\n\tswitch format {\n\tcase DecimalExponent, DecimalSI:\n\t\tnumber, exponent := q.AsCanonicalBytes(out)\n\t\tsuffix, _ := quantitySuffixer.constructBytes(10, exponent, format)\n\t\treturn number, suffix\n\tdefault:\n\t\t// format must be BinarySI\n\t\tnumber, exponent := rounded.AsCanonicalBase1024Bytes(out)\n\t\tsuffix, _ := quantitySuffixer.constructBytes(2, exponent*10, format)\n\t\treturn number, suffix\n\t}\n}", "func CheckEncode(b []byte, ver byte) string {\n\tblob := make([]byte, 0, VersionSize+len(b)+ChecksumSize)\n\tblob = append(blob, ver)\n\tblob = append(blob, b...)\n\n\tchecksum := Checksum(blob)\n\tblob = append(blob, checksum[:]...)\n\n\treturn EncodeToString(blob)\n}", "func AssertDebugSerialize(t *testing.T, dialect jet.Dialect, clause jet.Serializer, query string, args ...interface{}) {\n\tout := jet.SQLBuilder{Dialect: dialect, Debug: true}\n\tjet.Serialize(clause, jet.SelectStatementType, &out)\n\n\tAssertDeepEqual(t, out.Buff.String(), query)\n\n\tif len(args) > 0 {\n\t\tAssertDeepEqual(t, out.Args, args)\n\t}\n}", "func PMULDQm128byte(X1 []byte, X2 []byte)", "func check_args(parsed_query []string, num_expected int) bool {\n\treturn (len(parsed_query) >= num_expected)\n}", "func Qpack(d *Domain, t uint16) *Packet {\n\treturn QpackID(d, t, randomID())\n}", "func isGzipped(b []byte) bool {\n\treturn b[0] == 0x1f && b[1] == 0x8b\n}", "func (m *SignatureKeyHolderMock) AsByteStringFinished() bool {\n\t// if expectation series were set then invocations count should be equal to expectations count\n\tif len(m.AsByteStringMock.expectationSeries) > 0 {\n\t\treturn atomic.LoadUint64(&m.AsByteStringCounter) == uint64(len(m.AsByteStringMock.expectationSeries))\n\t}\n\n\t// if main expectation was set then invocations count should be greater than zero\n\tif m.AsByteStringMock.mainExpectation != nil {\n\t\treturn atomic.LoadUint64(&m.AsByteStringCounter) > 0\n\t}\n\n\t// if func was set then invocations count should be greater than zero\n\tif m.AsByteStringFunc != nil {\n\t\treturn atomic.LoadUint64(&m.AsByteStringCounter) > 0\n\t}\n\n\treturn true\n}", "func (c *cBinaryExpr) bytesCompare0() {\n\tcond := c.Node\n\tx, ok1 := cond.X.(*ast.CallExpr)\n\tif !ok1 {\n\t\treturn\n\t}\n\tif !isFun(x.Fun, \"bytes\", \"Compare\") {\n\t\treturn\n\t}\n\tif !isBasicLitValue(cond.Y, token.INT, \"0\") {\n\t\treturn\n\t}\n\n\tif !astutil.UsesImport(c.req.AstFile, \"bytes\") {\n\t\treturn\n\t}\n\n\tfun := x.Fun.(*ast.SelectorExpr)\n\n\tswitch cond.Op {\n\tcase token.EQL: // bytes.Compare(s,a) == 0\n\t\tfun.Sel.Name = \"Equal\"\n\t\tc.Cursor.Replace(cond.X)\n\tcase token.NEQ: // bytes.Compare(s,a) != 0\n\t\tfun.Sel.Name = \"Equal\"\n\t\tc1 := &ast.UnaryExpr{\n\t\t\tOp: token.NOT,\n\t\t\tX: x,\n\t\t}\n\t\tc.Cursor.Replace(c1)\n\t}\n}", "func (c *ContainsComparer) CompareQuery(db1, db2 *sql.DB, query string) (string, error, error) {\n\tresult, err := sqldiff.GetQueryResult(db2, query)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\tif result.HasResult() {\n\t\tresultStr := result.String()\n\t\tif strings.Contains(resultStr, c.Content) {\n\t\t\treturn \"\", nil, nil\n\t\t} else {\n\t\t\treturn sqldiff.GetColorDiff(c.Content, resultStr), nil, nil\n\t\t}\n\t}\n\treturn \"\", nil, nil\n}", "func getChunksBitVectorFromHost(client *rpc.Client, addrs []storage.Address) (string, error) {\n\tvar hostChunks string\n\n\terr := client.Call(&hostChunks, \"bzz_has\", addrs)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn hostChunks, nil\n}", "func TestNewPacketFrom(t 
*testing.T) {\n\n\tt.Log(\"Start TestNewPacketFrom +++++++++++++\")\n\t// Get those go-to queries\n\ttcases := tmake()\n\n\tfor _, tcase := range tcases {\n\t\tt.Log(\"Testing for: \", tcase.Serialized)\n\t\tns := NewMySQLPacketFrom(0, tcase.ns.Serialized[HEADER_SIZE+1:])\n\t\tif ns.Length != tcase.ns.Length {\n\t\t\tt.Log(\"Length expected\", tcase.ns.Length, \"instead got\", ns.Length)\n\t\t}\n\t\tif ns.Sqid != tcase.ns.Sqid {\n\t\t\tt.Log(\"Length expected\", tcase.ns.Sqid, \"instead got\", ns.Sqid)\n\t\t}\n\t\tif ns.Cmd != tcase.ns.Cmd {\n\t\t\tt.Log(\"Command expected\", tcase.ns.Cmd, \"instead got\", ns.Cmd)\n\t\t\tt.Fail()\n\t\t}\n\t\tif !reflect.DeepEqual(ns.Serialized, tcase.ns.Serialized) {\n\t\t\tt.Log(\"Serialized expected\", tcase.ns.Serialized, \"instead got\", ns.Serialized)\n\t\t\tt.Fail()\n\t\t}\n\t\tt.Log(\"Done testing for: \", tcase.Serialized)\n\t}\n\n\tt.Log(\"End TestNewPacketFrom +++++++++++++\")\n\n}", "func QEncodeIfNeeded(src []byte, offset int) (dst []byte) {\n\tsafe := true\n\tfor i, sl := 0, len(src); i < sl && safe; i++ {\n\t\tsafe = ' ' <= src[i] && src[i] <= '~'\n\t}\n\tif safe {\n\t\treturn src\n\t}\n\tdst, _ = QEncode(src, offset)\n\treturn dst\n}", "func compress(chars []byte) int {\n \n}", "func compareKeyBytes(a, b []byte, reverse bool, removeID bool) bool {\n\tif removeID {\n\t\tb = stripID(b)\n\t}\n\n\tvar r int\n\n\tif !reverse {\n\t\tr = bytes.Compare(a, b)\n\t} else {\n\t\tr = bytes.Compare(b, a)\n\t}\n\n\tif r < 0 {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (*TxnQuery) Descriptor() ([]byte, []int) {\n\treturn file_protos_query_proto_rawDescGZIP(), []int{1}\n}", "func byteDiff(a, b uint8) int {\n\tcount := 0\n\tfor i := uint(0); i < 8; i++ {\n\t\tif a&(1<<i) != b&(1<<i) {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn count\n}" ]
[ "0.68890274", "0.57353276", "0.5696035", "0.5437959", "0.54121333", "0.537639", "0.5337406", "0.5200456", "0.51706797", "0.5057195", "0.5042812", "0.49630687", "0.4941367", "0.49219006", "0.4919686", "0.49029464", "0.48985067", "0.4887714", "0.48703045", "0.4842406", "0.483072", "0.47928485", "0.47794908", "0.4751436", "0.4703513", "0.46459553", "0.4607848", "0.45757848", "0.457114", "0.45693886", "0.45554346", "0.45195538", "0.4517085", "0.45096472", "0.4503406", "0.44964227", "0.44957435", "0.4491588", "0.44895113", "0.4480226", "0.44753808", "0.44641873", "0.4463602", "0.44575572", "0.44420153", "0.44315326", "0.44212434", "0.4411016", "0.44098434", "0.44082758", "0.43913698", "0.43903652", "0.43872443", "0.43815035", "0.43662876", "0.43651402", "0.43599182", "0.43524304", "0.4351275", "0.43426302", "0.43387556", "0.4336832", "0.43263713", "0.43236583", "0.43185464", "0.431561", "0.42997953", "0.4296908", "0.42914897", "0.42883235", "0.42838505", "0.42804453", "0.42780232", "0.4277862", "0.42693734", "0.4263642", "0.42605022", "0.4256735", "0.42512664", "0.42486346", "0.4248445", "0.42441836", "0.42379388", "0.42283365", "0.42262727", "0.42213672", "0.42211744", "0.42196727", "0.4213928", "0.42089117", "0.42088702", "0.4206273", "0.41976467", "0.41974077", "0.41967788", "0.4195854", "0.41918567", "0.41899705", "0.41891077", "0.41869146" ]
0.71391696
0
Check that packing |uncompressedQueryBytes| constructs a smaller query byte-for-byte, since label compression is enabled by default.
func TestDnsMessageUncompressedQueryConfidenceCheck(t *testing.T) {
	m := mustUnpack(uncompressedQueryBytes)
	packedBytes := mustPack(m)
	if len(packedBytes) >= len(uncompressedQueryBytes) {
		t.Errorf("Compressed query is not smaller than uncompressed query")
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestDnsMessageCompressedQueryConfidenceCheck(t *testing.T) {\n\tm := mustUnpack(compressedQueryBytes)\n\tpackedBytes := mustPack(m)\n\tif len(packedBytes) != len(compressedQueryBytes) {\n\t\tt.Errorf(\"Packed query has different size than original:\\n %v\\n %v\", packedBytes, compressedQueryBytes)\n\t}\n}", "func TestAddEdnsPaddingUncompressedQuery(t *testing.T) {\n\tif len(uncompressedQueryBytes)%PaddingBlockSize == 0 {\n\t\tt.Errorf(\"uncompressedQueryBytes does not require padding, so this test is invalid\")\n\t}\n\tpadded, err := AddEdnsPadding(uncompressedQueryBytes)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif len(padded)%PaddingBlockSize != 0 {\n\t\tt.Errorf(\"AddEdnsPadding failed to correctly pad uncompressed query\")\n\t}\n}", "func (b *bigchunk) UncompressedSize() int { return b.Size() }", "func IsCompressed(id uint8) bool {\n\treturn id >= 20\n}", "func TestAddEdnsPaddingCompressedQuery(t *testing.T) {\n\tif len(compressedQueryBytes)%PaddingBlockSize == 0 {\n\t\tt.Errorf(\"compressedQueryBytes does not require padding, so this test is invalid\")\n\t}\n\tpadded, err := AddEdnsPadding(compressedQueryBytes)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif len(padded)%PaddingBlockSize != 0 {\n\t\tt.Errorf(\"AddEdnsPadding failed to correctly pad compressed query\")\n\t}\n}", "func (b *block) uncompressedSizeBytes() uint64 {\n\trowsCount := uint64(b.Len())\n\n\t// Take into account timestamps\n\tn := rowsCount * uint64(len(time.RFC3339Nano))\n\n\t// Take into account columns\n\tcs := b.columns\n\tfor i := range cs {\n\t\tc := &cs[i]\n\t\tnameLen := uint64(len(c.name))\n\t\tif nameLen == 0 {\n\t\t\tnameLen = uint64(len(\"_msg\"))\n\t\t}\n\t\tfor _, v := range c.values {\n\t\t\tif len(v) > 0 {\n\t\t\t\tn += nameLen + 2 + uint64(len(v))\n\t\t\t}\n\t\t}\n\t}\n\n\t// Take into account constColumns\n\tccs := b.constColumns\n\tfor i := range ccs {\n\t\tcc := &ccs[i]\n\t\tnameLen := uint64(len(cc.Name))\n\t\tif nameLen == 0 {\n\t\t\tnameLen = uint64(len(\"_msg\"))\n\t\t}\n\t\tn += rowsCount * (2 + nameLen + uint64(len(cc.Value)))\n\t}\n\n\treturn n\n}", "func (q *CompoundQuery) GetCompressedQuery() []byte {\n\treturn flateCompressor.MustCompressString(\n\t\tojson.MarshalJSON(q),\n\t)\n}", "func (p *Packet) PackQuery() []byte {\n\tout := new(bytes.Buffer)\n\n\tp.packHeader(out)\n\tp.Question.pack(out)\n\n\tp.Bytes = out.Bytes() // swap in\n\treturn p.Bytes\n}", "func (s IntegrationSuite) TestCheckSchemaCompression(t *testing.T) {\n\tdir := getDir(t, \"testdata/validcfg\")\n\n\t// Ignore all linters except for the compression one\n\tforceOnlyRulesWarning(dir.Config, \"compression\")\n\topts, err := OptionsForDir(dir)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error from OptionsForDir: %v\", err)\n\t}\n\tlogicalSchema := dir.LogicalSchemas[0]\n\twsOpts, err := workspace.OptionsForDir(dir, s.d.Instance)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error from workspace.OptionsForDir: %v\", err)\n\t}\n\twsSchema, err := workspace.ExecLogicalSchema(logicalSchema, wsOpts)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error from workspace.ExecLogicalSchema: %v\", err)\n\t}\n\n\t// Count the InnoDB tables in the dir, for use in computing the expected\n\t// warning annotation count below\n\tvar innoTableCount int\n\tfor _, tbl := range wsSchema.Tables {\n\t\tif tbl.Engine == \"InnoDB\" {\n\t\t\tinnoTableCount++\n\t\t}\n\t}\n\n\t// Perform tests with various permutations of allow-list and flavor, and\n\t// confirm the number of annotations matches expectations. 
Note that the only\n\t// compressed tables in the dir are the two in testdata/validcfg/compression.sql;\n\t// one uses KEY_BLOCK_SIZE=2, and the other effectively uses 8 by way of\n\t// defaulting to half the page size.\n\tcases := []struct {\n\t\tallowList []string\n\t\tflavor tengo.Flavor\n\t\texpectedWarningCount int\n\t}{\n\t\t{[]string{\"8kb\"}, s.d.Flavor(), innoTableCount - 1},\n\t\t{[]string{\"page\", \"8kb\"}, tengo.FlavorMySQL57, innoTableCount - 1},\n\t\t{[]string{\"page\"}, tengo.FlavorMariaDB103, innoTableCount},\n\t\t{[]string{\"none\"}, s.d.Flavor(), 2},\n\t\t{[]string{\"none\", \"4kb\"}, s.d.Flavor(), 2},\n\t\t{[]string{\"none\", \"4kb\", \"page\"}, s.d.Flavor(), 2},\n\t\t{[]string{\"none\", \"invalid-value\"}, s.d.Flavor(), 2},\n\t\t{[]string{\"invalid-value\"}, s.d.Flavor(), innoTableCount},\n\t}\n\tfor n, c := range cases {\n\t\topts.RuleConfig[\"compression\"] = c.allowList\n\t\topts.Flavor = c.flavor\n\t\tresult := CheckSchema(wsSchema, opts)\n\t\tif result.WarningCount != c.expectedWarningCount {\n\t\t\tt.Errorf(\"cases[%d] expected warning count %d, instead found %d\", n, c.expectedWarningCount, result.WarningCount)\n\t\t}\n\t}\n\n\t// If the Dockerized test instance's Flavor supports page compression, verify\n\t// that the regexp used by tableCompressionMode() works properly.\n\t// Store a mapping of table name -> expected 2nd return value of tableCompressionMode().\n\tvar tableExpectedClause map[string]string\n\tif s.d.Flavor().Min(tengo.FlavorMySQL57) {\n\t\tdir = getDir(t, \"testdata/pagecomprmysql\")\n\t\ttableExpectedClause = map[string]string{\n\t\t\t\"page_comp_zlib\": \"COMPRESSION='zlib'\",\n\t\t\t\"page_comp_lz4\": \"COMPRESSION='lz4'\",\n\t\t\t\"page_comp_none\": \"\",\n\t\t}\n\t} else if s.d.Flavor().Min(tengo.FlavorMariaDB102) {\n\t\tdir = getDir(t, \"testdata/pagecomprmaria\")\n\t\ttableExpectedClause = map[string]string{\n\t\t\t\"page_comp_1\": \"`PAGE_COMPRESSED`=1\",\n\t\t\t\"page_comp_on\": \"`PAGE_COMPRESSED`='on'\",\n\t\t\t\"page_comp_0\": \"\",\n\t\t\t\"page_comp_off\": \"\",\n\t\t}\n\t}\n\tif tableExpectedClause != nil {\n\t\tlogicalSchema := dir.LogicalSchemas[0]\n\t\twsOpts, err := workspace.OptionsForDir(dir, s.d.Instance)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Unexpected error from workspace.OptionsForDir: %v\", err)\n\t\t}\n\t\twsSchema, err := workspace.ExecLogicalSchema(logicalSchema, wsOpts)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Unexpected error from workspace.ExecLogicalSchema: %v\", err)\n\t\t}\n\t\tif len(wsSchema.Failures) > 0 {\n\t\t\tt.Fatalf(\"%d of the CREATEs in %s unexpectedly failed: %+v\", len(wsSchema.Failures), dir, wsSchema.Failures)\n\t\t}\n\t\tfor _, tbl := range wsSchema.Tables {\n\t\t\texpectedClause, ok := tableExpectedClause[tbl.Name]\n\t\t\tif !ok {\n\t\t\t\tt.Fatalf(\"Unexpectedly found table %s in dir %s, not present in tableExpectedClause mapping for flavor %s\", tbl.Name, dir, s.d.Flavor())\n\t\t\t}\n\t\t\tvar expectedMode string\n\t\t\tif expectedClause == \"\" {\n\t\t\t\texpectedMode = \"none\"\n\t\t\t} else {\n\t\t\t\texpectedMode = \"page\"\n\t\t\t}\n\t\t\tactualMode, actualClause := tableCompressionMode(tbl)\n\t\t\tif actualMode != expectedMode || actualClause != expectedClause {\n\t\t\t\tt.Errorf(\"Unexpected return value from tableCompressionMode(%s): got %q,%q; expected %q,%q\", tbl.Name, actualMode, actualClause, expectedMode, expectedClause)\n\t\t\t}\n\t\t}\n\t}\n}", "func CheckQueryPattern(b []byte) bool {\n\n\ttheQuery := string(b)\n\ttheQuery = strings.ToLower(theQuery)\n\ttheQuery = 
strings.TrimSpace(theQuery)\n\n\t// проверка на первый key_word\n\tif !strings.HasPrefix(theQuery, \"select\") {\n\t\treturn false\n\t}\n\n\tfor _, patt := range QueryPatterns {\n\t\tmatched, _ := regexp.Match(patt, []byte(theQuery))\n\t\tif matched {\n\t\t\treturn true // также надо запомнить, какой паттерн подошел\n\t\t}\n\t}\n\treturn false\n}", "func (o *StorageHyperFlexStorageContainer) HasUnCompressedUsedBytes() bool {\n\tif o != nil && o.UnCompressedUsedBytes != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func DeSerializeQuery(bytes []byte) Query {\n if len(bytes) != 32 {\n fmt.Println(\"Error : bytes length is not 32. Its \", len(bytes))\n }\n\n return Query {\n action : bytes[0],\n empty : 0,\n replyIp : binary.BigEndian.Uint32(bytes[2:6]),\n replyPort : binary.BigEndian.Uint16(bytes[6:8]),\n key : binary.BigEndian.Uint64(bytes[8:16]),\n value : binary.BigEndian.Uint64(bytes[16:24]),\n timeToLive: binary.BigEndian.Uint32(bytes[24:28]),\n requestId : binary.BigEndian.Uint32(bytes[28:32]),\n }\n}", "func MaxEncodedLen(ct CompressionType, srcLen uint64) (uint64, bool) {\n\tif ct == Snappy {\n\t\tif srcLen > MaxBlockLen(ct) {\n\t\t\treturn 0, false\n\t\t}\n\t\tsz := snappy.MaxEncodedLen(int(srcLen))\n\t\tif sz == -1 {\n\t\t\treturn 0, false\n\t\t}\n\t\treturn uint64(sz), true\n\t}\n\tpanic(\"not supported compression type\")\n}", "func (b *BloomFilter) ContainsBytes(value []byte) bool {\n\tres := true\n\tfor h := 0; h < b.k; h++ {\n\t\tindex := b.kiMiHash(value, h)\n\t\tif b.bucket.Bit(index) == 0 {\n\t\t\tres = false\n\t\t}\n\t}\n\treturn res\n}", "func TestCompletePackUnpack(t *testing.T) {\n\n\tif testing.Short() {\n\t\tt.Skip(\"skipping testing in short mode\")\n\t}\n\n\tmaxCategory := categoryBitMask >> 24\n\tmaxLocation := locationBitMask >> 15\n\tmaxResult := resultBitMask\n\n\tfor c := 0; c < maxCategory; c++ {\n\t\tfor l := 0; l < maxLocation; l++ {\n\t\t\tfor r := 0; r < maxResult; r++ {\n\t\t\t\tpacked := pack(uint32(c), uint32(l), uint32(r))\n\t\t\t\tunpackedC, unpackedL, unpackedR := unpack(packed)\n\t\t\t\tif uint32(c) != unpackedC || uint32(l) != unpackedL || uint32(r) != unpackedR {\n\t\t\t\t\tt.Errorf(\"pack/unpack does not work properly\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func uncompressedRowsSizeBytes(rows [][]Field) uint64 {\n\tn := uint64(0)\n\tfor _, fields := range rows {\n\t\tn += uncompressedRowSizeBytes(fields)\n\t}\n\treturn n\n}", "func uncompressedRowSizeBytes(fields []Field) uint64 {\n\tn := uint64(len(time.RFC3339Nano)) // log timestamp\n\tfor _, f := range fields {\n\t\tnameLen := len(f.Name)\n\t\tif nameLen == 0 {\n\t\t\tnameLen = len(\"_msg\")\n\t\t}\n\t\tn += uint64(2 + nameLen + len(f.Value))\n\t}\n\treturn n\n}", "func TestAddEdnsPaddingCompressedOptQuery(t *testing.T) {\n\toptQuery := simpleQuery\n\toptQuery.Additionals = make([]dnsmessage.Resource, len(simpleQuery.Additionals))\n\tcopy(optQuery.Additionals, simpleQuery.Additionals)\n\n\toptQuery.Additionals = append(optQuery.Additionals,\n\t\tdnsmessage.Resource{\n\t\t\tHeader: dnsmessage.ResourceHeader{\n\t\t\t\tName: dnsmessage.MustNewName(\".\"),\n\t\t\t\tClass: dnsmessage.ClassINET,\n\t\t\t\tTTL: 0,\n\t\t\t},\n\t\t\tBody: &dnsmessage.OPTResource{\n\t\t\t\tOptions: []dnsmessage.Option{},\n\t\t\t},\n\t\t},\n\t)\n\tpaddedOnWire, err := AddEdnsPadding(mustPack(&optQuery))\n\tif err != nil {\n\t\tt.Errorf(\"Failed to pad query with OPT but no padding: %v\", err)\n\t}\n\tif len(paddedOnWire)%PaddingBlockSize != 0 {\n\t\tt.Errorf(\"AddEdnsPadding failed to correctly pad query with OPT 
but no padding\")\n\t}\n}", "func canMakePaliQueries(s string, queries [][]int) []bool {\n\tn := len(queries)\n\n\tcnt := make([]int, 1, n+1)\n\tc := 0\n\tfor _, l := range s {\n\t\tc ^= 1 << uint(l-'a')\n\t\tcnt = append(cnt, c)\n\t}\n\n\tres := make([]bool, n)\n\tfor i, q := range queries {\n\t\tlo, hi, k := q[0], q[1], q[2]\n\t\tif k >= 13 {\n\t\t\tres[i] = true\n\t\t\tcontinue\n\t\t}\n\t\tremains := bits(cnt[hi+1] ^ cnt[lo])\n\t\tres[i] = remains/2 <= k\n\t}\n\n\treturn res\n}", "func TestAddEdnsPaddingCompressedPaddedQuery(t *testing.T) {\n\tpaddedQuery := simpleQuery\n\tpaddedQuery.Additionals = make([]dnsmessage.Resource, len(simpleQuery.Additionals))\n\tcopy(paddedQuery.Additionals, simpleQuery.Additionals)\n\n\tpaddedQuery.Additionals = append(paddedQuery.Additionals,\n\t\tdnsmessage.Resource{\n\t\t\tHeader: dnsmessage.ResourceHeader{\n\t\t\t\tName: dnsmessage.MustNewName(\".\"),\n\t\t\t\tClass: dnsmessage.ClassINET,\n\t\t\t\tTTL: 0,\n\t\t\t},\n\t\t\tBody: &dnsmessage.OPTResource{\n\t\t\t\tOptions: []dnsmessage.Option{\n\t\t\t\t\t{\n\t\t\t\t\t\tCode: OptResourcePaddingCode,\n\t\t\t\t\t\tData: make([]byte, 5),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\toriginalOnWire := mustPack(&paddedQuery)\n\n\tpaddedOnWire, err := AddEdnsPadding(mustPack(&paddedQuery))\n\tif err != nil {\n\t\tt.Errorf(\"Failed to pad padded query: %v\", err)\n\t}\n\n\tif !bytes.Equal(originalOnWire, paddedOnWire) {\n\t\tt.Errorf(\"AddEdnsPadding tampered with a query that was already padded\")\n\t}\n}", "func IsByteLength(str string, min, max int) bool {\n\treturn len(str) >= min && len(str) <= max\n}", "func TestShortQuery(t *testing.T) {\n\tvar qerr *queryError\n\tdoh, _ := NewTransport(testURL, ips, nil, nil, nil)\n\t_, err := doh.Query([]byte{})\n\tif err == nil {\n\t\tt.Error(\"Empty query should fail\")\n\t} else if !errors.As(err, &qerr) {\n\t\tt.Errorf(\"Wrong error type: %v\", err)\n\t} else if qerr.status != BadQuery {\n\t\tt.Errorf(\"Wrong error status: %d\", qerr.status)\n\t}\n\n\t_, err = doh.Query([]byte{1})\n\tif err == nil {\n\t\tt.Error(\"One byte query should fail\")\n\t} else if !errors.As(err, &qerr) {\n\t\tt.Errorf(\"Wrong error type: %v\", err)\n\t} else if qerr.status != BadQuery {\n\t\tt.Errorf(\"Wrong error status: %d\", qerr.status)\n\t}\n}", "func isDiffVarintSnappyEncodedPostings(input []byte) bool {\n\treturn bytes.HasPrefix(input, []byte(codecHeaderSnappy))\n}", "func (v Document) QueryBytes(query string) []byte {\n\tr, ok := v.QueryOne(query).([]byte)\n\tif !ok {\n\t\treturn nil\n\t}\n\n\treturn r\n}", "func PrefixBytesDetector(prefix []byte, handler Handler) Detector {\n\treturn Detector{\n\t\tNeeded: len(prefix),\n\t\tTest: func(b []byte) bool {\n\t\t\tfor i, v := range prefix {\n\t\t\t\tif b[i] != v {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn true\n\t\t},\n\t\tHandler: handler,\n\t}\n}", "func doFindSmallerSize(size int64, pattern string) bool {\n\ti, err := humanize.ParseBytes(pattern)\n\tfatalIf(probe.NewError(err), \"Error parsing string passed to flag smaller\")\n\n\treturn int64(i) > size\n}", "func (o *StorageHyperFlexStorageContainer) GetUnCompressedUsedBytesOk() (*int64, bool) {\n\tif o == nil || o.UnCompressedUsedBytes == nil {\n\t\treturn nil, false\n\t}\n\treturn o.UnCompressedUsedBytes, true\n}", "func (o *ArchivedAnalysis) HasArchiveSizeBytes() bool {\n\tif o != nil && o.ArchiveSizeBytes != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func isMsgPackString(b byte) bool {\n\treturn (0xbf&b) == b || b == 0xd9 || b == 0xda || b == 
0xdb\n}", "func CheckBigIntInField(a *big.Int) bool {\n\treturn a.Cmp(constants.Q) == -1\n}", "func IsCompressed(msg []byte) bool {\n\treturn msg[0]&compressionMask != 0\n}", "func didCompress(input []byte) bool {\n\tvar output bytes.Buffer\n\n\tw := zlib.NewWriter(&output)\n\t_, err := w.Write(input)\n\tw.Close()\n\n\treturn err == nil && len(input) > output.Len()\n}", "func QueryBinAutoSize(s string) QueryOption {\n\treturn func(q *queryOptions) error {\n\t\tq.requestProperties.Options[QueryBinAutoSizeValue] = s\n\t\treturn nil\n\t}\n}", "func hasValidDataExtractionQuery(query interface{}) error {\n\tqueryConverted := query.(map[string]interface{})\n\tif val, ok := queryConverted[\"columns\"]; ok {\n\t\tcolumns := reflect.ValueOf(val)\n\t\tif columns.Len() > 10 {\n\t\t\treturn errors.New(\"Data Extraction Validator: The key 'columns' in data extraction result must have up to 10 columns.\")\n\t\t}\n\t}\n\treturn nil\n}", "func PTESTm128byte(X1 []byte, X2 []byte)", "func TestBenchmarkCompressed(t *testing.T) {\n\tstartFakeBookingApp()\n\tresp := httptest.NewRecorder()\n\tc := NewController(NewRequest(showRequest), NewResponse(resp))\n\tc.SetAction(\"Hotels\", \"Show\")\n\tConfig.SetOption(\"results.compressed\", \"true\")\n\tresult := Hotels{c}.Show(3)\n\tresult.Apply(c.Request, c.Response)\n\tif !strings.Contains(resp.Body.String(), \"300 Main St.\") {\n\t\tt.Errorf(\"Failed to find hotel address in action response:\\n%s\", resp.Body)\n\t}\n}", "func canCompress(value value) bool { return value&0x7fffffff == value }", "func contains(shorter, longer *TrieKey, prematchedBits uint) (matches, exact bool, common, child uint) {\n\t// Two variables important in finding which child to descend into\n\tvar pivotByte, numBytes uint\n\tpivotMask := byte(0x80)\n\n\t// calculate `exact`, `common`, and `child` at the end with defer\n\tdefer func() {\n\t\tif !matches {\n\t\t\tvar s, l byte\n\n\t\t\t// We know both of these slices are large enough to index with\n\t\t\t// `numBytes` because `matches` is false and therefore it must have\n\t\t\t// been a previous comparison of these bytes that got us here.\n\t\t\tfor i := prematchedBits / 8; i <= numBytes; i++ {\n\t\t\t\ts, l = shorter.Bits[i], longer.Bits[i]\n\t\t\t\tif s == l {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tcommon = 8*i + uint(bits.LeadingZeros8(s^l))\n\n\t\t\t\t// Whether `longer` goes on the left (0) or right (1)\n\t\t\t\tif longer.Bits[i] < shorter.Bits[i] {\n\t\t\t\t\tchild = 0\n\t\t\t\t} else {\n\t\t\t\t\tchild = 1\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tcommon = shorter.Length\n\t\texact = shorter.Length == longer.Length\n\t\tif !exact {\n\t\t\t// Whether `longer` goes on the left (0) or right (1)\n\t\t\tif longer.Bits[pivotByte]&pivotMask == 0 {\n\t\t\t\tchild = 0\n\t\t\t} else {\n\t\t\t\tchild = 1\n\t\t\t}\n\t\t}\n\t}()\n\n\t// Prefix length of 0 matches everything!\n\tif shorter.Length == 0 {\n\t\tmatches = true\n\t\treturn\n\t}\n\n\t// The bits to compare in the two keys always follows the following pattern:\n\t// 1. any number of leading \"full\" bytes which must match exactly\n\t// 2. 
0 or 1 \"partial\" byte in the least significant (last) position which\n\t// must match up to the number of partial bits (1-7 bits).\n\t//\n\t// The strategy here is to compare the bytes from the least significant\n\t// (last) to the most significant (first) to avoid redundantly comparing\n\t// bytes that might have already matched higher in the tree.\n\n\t// Calculate number of bytes (including possible least-significant partial)\n\t// Decrement this as we compare bytes up to the most significant.\n\tnumBytes = bitsToBytes(shorter.Length)\n\n\t// Figure out how many bits are in the partial byte (0 means no partial)\n\tmaskLen := shorter.Length % 8\n\n\t// If the last byte is partial, compare using a bitmask\n\tif maskLen > 0 {\n\t\tvar mask byte\n\t\tmask = 0xff << (8 - maskLen)\n\n\t\t// decrement before comparing since the slices are indexed from 0\n\t\tnumBytes--\n\t\tif shorter.Bits[numBytes]&mask != longer.Bits[numBytes]&mask {\n\t\t\tmatches = false\n\t\t\treturn\n\t\t}\n\n\t\tpivotMask >>= maskLen\n\t}\n\n\tpivotByte = numBytes\n\n\t// The other bytes are all full and can be compared simply\n\tfor numBytes > (prematchedBits / 8) {\n\t\t// decrement before comparing since the slices are indexed from 0\n\t\tnumBytes--\n\t\tif shorter.Bits[numBytes] != longer.Bits[numBytes] {\n\t\t\tmatches = false\n\t\t\treturn\n\t\t}\n\t}\n\n\tmatches = true\n\treturn\n}", "func (m MarketDataRequestReject) HasEncodedTextLen() bool {\n\treturn m.Has(tag.EncodedTextLen)\n}", "func IsCompressed(proof *CommitmentProof) bool {\n\treturn proof.GetCompressed() != nil\n}", "func (me TxsdCounterSimpleContentExtensionType) IsByte() bool { return me.String() == \"byte\" }", "func compactSigCheck(t *testing.T, sig []byte) {\n\tt.Helper()\n\tb := int(sig[32])\n\tif b < 0 {\n\t\tt.Errorf(\"highest bit is negative: %d\", b)\n\t}\n\tif ((b >> 7) == 1) != ((b & 0x80) == 0x80) {\n\t\tt.Errorf(\"highest bit: %d bit >> 7: %d\", b, b>>7)\n\t}\n\tif (b & 0x80) == 0x80 {\n\t\tt.Errorf(\"highest bit: %d bit & 0x80: %d\", b, b&0x80)\n\t}\n}", "func (ms *memoryStorer) IsCompressed() bool {\n\treturn true\n}", "func ParsePromCompressedRequest(r *http.Request) ([]byte, *handler.ParseError) {\n\tbody := r.Body\n\tif r.Body == nil {\n\t\terr := fmt.Errorf(\"empty request body\")\n\t\treturn nil, handler.NewParseError(err, http.StatusBadRequest)\n\t}\n\tdefer body.Close()\n\tcompressed, err := ioutil.ReadAll(body)\n\n\tif err != nil {\n\t\treturn nil, handler.NewParseError(err, http.StatusInternalServerError)\n\t}\n\n\tif len(compressed) == 0 {\n\t\treturn nil, handler.NewParseError(fmt.Errorf(\"empty request body\"), http.StatusBadRequest)\n\t}\n\n\treqBuf, err := snappy.Decode(nil, compressed)\n\tif err != nil {\n\t\treturn nil, handler.NewParseError(err, http.StatusBadRequest)\n\t}\n\n\treturn reqBuf, nil\n}", "func (l *LexInner) Bytes(number int) bool {\n\tif l.mark.pos+number > len(l.input) {\n\t\treturn false\n\t}\n\tl.mark.width = number\n\tl.mark.pos += number\n\treturn true\n}", "func ValidUTF8(data []byte) bool {\n\tm := len(data)\n\tif m <= 1 {\n\t\treturn true\n\t}\n\tzerroByteCount := 0\n\tfor i := 0; i < m-1; {\n\t\tif (data[i] == 0x0) && (data[i+1] != 0x0) {\n\t\t\tzerroByteCount++\n\t\t}\n\t\tn, cp := testUTF8bitPattern(data[i])\n\t\t//n - the number of bytes following this one that are used to encode the data\n\t\t//n == 0 cannot happen; it would mean the bit mask is 1000 0000 -> not allowed for the first byte of UTF-8\n\t\tif n == 0 {\n\t\t\treturn false\n\t\t}\n\n\t\tif i+int(n) >= m 
{\n\t\t\tbreak\n\t\t}\n\n\t\ti++\n\t\tvar j int32\n\t\tfor j = 1; j < n; j++ {\n\t\t\t//data bytes must have the mask 10xx xxxx\n\t\t\tif (data[i] & 0xC0) != 0x80 {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tcp = (cp << 6) | int32(data[i]&0x3F)\n\t\t\ti++\n\t\t}\n\n\t\tif (cp > 0x10FFFF) ||\n\t\t\t((cp >= 0xD800) && (cp <= 0xDFFF)) ||\n\t\t\t((cp <= 0x007F) && (n != 1)) ||\n\t\t\t((cp >= 0x0080) && (cp <= 0x07FF) && (n != 2)) ||\n\t\t\t((cp >= 0x0800) && (cp <= 0xFFFF) && (n != 3)) ||\n\t\t\t((cp >= 0x10000) && (cp <= 0x1FFFFF) && (n != 4)) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn float64(zerroByteCount)/float64(m) < 0.05\n}", "func (info TrackInfo) IsCompressed() bool {\n\t// bit 31 - data is compressed\n\treturn info&0x80000000 != 0\n}", "func (me TxsdRecordPatternSimpleContentExtensionOffsetunit) IsByte() bool {\n\treturn me.String() == \"byte\"\n}", "func canPack(src []uint64, n, bits int) bool {\n\tif len(src) < n {\n\t\treturn false\n\t}\n\n\t// Selector 0,1 are special and use 0 bits to encode runs of 1's\n\tif bits == 0 {\n\t\tfor _, v := range src {\n\t\t\tif v != 1 {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\n\tmax := uint64((1 << uint64(bits)) - 1)\n\n\tfor _, s := range src[:n] {\n\t\tif s > max {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func ByteLength(str string, min, max int) bool {\n\treturn len(str) >= min && len(str) <= max\n}", "func PMULDQm128byte(X1 []byte, X2 []byte)", "func (o *ArchivedAnalysis) GetArchiveSizeBytesOk() (*int32, bool) {\n\tif o == nil || o.ArchiveSizeBytes == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ArchiveSizeBytes, true\n}", "func ByteLength(str string, minLen int, maxLen ...int) bool {\n\tstrLen := len(str)\n\n\t// only min length check.\n\tif len(maxLen) == 0 {\n\t\treturn strLen >= minLen\n\t}\n\n\t// min and max length check\n\treturn strLen >= minLen && strLen <= maxLen[0]\n}", "func GetMsgCompressedFlag(header uint64) bool {\n    return (header & (1 << msgCompressedOffset)) != 0\n}", "func (v Document) QueryAll(query string) []interface{} {\n\tvar results []interface{}\n\tvar err error\n\tif v.table != nil && v.table.keyToCompressed != nil {\n\t\tresults, err = msgpack.NewDecoder(bytes.NewReader(v.data)).\n\t\t\tQueryCompressed(v.table.keyToC, query)\n\t} else {\n\t\tresults, err = msgpack.NewDecoder(bytes.NewReader(v.data)).Query(query)\n\t}\n\n\tif err != nil || len(results) == 0 {\n\t\treturn nil\n\t}\n\treturn results\n}", "func ShouldBeCompressed(recoveryFlag int) bool {\n\treturn recoveryFlag >= 31\n}", "func PACKUSDWm128byte(X1 []byte, X2 []byte)", "func (x *Big) isCompact() bool { return x.compact != c.Inflated }", "func doFindLargerSize(size int64, pattern string) bool {\n\ti, err := humanize.ParseBytes(pattern)\n\tfatalIf(probe.NewError(err), \"Error parsing string passed to flag larger\")\n\n\treturn int64(i) < size\n}", "func ValidPkcs7(b []byte) (bool, int, error) {\n\tlength := len(b)\n\tif length < 1 {\n\t\treturn false, 0, errors.New(\"empty slice\")\n\t}\n\n\tif length%aes.BlockSize != 0 {\n\t\treturn false, 0, fmt.Errorf(\"invalid padding: len(%s) is not a multiple of %d\", string(b), aes.BlockSize)\n\t}\n\n\tlast := b[length-1 : length]\n\tpad, n := binary.Uvarint(last)\n\n\tif n <= 0 || pad == 0 {\n\t\treturn false, 0, errors.New(\"no padding\")\n\t}\n\n\tif pad > aes.BlockSize {\n\t\treturn false, 0, errors.New(\"last byte exceeds blocksize\")\n\t}\n\n\t// Check that padding is as long as pad\n\tpadding := make([]byte, pad)\n\tpadding = b[length-int(pad):]\n\tif len(padding) != 
int(pad) {\n\t\treturn false, 0, fmt.Errorf(\"invalid padding: expected %d bytes of padding, got %d\", pad, len(padding))\n\t}\n\n\t// All bytes in padding should be the same\n\ttemp := padding[0]\n\tfor _, val := range padding {\n\t\tif val != temp {\n\t\t\treturn false, 0, errors.New(\"invalid padding: not all padding bytes the same\")\n\t\t}\n\t\ttemp = val\n\t}\n\treturn true, int(pad), nil\n}", "func (m NoMDEntries) HasEncodedTextLen() bool {\n\treturn m.Has(tag.EncodedTextLen)\n}", "func QueryVerify(name string) bool {\n\tcount, err := dbmap.SelectInt(\"select count(1) from cr_image where Image_name = ?\", name)\n\tif err != nil {\n\t\tlog.Fatalln(\"Verify failed\", err)\n\t\treturn false\n\t}\n\tif count < 1 {\n\t\treturn true\n\t}\n\treturn false\n}", "func VPCOMPRESSQ(ops ...operand.Op) { ctx.VPCOMPRESSQ(ops...) }", "func isDiffVarintSnappyStreamedEncodedPostings(input []byte) bool {\n\treturn bytes.HasPrefix(input, []byte(codecHeaderStreamedSnappy))\n}", "func CompareBytes(bs []byte) bool {\n\tcbs := dummyBytes()\n\tfor i, b := range bs {\n\t\tif cbs[i] != b {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n\n\t// We don't use this as we want to be able to test partial Byte slices\n\t//return bytes.Compare(b, DummyBytes()) == 0\n}", "func CheckSizeGreaterOrEqual(a, b []byte, aDescription, bDescription string) {\n\tif len(a) < len(b) {\n\t\tpanic(fmt.Sprintf(\"%s smaller than %s\", aDescription, bDescription))\n\t}\n}", "func CheckSizeMin(buf []byte, min int, descrip string) {\n\tif len(buf) < min {\n\t\tpanic(fmt.Sprintf(\"Incorrect %s buffer size, expected (>%d), got (%d).\", descrip, min, len(buf)))\n\t}\n}", "func TestCompression(t *testing.T) {\n\tvec := NewVector()\n\tfor i := 0; i < 1e5; i++ {\n\t\tv := int(rand.Uint32())\n\t\tvec.Add(v)\n\t}\n\n\tsizeofUint := int(unsafe.Sizeof(uint(0)))\n\n\trawsize := float64(sizeofUint * 1e5)\n\tvecsize := float64(vec.Size())\n\n\tpercentage := ((rawsize - vecsize) / rawsize) * 100\n\tfmt.Printf(\"=== COMPRESSION: %.2f%%\\n\", percentage)\n}", "func (s *BasePlSqlParserListener) EnterLob_compression_clause(ctx *Lob_compression_clauseContext) {}", "func IsObjectCompressed(path string) bool {\n\treturn path[len(path)-len(lz4.Extension):] == lz4.Extension\n}", "func (c *ColumnChunkMetaData) TotalUncompressedSize() int64 {\n\treturn c.columnMeta.GetTotalUncompressedSize()\n}", "func (api GrammarBot) CheckBytes(text []byte) (*Response, error) {\n\treturn api.Check(b2s(text))\n}", "func (c *cBinaryExpr) bytesCompare0() {\n\tcond := c.Node\n\tx, ok1 := cond.X.(*ast.CallExpr)\n\tif !ok1 {\n\t\treturn\n\t}\n\tif !isFun(x.Fun, \"bytes\", \"Compare\") {\n\t\treturn\n\t}\n\tif !isBasicLitValue(cond.Y, token.INT, \"0\") {\n\t\treturn\n\t}\n\n\tif !astutil.UsesImport(c.req.AstFile, \"bytes\") {\n\t\treturn\n\t}\n\n\tfun := x.Fun.(*ast.SelectorExpr)\n\n\tswitch cond.Op {\n\tcase token.EQL: // bytes.Compare(s,a) == 0\n\t\tfun.Sel.Name = \"Equal\"\n\t\tc.Cursor.Replace(cond.X)\n\tcase token.NEQ: // bytes.Compare(s,a) != 0\n\t\tfun.Sel.Name = \"Equal\"\n\t\tc1 := &ast.UnaryExpr{\n\t\t\tOp: token.NOT,\n\t\t\tX: x,\n\t\t}\n\t\tc.Cursor.Replace(c1)\n\t}\n}", "func CompletePackage(msg []byte, data_len int) int {\n //fmt.Println(\"data_len\", data_len)\n if data_len <= 8 {\n return 0\n }\n /*\n magic_byte:= msg[0:4]\n magic_num := binary.BigEndian.Uint32(magic_byte)\n if magic_num != MAGIC_NUM {\n return -1\n } \n */\n\n len_byte := msg[4:8]\n parse_len := binary.BigEndian.Uint32(len_byte)\n \n if int(parse_len) <= data_len {\n return int(parse_len)\n }\n 
return -1\n}", "func CompareBytes(b1, b2 []byte) int8 {\n\tif b1 == nil && b2 == nil {\n\t\tpanic(\"cannot compare with nil array\")\n\t}\n\tvar i, j int\n\tif len(b1) < len(b2) {\n\t\ti = len(b1) - len(b2)\n\t} else if len(b1) > len(b2) {\n\t\tj = len(b2) - len(b1)\n\t}\n\tfor {\n\t\tif i < 0 && j >= 0 && b2[j] > 0 {\n\t\t\treturn -1\n\t\t} else if j < 0 && i >= 0 && b1[i] > 0 {\n\t\t\treturn 1\n\t\t} else if j >= len(b1) && i >= len(b2) {\n\t\t\treturn 0\n\t\t} else if b1[i] < b2[j] {\n\t\t\treturn -1\n\t\t} else if b1[i] > b2[j] {\n\t\t\treturn 1\n\t\t}\n\t\ti++\n\t\tj++\n\t}\n}", "func (*NetworkInstance_Mpls_Global_ReservedLabelBlock_LowerBound_Union_Uint32) Is_NetworkInstance_Mpls_Global_ReservedLabelBlock_LowerBound_Union() {\n}", "func (at *AnnotatedTable) checkAlmostAsBig(ctx context.Context, other *AnnotatedTable) error {\n\tthisDetail, err := at.CachedDetail(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\totherDetail, err := other.CachedDetail(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Check that receiver table contains at least 99% as many tasks as\n\t// other table.\n\tif thisDetail.TaskFileCount < otherDetail.TaskFileCount {\n\t\tlog.Printf(\"Warning - fewer task files: %s(%d) < %s(%d) possibly due to redundant task files.\\n\",\n\t\t\tat.Table.FullyQualifiedName(), thisDetail.TaskFileCount,\n\t\t\tother.Table.FullyQualifiedName(), otherDetail.TaskFileCount)\n\t}\n\n\t// NOTE: We have discovered that in 2012, some archives contain tests that are entirely\n\t// redundant with tests in other archives. This means that some archives are completely removed\n\t// in the dedup process. Since these archives appear in the original \"base_tables\", this check\n\t// has been causing the sanity check to fail.\n\tif IncludeTaskFileCountCheck && float32(thisDetail.TaskFileCount) < taskCountRequirement*float32(otherDetail.TaskFileCount) {\n\t\treturn ErrTooFewTasks\n\t}\n\n\tif thisDetail.TestCount < otherDetail.TestCount {\n\t\tlog.Printf(\"Warning_fewer_tests: %s(%d) < %s(%d)\\n\",\n\t\t\tat.Table.FullyQualifiedName(), thisDetail.TestCount,\n\t\t\tother.Table.FullyQualifiedName(), otherDetail.TestCount)\n\t}\n\t// We are now using DISTINCT test counts, so we can use a tighter bound.\n\tif float32(thisDetail.TestCount) < testCountRequirement*float32(otherDetail.TestCount) {\n\t\treturn ErrTooFewTests\n\t}\n\treturn nil\n}", "func ByteLength(str string, params ...string) bool {\n\tif len(params) == 2 {\n\t\tmin, _ := ToInt(params[0])\n\t\tmax, _ := ToInt(params[1])\n\t\treturn len(str) >= int(min) && len(str) <= int(max)\n\t}\n\n\treturn false\n}", "func (z *InsertQuery) Msgsize() (s int) {\n\ts = 1 + 8\n\tif z.Actions == nil {\n\t\ts += msgp.NilSize\n\t} else {\n\t\ts += msgp.BoolSize\n\t}\n\ts += 7\n\tif z.Method == nil {\n\t\ts += msgp.NilSize\n\t} else {\n\t\ts += msgp.StringPrefixSize + len(*z.Method)\n\t}\n\treturn\n}", "func (m *SignatureKeyHolderMock) AsByteStringFinished() bool {\n\t// if expectation series were set then invocations count should be equal to expectations count\n\tif len(m.AsByteStringMock.expectationSeries) > 0 {\n\t\treturn atomic.LoadUint64(&m.AsByteStringCounter) == uint64(len(m.AsByteStringMock.expectationSeries))\n\t}\n\n\t// if main expectation was set then invocations count should be greater than zero\n\tif m.AsByteStringMock.mainExpectation != nil {\n\t\treturn atomic.LoadUint64(&m.AsByteStringCounter) > 0\n\t}\n\n\t// if func was set then invocations count should be greater than zero\n\tif m.AsByteStringFunc != nil {\n\t\treturn 
atomic.LoadUint64(&m.AsByteStringCounter) > 0\n\t}\n\n\treturn true\n}", "func (s *server) bytestreamBlobName(bytestream string) (*pb.Digest, bool, error) {\n\tmatches := s.bytestreamRe.FindStringSubmatch(bytestream)\n\tif matches == nil {\n\t\treturn nil, false, status.Errorf(codes.InvalidArgument, \"invalid ResourceName: %s\", bytestream)\n\t}\n\tsize, _ := strconv.Atoi(matches[3])\n\treturn &pb.Digest{\n\t\tHash: matches[2],\n\t\tSizeBytes: int64(size),\n\t}, matches[1] == \"compressed-blobs/zstd\", nil\n}", "func (q *queue) checkDataSize() error {\n\tif q.dataPageFct.Size()+q.indexPageFct.Size() > q.dataSizeLimit {\n\t\treturn ErrExceedingTotalSizeLimit\n\t}\n\treturn nil\n}", "func readUint8LengthPrefixed(s *cryptobyte.String, out *[]byte) bool {\n\treturn s.ReadUint8LengthPrefixed((*cryptobyte.String)(out))\n}", "func PMAXUDm128byte(X1 []byte, X2 []byte)", "func (g G1) BytesCompressed() []byte { return g.encodeBytes(true) }", "func TestCheckSignatureEncoding(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tname string\n\t\tsig []byte\n\t\tisValid bool\n\t}{\n\t\t{\n\t\t\tname: \"valid signature\",\n\t\t\tsig: decodeHex(\"304402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"empty.\",\n\t\t\tsig: nil,\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"bad magic\",\n\t\t\tsig: decodeHex(\"314402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"bad 1st int marker magic\",\n\t\t\tsig: decodeHex(\"304403204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"bad 2nd int marker\",\n\t\t\tsig: decodeHex(\"304402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41032018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"short len\",\n\t\t\tsig: decodeHex(\"304302204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"long len\",\n\t\t\tsig: decodeHex(\"304502204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"long X\",\n\t\t\tsig: decodeHex(\"304402424e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"long Y\",\n\t\t\tsig: decodeHex(\"304402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022118152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"short Y\",\n\t\t\tsig: 
decodeHex(\"304402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41021918152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"trailing crap\",\n\t\t\tsig: decodeHex(\"304402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d0901\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"X == N \",\n\t\t\tsig: decodeHex(\"30440220fffffffffffffffffffffffffffff\" +\n\t\t\t\t\"ffebaaedce6af48a03bbfd25e8cd0364141022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"X == N \",\n\t\t\tsig: decodeHex(\"30440220fffffffffffffffffffffffffffff\" +\n\t\t\t\t\"ffebaaedce6af48a03bbfd25e8cd0364142022018152\" +\n\t\t\t\t\"2ec8eca07de4860a4acdd12909d831cc56cbbac46220\" +\n\t\t\t\t\"82221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Y == N\",\n\t\t\tsig: decodeHex(\"304402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd410220fffff\" +\n\t\t\t\t\"ffffffffffffffffffffffffffebaaedce6af48a03bb\" +\n\t\t\t\t\"fd25e8cd0364141\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Y > N\",\n\t\t\tsig: decodeHex(\"304402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd410220fffff\" +\n\t\t\t\t\"ffffffffffffffffffffffffffebaaedce6af48a03bb\" +\n\t\t\t\t\"fd25e8cd0364142\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"0 len X\",\n\t\t\tsig: decodeHex(\"302402000220181522ec8eca07de4860a4acd\" +\n\t\t\t\t\"d12909d831cc56cbbac4622082221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"0 len Y\",\n\t\t\tsig: decodeHex(\"302402204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd410200\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"extra R padding\",\n\t\t\tsig: decodeHex(\"30450221004e45e16932b8af514961a1d3a1a\" +\n\t\t\t\t\"25fdf3f4f7732e9d624c6c61548ab5fb8cd410220181\" +\n\t\t\t\t\"522ec8eca07de4860a4acdd12909d831cc56cbbac462\" +\n\t\t\t\t\"2082221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t\t{\n\t\t\tname: \"extra S padding\",\n\t\t\tsig: decodeHex(\"304502204e45e16932b8af514961a1d3a1a25\" +\n\t\t\t\t\"fdf3f4f7732e9d624c6c61548ab5fb8cd41022100181\" +\n\t\t\t\t\"522ec8eca07de4860a4acdd12909d831cc56cbbac462\" +\n\t\t\t\t\"2082221a8768d1d09\"),\n\t\t\tisValid: false,\n\t\t},\n\t}\n\n\t// flags := ScriptVerifyStrictEncoding\n\tflags := StandardVerifyFlags\n\tfor _, test := range tests {\n\t\terr := TstCheckSignatureEncoding(test.sig, flags)\n\t\tif err != nil && test.isValid {\n\t\t\tt.Errorf(\"checkSignatureEncoding test '%s' failed \"+\n\t\t\t\t\"when it should have succeeded: %v\", test.name,\n\t\t\t\terr)\n\t\t} else if err == nil && !test.isValid {\n\t\t\tt.Errorf(\"checkSignatureEncooding test '%s' succeeded \"+\n\t\t\t\t\"when it should have failed\", test.name)\n\t\t}\n\t}\n}", "func (me TdtypeType) IsByte() bool { return me.String() == \"byte\" }", "func compareKeyBytes(a, b []byte, reverse bool, removeID bool) bool {\n\tif removeID {\n\t\tb = stripID(b)\n\t}\n\n\tvar r int\n\n\tif !reverse {\n\t\tr = bytes.Compare(a, b)\n\t} else {\n\t\tr = bytes.Compare(b, a)\n\t}\n\n\tif r < 0 {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func ValidateBytes(data []byte) ([]byte, error) {\n\tvar i 
json.RawMessage\n\n\terr := json.Unmarshal(data, &i)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn i, nil\n}", "func TestResultBlockResults_regression8583(t *testing.T) {\n\tconst keyData = \"0123456789abcdef0123456789abcdef\" // 32 bytes\n\twantKey := base64.StdEncoding.EncodeToString([]byte(keyData))\n\n\trsp := &ResultBlockResults{\n\t\tValidatorUpdates: []abci.ValidatorUpdate{{\n\t\t\tPubKey: pbcrypto.PublicKey{\n\t\t\t\tSum: &pbcrypto.PublicKey_Ed25519{Ed25519: []byte(keyData)},\n\t\t\t},\n\t\t\tPower: 400,\n\t\t}},\n\t}\n\n\t// Use compact here so the test data remain legible. The output from the\n\t// marshaler will have whitespace folded out so we need to do that too for\n\t// the comparison to be valid.\n\tvar buf bytes.Buffer\n\trequire.NoError(t, json.Compact(&buf, []byte(fmt.Sprintf(`\n{\n \"height\": \"0\",\n \"txs_results\": null,\n \"total_gas_used\": \"0\",\n \"finalize_block_events\": null,\n \"validator_updates\": [\n {\n \"pub_key\":{\"type\": \"tendermint/PubKeyEd25519\", \"value\": \"%s\"},\n \"power\": \"400\"\n }\n ],\n \"consensus_param_updates\": null\n}`, wantKey))))\n\n\tbits, err := json.Marshal(rsp)\n\tif err != nil {\n\t\tt.Fatalf(\"Encoding block result: %v\", err)\n\t}\n\tif diff := cmp.Diff(buf.String(), string(bits)); diff != \"\" {\n\t\tt.Errorf(\"Marshaled result (-want, +got):\\n%s\", diff)\n\t}\n\n\tback := new(ResultBlockResults)\n\tif err := json.Unmarshal(bits, back); err != nil {\n\t\tt.Fatalf(\"Unmarshaling: %v\", err)\n\t}\n\tif diff := cmp.Diff(rsp, back); diff != \"\" {\n\t\tt.Errorf(\"Unmarshaled result (-want, +got):\\n%s\", diff)\n\t}\n}", "func (m OrderStatusRequest) HasEncodedSecurityDescLen() bool {\n\treturn m.Has(tag.EncodedSecurityDescLen)\n}", "func (bitmap *bitmap) Compare(other *bitmap, prefixLen int) int {\n\tif prefixLen > bitmap.Size || prefixLen > other.Size {\n\t\tpanic(\"index out of range\")\n\t}\n\n\tdiv, mod := prefixLen/8, prefixLen%8\n\tfor i := 0; i < div; i++ {\n\t\tif bitmap.data[i] > other.data[i] {\n\t\t\treturn 1\n\t\t} else if bitmap.data[i] < other.data[i] {\n\t\t\treturn -1\n\t\t}\n\t}\n\n\tfor i := div * 8; i < div*8+mod; i++ {\n\t\tbit1, bit2 := bitmap.Bit(i), other.Bit(i)\n\t\tif bit1 > bit2 {\n\t\t\treturn 1\n\t\t} else if bit1 < bit2 {\n\t\t\treturn -1\n\t\t}\n\t}\n\n\treturn 0\n}", "func (p *MultiProxy) Check(b []byte, _ []interface{}) (bool, int) {\n\trequired := 0\n\n\tfor _, v := range p.match {\n\t\tif len(v) > len(b) {\n\n\t\t\tif bytes.Compare(v[:len(b)], b) == 0 {\n\t\t\t\t// We found the smallest potential future match\n\t\t\t\trequired = len(v)\n\t\t\t\tbreak\n\t\t\t}\n\t\t} else if bytes.Compare(b[:len(v)], v) == 0 {\n\t\t\treturn true, 0\n\t\t}\n\t}\n\n\treturn false, required\n}", "func TestDecode(t *testing.T) {\n\tt.Parallel()\n\n\ttestCases := []struct {\n\t\tname string\n\t\ttweakEnc func(*msgpack.Encoder)\n\t\tinput any // will be encoded verbatim with\n\t\texpect *TestMessage\n\t\texpectUnknown protoreflect.RawFields\n\t\texpectRaw msgpack.RawMessage\n\t\texpectDecoded any\n\t\terr string\n\t}{\n\t\t{\n\t\t\tname: \"int32->int64\",\n\t\t\tinput: map[int32]any{\n\t\t\t\t3: int32(10),\n\t\t\t},\n\t\t\texpect: &TestMessage{Intval: 10},\n\t\t},\n\t\t{\n\t\t\tname: \"int8->int64\",\n\t\t\tinput: map[int32]any{\n\t\t\t\t3: int8(10),\n\t\t\t},\n\t\t\texpect: &TestMessage{Intval: 10},\n\t\t},\n\t\t{\n\t\t\tname: \"int64->int32\",\n\t\t\tinput: map[int32]any{\n\t\t\t\t5: int64(10),\n\t\t\t},\n\t\t\texpect: &TestMessage{ShortIntval: 10},\n\t\t},\n\t\t{\n\t\t\tname: \"int64->int32 
(overflow)\",\n\t\t\tinput: map[int32]any{\n\t\t\t\t5: int64(math.MaxInt32 * 2),\n\t\t\t},\n\t\t\texpect: &TestMessage{ShortIntval: -2},\n\t\t},\n\t\t{\n\t\t\tname: \"float64->int32\",\n\t\t\tinput: map[int32]any{\n\t\t\t\t5: float64(217),\n\t\t\t},\n\t\t\terr: \"bad type: expected int32, got float64\",\n\t\t},\n\n\t\t{\n\t\t\tname: \"unknown field\",\n\t\t\tinput: map[int32]any{\n\t\t\t\t777: \"nerds\",\n\t\t\t\t3: 100,\n\t\t\t},\n\t\t\texpect: &TestMessage{\n\t\t\t\tIntval: 100,\n\t\t\t},\n\t\t\texpectUnknown: []byte{\n\t\t\t\t250, 255, 255, 255, 15, // proto: 536870911: LEN\n\t\t\t\t10, // proto: 10 bytes in this field\n\t\t\t\t129, // msgpack: 1 element map\n\t\t\t\t205, 3, 9, // msgpack: 777\n\t\t\t\t165, 110, 101, 114, 100, 115, // msgpack: 5-char string, \"nerds\"\n\t\t\t},\n\t\t\texpectRaw: []byte{\n\t\t\t\t130, // 2 item map\n\t\t\t\t3, 100, // tag 3, 100\n\t\t\t\t205, 3, 9, 165, 110, 101, 114, 100, 115, // tag 777, 5 char string \"nerds\"\n\t\t\t},\n\t\t\texpectDecoded: map[int32]any{\n\t\t\t\t3: int64(100),\n\t\t\t\t777: \"nerds\",\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tname: \"sparse array\",\n\t\t\tinput: map[int32]any{\n\t\t\t\t13: map[int32]string{\n\t\t\t\t\t3: \"hello\",\n\t\t\t\t\t12: \"there\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpect: &TestMessage{\n\t\t\t\tStrings: []string{\n\t\t\t\t\t\"\", \"\", \"\",\n\t\t\t\t\t\"hello\",\n\t\t\t\t\t\"\", \"\", \"\",\n\t\t\t\t\t\"\", \"\", \"\",\n\t\t\t\t\t\"\", \"\",\n\t\t\t\t\t\"there\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tConvey(`TestDecode`, t, func() {\n\t\tfor _, tc := range testCases {\n\t\t\ttc := tc\n\t\t\tConvey(tc.name, func() {\n\t\t\t\tenc := msgpack.GetEncoder()\n\t\t\t\tdefer msgpack.PutEncoder(enc)\n\n\t\t\t\tbuf := bytes.Buffer{}\n\t\t\t\tenc.Reset(&buf)\n\t\t\t\tif tc.tweakEnc != nil {\n\t\t\t\t\ttc.tweakEnc(enc)\n\t\t\t\t}\n\t\t\t\tSo(enc.Encode(tc.input), ShouldBeNil)\n\n\t\t\t\tmsg := &TestMessage{}\n\t\t\t\terr := Unmarshal(buf.Bytes(), msg)\n\t\t\t\tif tc.err == \"\" {\n\t\t\t\t\tSo(err, ShouldBeNil)\n\n\t\t\t\t\tknown := proto.Clone(msg).(*TestMessage)\n\t\t\t\t\tknown.ProtoReflect().SetUnknown(nil)\n\t\t\t\t\tSo(known, ShouldResembleProto, tc.expect)\n\n\t\t\t\t\tSo(msg.ProtoReflect().GetUnknown(), ShouldResemble, tc.expectUnknown)\n\n\t\t\t\t\tif tc.expectRaw != nil {\n\t\t\t\t\t\traw, err := Marshal(msg, Deterministic)\n\t\t\t\t\t\tSo(err, ShouldBeNil)\n\n\t\t\t\t\t\tSo(raw, ShouldResemble, tc.expectRaw)\n\n\t\t\t\t\t\tif len(msg.ProtoReflect().GetUnknown()) > 0 {\n\t\t\t\t\t\t\tdec := msgpack.GetDecoder()\n\t\t\t\t\t\t\tdefer msgpack.PutDecoder(dec)\n\t\t\t\t\t\t\tdec.Reset(bytes.NewBuffer(raw))\n\t\t\t\t\t\t\tdec.UseLooseInterfaceDecoding(true)\n\t\t\t\t\t\t\tdec.SetMapDecoder(func(d *msgpack.Decoder) (any, error) {\n\t\t\t\t\t\t\t\treturn d.DecodeUntypedMap()\n\t\t\t\t\t\t\t})\n\n\t\t\t\t\t\t\tdecoded := reflect.MakeMap(reflect.TypeOf(tc.expectDecoded))\n\n\t\t\t\t\t\t\tSo(dec.DecodeValue(decoded), ShouldBeNil)\n\n\t\t\t\t\t\t\tSo(decoded.Interface(), ShouldResemble, tc.expectDecoded)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tSo(err, ShouldErrLike, tc.err)\n\t\t\t\t}\n\n\t\t\t})\n\t\t}\n\t})\n\n}", "func DeserializeQueryEstResultSizes(q *Query, buffer *Buffer, serializationType SerializationType, clientSide bool) error {\n\tvar cClientSide C.int32_t\n\tif clientSide {\n\t\tcClientSide = 1\n\t} else {\n\t\tcClientSide = 0\n\t}\n\n\tret := C.tiledb_deserialize_query_est_result_sizes(q.context.tiledbContext, q.tiledbQuery, C.tiledb_serialization_type_t(serializationType), cClientSide, 
buffer.tiledbBuffer)\n\tif ret != C.TILEDB_OK {\n\t\treturn fmt.Errorf(\"Error deserializing query est buffer sizes: %s\", q.context.LastError())\n\t}\n\treturn nil\n}", "func (o *UcsdBackupInfoAllOf) GetBackupSizeOk() (*int64, bool) {\n\tif o == nil || o.BackupSize == nil {\n\t\treturn nil, false\n\t}\n\treturn o.BackupSize, true\n}", "func check_args(parsed_query []string, num_expected int) bool {\n\treturn (len(parsed_query) >= num_expected)\n}", "func TestMalformedPacket(t *testing.T) {\n\t// copied as bytes from Wireshark, then modified the RelayMessage option length\n\tbytes := []byte{\n\t\t0x0c, 0x00, 0x24, 0x01, 0xdb, 0x00, 0x30, 0x10, 0xb0, 0x8a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n\t\t0x00, 0x0a, 0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x0b, 0xab, 0xff, 0xfe, 0x8a,\n\t\t0x6d, 0xf2, 0x00, 0x09, 0x00, 0x50 /*was 0x32*/, 0x01, 0x8d, 0x3e, 0x24, 0x00, 0x01, 0x00, 0x0e, 0x00, 0x01,\n\t\t0x00, 0x01, 0x0c, 0x71, 0x3d, 0x0e, 0x00, 0x0b, 0xab, 0x8a, 0x6d, 0xf2, 0x00, 0x08, 0x00, 0x02,\n\t\t0x00, 0x00, 0x00, 0x03, 0x00, 0x0c, 0xee, 0xbf, 0xfb, 0x6e, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0x00, 0x06, 0x00, 0x02, 0x00, 0x17, 0x00, 0x25, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x09,\n\t\t0x00, 0x03, 0x08, 0x00, 0xf0, 0x7f, 0x06, 0xd6, 0x4c, 0x3c, 0x00, 0x12, 0x00, 0x04, 0x09, 0x01,\n\t\t0x08, 0x5a,\n\t}\n\tpacket := Packet6(bytes)\n\t_, err := packet.dhcp6message()\n\tif err == nil {\n\t\tt.Fatalf(\"Should be unable to extract dhcp6message, but did not fail\")\n\t}\n}", "func compareBytes(a []byte, b []byte) bool {\n\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i, j := range a {\n\t\tif b[i] != j {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func packdataLen() int {\n\treturn 2*marshalBufLen + binary.MaxVarintLen64 + sha256.Size + 1\n}" ]
[ "0.6823946", "0.53564346", "0.51485986", "0.5139452", "0.50640583", "0.5022969", "0.49374676", "0.4916851", "0.48699605", "0.4850243", "0.48246896", "0.48079136", "0.4799034", "0.4767999", "0.47604385", "0.47575718", "0.4742403", "0.47369084", "0.47271678", "0.47015467", "0.4670805", "0.46636847", "0.4652214", "0.4642612", "0.4639603", "0.46076816", "0.45745322", "0.4569829", "0.45672503", "0.45507628", "0.45435315", "0.45397955", "0.4536673", "0.4502963", "0.4490072", "0.4484125", "0.44774026", "0.44772545", "0.44742703", "0.44483754", "0.4447167", "0.44218007", "0.44193345", "0.4413398", "0.44093245", "0.44050226", "0.44030365", "0.43981534", "0.4396104", "0.43542877", "0.4350077", "0.434699", "0.43437788", "0.43389773", "0.43372792", "0.43357372", "0.4326813", "0.43232036", "0.4320164", "0.43159676", "0.43138337", "0.43106273", "0.43090326", "0.43037078", "0.43015948", "0.430142", "0.42999184", "0.42851683", "0.42785573", "0.4272131", "0.4265744", "0.42618236", "0.42584822", "0.4257177", "0.42526135", "0.42379564", "0.42285404", "0.4226687", "0.42246127", "0.42224038", "0.42187974", "0.42122003", "0.42111006", "0.42095688", "0.42070305", "0.41918436", "0.41862336", "0.41808102", "0.41742584", "0.41681874", "0.41678944", "0.4167238", "0.41617846", "0.4157182", "0.41556996", "0.41534385", "0.41510132", "0.41505006", "0.41475388", "0.41459328" ]
0.721547
0
Check that we correctly pad an uncompressed query to the nearest block.
func TestAddEdnsPaddingUncompressedQuery(t *testing.T) { if len(uncompressedQueryBytes)%PaddingBlockSize == 0 { t.Errorf("uncompressedQueryBytes does not require padding, so this test is invalid") } padded, err := AddEdnsPadding(uncompressedQueryBytes) if err != nil { panic(err) } if len(padded)%PaddingBlockSize != 0 { t.Errorf("AddEdnsPadding failed to correctly pad uncompressed query") } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestDnsMessageUncompressedQueryConfidenceCheck(t *testing.T) {\n\tm := mustUnpack(uncompressedQueryBytes)\n\tpackedBytes := mustPack(m)\n\tif len(packedBytes) >= len(uncompressedQueryBytes) {\n\t\tt.Errorf(\"Compressed query is not smaller than uncompressed query\")\n\t}\n}", "func TestDnsMessageCompressedQueryConfidenceCheck(t *testing.T) {\n\tm := mustUnpack(compressedQueryBytes)\n\tpackedBytes := mustPack(m)\n\tif len(packedBytes) != len(compressedQueryBytes) {\n\t\tt.Errorf(\"Packed query has different size than original:\\n %v\\n %v\", packedBytes, compressedQueryBytes)\n\t}\n}", "func TestAddEdnsPaddingCompressedQuery(t *testing.T) {\n\tif len(compressedQueryBytes)%PaddingBlockSize == 0 {\n\t\tt.Errorf(\"compressedQueryBytes does not require padding, so this test is invalid\")\n\t}\n\tpadded, err := AddEdnsPadding(compressedQueryBytes)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif len(padded)%PaddingBlockSize != 0 {\n\t\tt.Errorf(\"AddEdnsPadding failed to correctly pad compressed query\")\n\t}\n}", "func TestAddEdnsPaddingCompressedPaddedQuery(t *testing.T) {\n\tpaddedQuery := simpleQuery\n\tpaddedQuery.Additionals = make([]dnsmessage.Resource, len(simpleQuery.Additionals))\n\tcopy(paddedQuery.Additionals, simpleQuery.Additionals)\n\n\tpaddedQuery.Additionals = append(paddedQuery.Additionals,\n\t\tdnsmessage.Resource{\n\t\t\tHeader: dnsmessage.ResourceHeader{\n\t\t\t\tName: dnsmessage.MustNewName(\".\"),\n\t\t\t\tClass: dnsmessage.ClassINET,\n\t\t\t\tTTL: 0,\n\t\t\t},\n\t\t\tBody: &dnsmessage.OPTResource{\n\t\t\t\tOptions: []dnsmessage.Option{\n\t\t\t\t\t{\n\t\t\t\t\t\tCode: OptResourcePaddingCode,\n\t\t\t\t\t\tData: make([]byte, 5),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\toriginalOnWire := mustPack(&paddedQuery)\n\n\tpaddedOnWire, err := AddEdnsPadding(mustPack(&paddedQuery))\n\tif err != nil {\n\t\tt.Errorf(\"Failed to pad padded query: %v\", err)\n\t}\n\n\tif !bytes.Equal(originalOnWire, paddedOnWire) {\n\t\tt.Errorf(\"AddEdnsPadding tampered with a query that was already padded\")\n\t}\n}", "func (b *blockEnc) matchOffset(offset, lits uint32) uint32 {\n\t// Check if offset is one of the recent offsets.\n\t// Adjusts the output offset accordingly.\n\t// Gives a tiny bit of compression, typically around 1%.\n\tif true {\n\t\tif lits > 0 {\n\t\t\tswitch offset {\n\t\t\tcase b.recentOffsets[0]:\n\t\t\t\toffset = 1\n\t\t\tcase b.recentOffsets[1]:\n\t\t\t\tb.recentOffsets[1] = b.recentOffsets[0]\n\t\t\t\tb.recentOffsets[0] = offset\n\t\t\t\toffset = 2\n\t\t\tcase b.recentOffsets[2]:\n\t\t\t\tb.recentOffsets[2] = b.recentOffsets[1]\n\t\t\t\tb.recentOffsets[1] = b.recentOffsets[0]\n\t\t\t\tb.recentOffsets[0] = offset\n\t\t\t\toffset = 3\n\t\t\tdefault:\n\t\t\t\tb.recentOffsets[2] = b.recentOffsets[1]\n\t\t\t\tb.recentOffsets[1] = b.recentOffsets[0]\n\t\t\t\tb.recentOffsets[0] = offset\n\t\t\t\toffset += 3\n\t\t\t}\n\t\t} else {\n\t\t\tswitch offset {\n\t\t\tcase b.recentOffsets[1]:\n\t\t\t\tb.recentOffsets[1] = b.recentOffsets[0]\n\t\t\t\tb.recentOffsets[0] = offset\n\t\t\t\toffset = 1\n\t\t\tcase b.recentOffsets[2]:\n\t\t\t\tb.recentOffsets[2] = b.recentOffsets[1]\n\t\t\t\tb.recentOffsets[1] = b.recentOffsets[0]\n\t\t\t\tb.recentOffsets[0] = offset\n\t\t\t\toffset = 2\n\t\t\tcase b.recentOffsets[0] - 1:\n\t\t\t\tb.recentOffsets[2] = b.recentOffsets[1]\n\t\t\t\tb.recentOffsets[1] = b.recentOffsets[0]\n\t\t\t\tb.recentOffsets[0] = offset\n\t\t\t\toffset = 3\n\t\t\tdefault:\n\t\t\t\tb.recentOffsets[2] = b.recentOffsets[1]\n\t\t\t\tb.recentOffsets[1] = 
b.recentOffsets[0]\n\t\t\t\tb.recentOffsets[0] = offset\n\t\t\t\toffset += 3\n\t\t\t}\n\t\t}\n\t} else {\n\t\toffset += 3\n\t}\n\treturn offset\n}", "func blockPadding(offset int64) (n int64) {\n\treturn -offset & (blockSize - 1)\n}", "func checksizeAndPad(plaintext []byte) []byte {\n\n\t// calculate modulus of plaintext to blowfish's cipher block size\n\t// if result is not 0, then we need to pad\n\n\tmodulus := len(plaintext) % blowfish.BlockSize\n\tif modulus != 0 {\n\t\t// calc bytes we need to pad to make plaintext a multiple of block size\n\t\tpadlen := blowfish.BlockSize - modulus\n\n\t\t// add required padding\n\t\tfor i := 0; i < padlen; i++ {\n\t\t\tplaintext = append(plaintext, 0)\n\t\t}\n\t}\n\n\treturn plaintext\n}", "func TestAddEdnsPaddingCompressedOptQuery(t *testing.T) {\n\toptQuery := simpleQuery\n\toptQuery.Additionals = make([]dnsmessage.Resource, len(simpleQuery.Additionals))\n\tcopy(optQuery.Additionals, simpleQuery.Additionals)\n\n\toptQuery.Additionals = append(optQuery.Additionals,\n\t\tdnsmessage.Resource{\n\t\t\tHeader: dnsmessage.ResourceHeader{\n\t\t\t\tName: dnsmessage.MustNewName(\".\"),\n\t\t\t\tClass: dnsmessage.ClassINET,\n\t\t\t\tTTL: 0,\n\t\t\t},\n\t\t\tBody: &dnsmessage.OPTResource{\n\t\t\t\tOptions: []dnsmessage.Option{},\n\t\t\t},\n\t\t},\n\t)\n\tpaddedOnWire, err := AddEdnsPadding(mustPack(&optQuery))\n\tif err != nil {\n\t\tt.Errorf(\"Failed to pad query with OPT but no padding: %v\", err)\n\t}\n\tif len(paddedOnWire)%PaddingBlockSize != 0 {\n\t\tt.Errorf(\"AddEdnsPadding failed to correctly pad query with OPT but no padding\")\n\t}\n}", "func canonicalPadding(b []byte) error {\n\tswitch {\n\tcase b[0]&0x80 == 0x80:\n\t\treturn errNegativeValue\n\tcase len(b) > 1 && b[0] == 0x00 && b[1]&0x80 != 0x80:\n\t\treturn errExcessivelyPaddedValue\n\tdefault:\n\t\treturn nil\n\t}\n}", "func (uc *Cypher) pkcs7unpad(padded []byte, blockSize int) []byte {\n\n\tdataLen := len(padded)\n\tpaddingCount := int(padded[dataLen-1])\n\n\tif paddingCount > blockSize || paddingCount <= 0 {\n\t\treturn padded //data is not padded (or not padded correctly), return as is\n\t}\n\n\tpadding := padded[dataLen-paddingCount : dataLen-1]\n\n\tfor _, b := range padding {\n\t\tif int(b) != paddingCount {\n\t\t\treturn padded //data is not padded (or not padded correcly), return as is\n\t\t}\n\t}\n\n\treturn padded[:len(padded)-paddingCount] //return data - padding\n}", "func TestHiddenWithPK1(t *testing.T) {\n\tdefer testutils.AfterTest(t)()\n\ttestutils.EnsureNoLeak(t)\n\tctx := context.Background()\n\n\ttae := testutil.InitTestDB(ctx, ModuleName, t, nil)\n\tdefer tae.Close()\n\tschema := catalog.MockSchemaAll(13, 2)\n\tschema.BlockMaxRows = 10\n\tschema.SegmentMaxBlocks = 2\n\tbat := catalog.MockBatch(schema, int(schema.BlockMaxRows*4))\n\tdefer bat.Close()\n\tbats := bat.Split(10)\n\n\ttxn, _, rel := testutil.CreateRelationNoCommit(t, tae, testutil.DefaultTestDB, schema, true)\n\terr := rel.Append(context.Background(), bats[0])\n\t{\n\t\toffsets := make([]uint32, 0)\n\t\tit := rel.MakeBlockIt()\n\t\tfor it.Valid() {\n\t\t\tblk := it.GetBlock()\n\t\t\tview, err := blk.GetColumnDataById(context.Background(), schema.PhyAddrKey.Idx)\n\t\t\tassert.NoError(t, err)\n\t\t\tdefer view.Close()\n\t\t\tfp := blk.Fingerprint()\n\t\t\t_ = view.GetData().Foreach(func(v any, _ bool, _ int) (err error) {\n\t\t\t\trid := v.(types.Rowid)\n\t\t\t\tbid, offset := rid.Decode()\n\t\t\t\tt.Logf(\"bid=%s,offset=%d\", bid.String(), offset)\n\t\t\t\tassert.Equal(t, fp.BlockID, bid)\n\t\t\t\toffsets = 
append(offsets, offset)\n\t\t\t\treturn\n\t\t\t}, nil)\n\t\t\tit.Next()\n\t\t}\n\t\t// sort.Slice(offsets, func(i, j int) bool { return offsets[i] < offsets[j] })\n\t\t// assert.Equal(t, []uint32{0, 1, 2, 3}, offsets)\n\t}\n\tassert.NoError(t, err)\n\tassert.NoError(t, txn.Commit(context.Background()))\n\n\ttxn, rel = testutil.GetDefaultRelation(t, tae, schema.Name)\n\t{\n\t\tblk := testutil.GetOneBlock(rel)\n\t\tview, err := blk.GetColumnDataByName(context.Background(), catalog.PhyAddrColumnName)\n\t\tassert.NoError(t, err)\n\t\tdefer view.Close()\n\t\toffsets := make([]uint32, 0)\n\t\tfp := blk.Fingerprint()\n\t\tt.Log(fp.String())\n\t\t_ = view.GetData().Foreach(func(v any, _ bool, _ int) (err error) {\n\t\t\trid := v.(types.Rowid)\n\t\t\tbid, offset := rid.Decode()\n\t\t\tt.Logf(\",bid=%s,offset=%d\", bid, offset)\n\t\t\tassert.Equal(t, fp.BlockID, bid)\n\t\t\toffsets = append(offsets, offset)\n\t\t\treturn\n\t\t}, nil)\n\t\tsort.Slice(offsets, func(i, j int) bool { return offsets[i] < offsets[j] })\n\t\tassert.Equal(t, []uint32{0, 1, 2, 3}, offsets)\n\t}\n\n\tassert.NoError(t, err)\n\tassert.NoError(t, txn.Commit(context.Background()))\n\n\ttxn, rel = testutil.GetDefaultRelation(t, tae, schema.Name)\n\terr = rel.Append(context.Background(), bats[1])\n\tassert.NoError(t, err)\n\terr = rel.Append(context.Background(), bats[2])\n\tassert.NoError(t, err)\n\terr = rel.Append(context.Background(), bats[3])\n\tassert.NoError(t, err)\n\terr = rel.Append(context.Background(), bats[4])\n\tassert.NoError(t, err)\n\terr = rel.Append(context.Background(), bats[5])\n\tassert.NoError(t, err)\n\tassert.NoError(t, txn.Commit(context.Background()))\n\n\ttestutil.CompactBlocks(t, 0, tae, \"db\", schema, false)\n\n\ttxn, rel = testutil.GetDefaultRelation(t, tae, schema.Name)\n\tvar segMeta *catalog.SegmentEntry\n\t{\n\t\tit := rel.MakeBlockIt()\n\t\tfor it.Valid() {\n\t\t\tblk := it.GetBlock()\n\t\t\tview, err := blk.GetColumnDataByName(context.Background(), catalog.PhyAddrColumnName)\n\t\t\tassert.NoError(t, err)\n\t\t\tdefer view.Close()\n\t\t\toffsets := make([]uint32, 0)\n\t\t\tmeta := blk.GetMeta().(*catalog.BlockEntry)\n\t\t\tt.Log(meta.String())\n\t\t\t_ = view.GetData().Foreach(func(v any, _ bool, _ int) (err error) {\n\t\t\t\trid := v.(types.Rowid)\n\t\t\t\tbid, offset := rid.Decode()\n\t\t\t\t// t.Logf(\"sid=%d,bid=%d,offset=%d\", sid, bid, offset)\n\t\t\t\tassert.Equal(t, meta.ID, bid)\n\t\t\t\toffsets = append(offsets, offset)\n\t\t\t\treturn\n\t\t\t}, nil)\n\t\t\tsort.Slice(offsets, func(i, j int) bool { return offsets[i] < offsets[j] })\n\t\t\tif meta.IsAppendable() {\n\t\t\t\tassert.Equal(t, []uint32{0, 1, 2, 3}, offsets)\n\t\t\t} else {\n\t\t\t\tsegMeta = meta.GetSegment()\n\t\t\t\tassert.Equal(t, []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, offsets)\n\t\t\t}\n\t\t\tit.Next()\n\t\t}\n\t}\n\n\tassert.NoError(t, txn.Commit(context.Background()))\n\t{\n\t\tseg := segMeta.GetSegmentData()\n\t\tfactory, taskType, scopes, err := seg.BuildCompactionTaskFactory()\n\t\tassert.NoError(t, err)\n\t\ttask, err := tae.Runtime.Scheduler.ScheduleMultiScopedTxnTask(tasks.WaitableCtx, taskType, scopes, factory)\n\t\tassert.NoError(t, err)\n\t\terr = task.WaitDone()\n\t\tassert.NoError(t, err)\n\t}\n\n\ttxn, rel = testutil.GetDefaultRelation(t, tae, schema.Name)\n\t{\n\t\tit := rel.MakeBlockIt()\n\t\tfor it.Valid() {\n\t\t\tblk := it.GetBlock()\n\t\t\tview, err := blk.GetColumnDataByName(context.Background(), catalog.PhyAddrColumnName)\n\t\t\tassert.NoError(t, err)\n\t\t\tdefer view.Close()\n\t\t\toffsets := 
make([]uint32, 0)\n\t\t\tmeta := blk.GetMeta().(*catalog.BlockEntry)\n\t\t\tt.Log(meta.String())\n\t\t\tt.Log(meta.GetSegment().String())\n\t\t\t_ = view.GetData().Foreach(func(v any, _ bool, _ int) (err error) {\n\t\t\t\trid := v.(types.Rowid)\n\t\t\t\tbid, offset := rid.Decode()\n\t\t\t\t// t.Logf(\"sid=%d,bid=%d,offset=%d\", sid, bid, offset)\n\t\t\t\tassert.Equal(t, meta.ID, bid)\n\t\t\t\toffsets = append(offsets, offset)\n\t\t\t\treturn\n\t\t\t}, nil)\n\t\t\tsort.Slice(offsets, func(i, j int) bool { return offsets[i] < offsets[j] })\n\t\t\tif meta.IsAppendable() {\n\t\t\t\tassert.Equal(t, []uint32{0, 1, 2, 3}, offsets)\n\t\t\t} else {\n\t\t\t\tassert.Equal(t, []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, offsets)\n\t\t\t}\n\t\t\tit.Next()\n\t\t}\n\t}\n\n\tassert.NoError(t, txn.Commit(context.Background()))\n\tt.Log(tae.Catalog.SimplePPString(common.PPL1))\n}", "func (enc *Encoding) decode(dst, src []byte) (n int, end bool, err error) {\n\t// Lift the nil check outside of the loop.\n\t_ = enc.decodeMap\n\n\tdsti := 0\n\tolen := len(src)\n\n\tfor len(src) > 0 && !end {\n\t\t// Decode quantum using the base32 alphabet\n\t\tvar dbuf [8]byte\n\t\tdlen := 8\n\n\t\tfor j := 0; j < 8; {\n\n\t\t\tif len(src) == 0 {\n\t\t\t\tif enc.padChar != NoPadding {\n\t\t\t\t\t// We have reached the end and are missing padding\n\t\t\t\t\treturn n, false, CorruptInputError(olen - len(src) - j)\n\t\t\t\t}\n\t\t\t\t// We have reached the end and are not expecting any padding\n\t\t\t\tdlen, end = j, true\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tin := src[0]\n\t\t\tsrc = src[1:]\n\t\t\tif in == byte(enc.padChar) && j >= 2 && len(src) < 8 {\n\t\t\t\t// We've reached the end and there's padding\n\t\t\t\tif len(src)+j < 8-1 {\n\t\t\t\t\t// not enough padding\n\t\t\t\t\treturn n, false, CorruptInputError(olen)\n\t\t\t\t}\n\t\t\t\tfor k := 0; k < 8-1-j; k++ {\n\t\t\t\t\tif len(src) > k && src[k] != byte(enc.padChar) {\n\t\t\t\t\t\t// incorrect padding\n\t\t\t\t\t\treturn n, false, CorruptInputError(olen - len(src) + k - 1)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tdlen, end = j, true\n\t\t\t\t// 7, 5 and 2 are not valid padding lengths, and so 1, 3 and 6 are not\n\t\t\t\t// valid dlen values. 
See RFC 4648 Section 6 \"Base 32 Encoding\" listing\n\t\t\t\t// the five valid padding lengths, and Section 9 \"Illustrations and\n\t\t\t\t// Examples\" for an illustration for how the 1st, 3rd and 6th base32\n\t\t\t\t// src bytes do not yield enough information to decode a dst byte.\n\t\t\t\tif dlen == 1 || dlen == 3 || dlen == 6 {\n\t\t\t\t\treturn n, false, CorruptInputError(olen - len(src) - 1)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tdbuf[j] = enc.decodeMap[in]\n\t\t\tif dbuf[j] == 0xFF {\n\t\t\t\treturn n, false, CorruptInputError(olen - len(src) - 1)\n\t\t\t}\n\t\t\tj++\n\t\t}\n\n\t\t// Pack 8x 5-bit source blocks into 5 byte destination\n\t\t// quantum\n\t\tswitch dlen {\n\t\tcase 8:\n\t\t\tdst[dsti+4] = dbuf[6]<<5 | dbuf[7]\n\t\t\tn++\n\t\t\tfallthrough\n\t\tcase 7:\n\t\t\tdst[dsti+3] = dbuf[4]<<7 | dbuf[5]<<2 | dbuf[6]>>3\n\t\t\tn++\n\t\t\tfallthrough\n\t\tcase 5:\n\t\t\tdst[dsti+2] = dbuf[3]<<4 | dbuf[4]>>1\n\t\t\tn++\n\t\t\tfallthrough\n\t\tcase 4:\n\t\t\tdst[dsti+1] = dbuf[1]<<6 | dbuf[2]<<1 | dbuf[3]>>4\n\t\t\tn++\n\t\t\tfallthrough\n\t\tcase 2:\n\t\t\tdst[dsti+0] = dbuf[0]<<3 | dbuf[1]>>2\n\t\t\tn++\n\t\t}\n\t\tdsti += 5\n\t}\n\treturn n, end, nil\n}", "func isDiffVarintSnappyEncodedPostings(input []byte) bool {\n\treturn bytes.HasPrefix(input, []byte(codecHeaderSnappy))\n}", "func XXX_HACK_autodetect_libpcap_layout(reader io.ReaderAt, hdrlay *PacketHeaderLayout) {\r\n\tbuf := make([]byte, 24)\r\n\treader.ReadAt(buf, 0)\r\n\tif 0xa1b23c4d == binary.LittleEndian.Uint32(buf[0:4]) &&\r\n\t\t228 == binary.LittleEndian.Uint32(buf[20:24]) {\r\n\t\tBuildPacketHeaderLayout(hdrlay, HDRLAY_LIBPCAP)\r\n\t}\r\n}", "func QEncodeIfNeeded(src []byte, offset int) (dst []byte) {\n\tsafe := true\n\tfor i, sl := 0, len(src); i < sl && safe; i++ {\n\t\tsafe = ' ' <= src[i] && src[i] <= '~'\n\t}\n\tif safe {\n\t\treturn src\n\t}\n\tdst, _ = QEncode(src, offset)\n\treturn dst\n}", "func (c *Cache) blockAlign(offset int64) int64 {\n\treturn offset / int64(c.blockSize) * int64(c.blockSize)\n}", "func (w *Writer) checkAlign(n *node, start int, comma, cs []byte) bool {\n\tc := n.genTables(w.SEN)\n\tif c == nil || w.Width < start+c.size {\n\t\treturn true\n\t}\n\tfor i, m := range n.members {\n\t\tif 0 < i {\n\t\t\tw.buf = append(w.buf, comma...)\n\t\t}\n\t\tw.buf = append(w.buf, []byte(cs)...)\n\t\tswitch m.kind {\n\t\tcase arrayNode:\n\t\t\tw.alignArray(m, c, comma, cs)\n\t\tcase mapNode:\n\t\t\tw.alignMap(m, c, comma, cs)\n\t\t}\n\t}\n\treturn false\n}", "func padBlock(b []byte) []byte {\n\tpad := BlockSize - uint64(len(b))\n\tfor i := uint64(0); i < pad; i++ {\n\t\tb = append(b, byte(pad))\n\t}\n\treturn b\n}", "func unpadBlock(b []byte) []byte {\n\tpad := int(b[len(b)-1])\n\treturn b[:len(b)-pad]\n}", "func (c *Codec) tryMatch(idx int, prefix, ol oligo.Oligo, olen int, mdblks []uint64, data [][]byte, difficulty int) (err int) {\n\terr = -1\n\tdata[idx] = nil\n\n\t// Do a common sense check before we even try to match:\n\t// we can have up to (blknum-idx)*(Nerrdata + 1) insertions or deletions for\n\t// the rest of the oligo. 
If the difference between olen and the actual\n\t// oligo length is bigger, there is no point of going that route, we'll fail anyway\n\t// This is very conservative and will probably easily pass in the first recursion\n\t// steps, but it may save us some time once we get deeper.\n\td := ol.Len() - olen\n\tif d < 0 {\n\t\td = -d\n\t}\n\n\tif d > (c.blknum - idx)*(Nerrdata + Nerrmd) {\n\t\treturn -1\n\t}\n\n\t// try without errors\n\terr = c.tryMd(idx, ol.Slice(13, 17), ol.Slice(17, 0), olen - 17, mdblks, data, difficulty)\n\tif err >= 0 {\n\t\t// we try to decode the data block only if we didn't assume it have errors\n\t\tif ol.Len() < 17 {\n\t\t\t// if the block is too short, don't even try to decode it\n\t\t\treturn\n\t\t}\n\n\t\tv, errr := l0.Decode(prefix, ol.Slice(0, 17), c.crit)\n\t\tif errr != nil {\n\t\t\treturn\n\t\t}\n\n\t\tpbit := int(v & 1)\n\t\tv >>= 1\n\n\t\tvar parityok bool\n\t\tif PARITY_BUG {\n\t\t\tparityok = (v + uint64(pbit)) % 2 == 0\n\t\t} else {\n\t\t\tparityok = (bits.OnesCount64(v) + pbit) % 2 == 0\n\t\t}\n\n\t\tif parityok {\n\t\t\td := make([]byte, 4)\n\t\t\td[0] = byte(v)\n\t\t\td[1] = byte(v >> 8)\n\t\t\td[2] = byte(v >> 16)\n\t\t\td[3] = byte(v >> 24)\n\t\t\tdata[idx] = d\n\t\t}\n\n\t\treturn\n\t}\n\n\t// iterate through all possible errors\n\tfor derr := 1; derr < Nerrdata; derr++ {\n\t\t// data deletes\n\t\t// (we assume that the deletes are not in the last derr nts)\n\t\t// TODO: We should calculate the prefix correctly by assuming there are\n\t\t// errors\n\t\tprefix := ol.Slice(13 - derr, 17 - derr)\n\t\terr = c.tryMd(idx, prefix, ol.Slice(17 - derr, 0), olen - 17, mdblks, data, difficulty)\n\t\tif err >= 0 {\n\t\t\terr += derr\n\t\t\tbreak\n\t\t}\n\n\t\t// data inserts\n\t\t// TODO: We should calculate the prefix correctly by assuming there are\n\t\t// errors\n\t\tprefix = ol.Slice(13 + derr, 17 + derr)\n\t\terr = c.tryMd(idx, prefix, ol.Slice(17 + derr, 0), olen - 17, mdblks, data, difficulty)\n\t\tif err >= 0 {\n\t\t\terr += derr\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn\n}", "func block(h *[4][16]uint32, base uintptr, offsets *[16]uint32, mask uint16)", "func queryBlockEnc(blockStr string, prefix int64, blockId int) QueryBlockCipher {\n\tvar ret QueryBlockCipher\n\texp := getHashedValue(blockStr, prefix, blockId) // get the hash value in power part of ciphertext\n\tsubIndex, _ := strconv.Atoi(new(big.Int).Mod(exp, big.NewInt(subIndexSize)).String()) // calculate the sub-index value (G_k mod subIndexSize)\n\tret.subIndex = uint8(subIndex)\n\n\t// generate the ciphertext\n\tret.cipher = exp.Bytes()\n\treturn ret\n}", "func (r *nvPairReader) skipToAlign() {\n\tvar alignment int\n\tswitch r.nvlist.encoding {\n\tcase EncodingNative:\n\t\talignment = 8\n\tcase EncodingXDR:\n\t\talignment = 4\n\tdefault:\n\t\tpanic(\"Invalid encoding inside parser\")\n\t}\n\tif (r.currentByte-r.startByte)%alignment != 0 {\n\t\tr.currentByte += alignment - ((r.currentByte - r.startByte) % alignment)\n\t}\n}", "func isVideoPadding(c color.Color) bool {\n\tblack := color.RGBA{0, 0, 0, 255}\n\t// The tolerance was picked empirically. 
For example, on kukui, the first padding row below\n\t// the video has a color of (20, 1, 22, 255).\n\ttolerance := 25\n\treturn ColorDistance(c, black) < tolerance\n}", "func detectPrefixInfo() (int, int) {\n\t//this is simply a block of 32 A's\n\t// we will brute force a prefix length until we get two repeating blocks\n\tblockOfAs := []byte{65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65}\n\n\tfor len := 0; len < 16; len++ {\n\t\tprefix := make([]byte, len)\n\t\tfor i := range prefix {\n\t\t\tprefix[i] = 'A'\n\t\t}\n\t\tciphertext := blackBox(append(prefix, blockOfAs...))\n\t\tchunks, _ := common.Chunks(ciphertext, 16)\n\t\tif common.HasDuplicateBlocks(chunks) {\n\t\t\t// if we have two duplicate blocks, then we know how much data we need to prepend for block alignment\n\t\t\t// now we need to know which is the first block that has our data, so basically the index of the first repeating chunk\n\t\t\tfor blockNum := range chunks {\n\t\t\t\tvar left, right [16]byte\n\t\t\t\tcopy(left[:], chunks[blockNum][:16])\n\t\t\t\tcopy(right[:], chunks[blockNum+1][:16])\n\t\t\t\tif left == right {\n\t\t\t\t\treturn len, blockNum\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn -1, -1\n}", "func checkPkcs7Padding(buf []byte) ([]byte, error) {\n\tif len(buf) < 16 {\n\t\treturn nil, fmt.Errorf(\"Invalid padded buffer\")\n\t}\n\n\tpadLen := int(buf[len(buf)-1])\n\tif padLen < 1 || padLen > 16 {\n\t\treturn nil, fmt.Errorf(\"Invalid padded buffer\")\n\t}\n\n\tif padLen > len(buf) {\n\t\treturn nil, fmt.Errorf(\"Invalid padded buffer\")\n\t}\n\n\tfor pos := len(buf) - padLen; pos < len(buf); pos++ {\n\t\tif int(buf[pos]) != padLen {\n\t\t\treturn nil, fmt.Errorf(\"Invalid padded buffer\")\n\t\t}\n\t}\n\n\treturn buf[:len(buf)-padLen], nil\n}", "func unpad(message []byte) ([]byte, error) {\n if len(message) == 0 {\n return nil, ErrInvalidPadding\n }\n\n lenPadding := message[len(message) - 1]\n if lenPadding == 0 || lenPadding > aes.BlockSize {\n return nil, ErrInvalidPadding\n }\n\n for i := len(message) - 1; i > len(message) - int(lenPadding) - 1; i-- {\n if message[i] != lenPadding {\n return nil, ErrInvalidPadding\n }\n }\n\n return message[:len(message) - int(lenPadding)], nil\n}", "func encodeBlockSnappy(dst, src []byte) (d int) {\n\tif len(src) < minNonLiteralBlockSize {\n\t\treturn 0\n\t}\n\treturn encodeBlockSnappyGo(dst, src)\n}", "func (b *bitWriter) flushAlign() {\n\tnbBytes := (b.nBits + 7) >> 3\n\tfor i := uint8(0); i < nbBytes; i++ {\n\t\tb.out = append(b.out, byte(b.bitContainer>>(i*8)))\n\t}\n\tb.nBits = 0\n\tb.bitContainer = 0\n}", "func (ot *T) correctOffset(offset int64) {\n\tdrop := 0\n\tfor i, ar := range ot.ackedRanges {\n\t\tif offset < ar.from {\n\t\t\tbreak\n\t\t}\n\t\tdrop = i + 1\n\t\tif offset < ar.to {\n\t\t\toffset = ar.to\n\t\t\tbreak\n\t\t}\n\t}\n\tif drop > 0 {\n\t\tot.ackedRanges = ot.ackedRanges[drop:]\n\t}\n\tot.offset.Val = offset\n\tot.offset.Meta = encodeAckedRanges(offset, ot.ackedRanges)\n}", "func matchLen(a []byte, b []byte) int {\n\tb = b[:len(a)]\n\tvar checked int\n\tif len(a) > 4 {\n\t\t// Try 4 bytes first\n\t\tif diff := load32(a, 0) ^ load32(b, 0); diff != 0 {\n\t\t\treturn bits.TrailingZeros32(diff) >> 3\n\t\t}\n\t\t// Switch to 8 byte matching.\n\t\tchecked = 4\n\t\ta = a[4:]\n\t\tb = b[4:]\n\t\tfor len(a) >= 8 {\n\t\t\tb = b[:len(a)]\n\t\t\tif diff := load64(a, 0) ^ load64(b, 0); diff != 0 {\n\t\t\t\treturn checked + (bits.TrailingZeros64(diff) >> 3)\n\t\t\t}\n\t\t\tchecked += 
8\n\t\t\ta = a[8:]\n\t\t\tb = b[8:]\n\t\t}\n\t}\n\tb = b[:len(a)]\n\tfor i := range a {\n\t\tif a[i] != b[i] {\n\t\t\treturn int(i) + checked\n\t\t}\n\t}\n\treturn len(a) + checked\n}", "func IsCompressed(id uint8) bool {\n\treturn id >= 20\n}", "func finalizeFragment(buf []byte, to util.File, toOffset int64, c Compressor) (raw int, compressed bool, err error) {\n\t// compress the block if needed\n\tif c != nil {\n\t\tout, err := c.compress(buf)\n\t\tif err != nil {\n\t\t\treturn 0, compressed, fmt.Errorf(\"error compressing fragment block: %v\", err)\n\t\t}\n\t\tif len(out) < len(buf) {\n\t\t\tbuf = out\n\t\t\tcompressed = true\n\t\t}\n\t}\n\tif _, err := to.WriteAt(buf, toOffset); err != nil {\n\t\treturn 0, compressed, err\n\t}\n\treturn len(buf), compressed, nil\n}", "func alignment(block []byte, AlignSize int) int {\n\treturn int(uintptr(unsafe.Pointer(&block[0])) & uintptr(AlignSize-1))\n}", "func (decryptor *PgDecryptor) MatchZoneInBlock(block []byte) {\n\tsliceCopy := block[:]\n\tfor {\n\t\t// binary format\n\t\ti := bytes.Index(block, zone.ZoneIDBegin)\n\t\tif i == utils.NotFound {\n\t\t\tbreak\n\t\t} else {\n\t\t\tif decryptor.keyStore.HasZonePrivateKey(sliceCopy[i : i+zone.ZoneIDBlockLength]) {\n\t\t\t\tdecryptor.zoneMatcher.SetMatched(sliceCopy[i : i+EscapeZoneIDBlockLength])\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsliceCopy = sliceCopy[i+1:]\n\t\t}\n\t}\n\treturn\n}", "func getPadding(packetLen int) int {\n\tif packetLen%4 == 0 {\n\t\treturn 0\n\t}\n\treturn 4 - (packetLen % 4)\n}", "func fixLen(fixme []byte) []byte {\n l := (len(fixme) - 1) // skip the Q in \"Q\\x00\\x00\\x01\\x01SELECT...\"\n binary.BigEndian.PutUint32(fixme[1:], uint32(l))\n return fixme\n}", "func align(n, b int) int {\n\tbsz := b - 1 // blocksize\n\treturn (n + bsz) &^ bsz\n}", "func matchLen(a, b []byte) int {\n\tvar checked int\n\n\tfor len(a) >= 8 {\n\t\tif diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {\n\t\t\treturn checked + (bits.TrailingZeros64(diff) >> 3)\n\t\t}\n\t\tchecked += 8\n\t\ta = a[8:]\n\t\tb = b[8:]\n\t}\n\tb = b[:len(a)]\n\tfor i := range a {\n\t\tif a[i] != b[i] {\n\t\t\treturn i + checked\n\t\t}\n\t}\n\treturn len(a) + checked\n}", "func isBlockValid(newBlock *Block, oldBlock *Block) bool {\n\tif oldBlock.Index+1 != newBlock.Index {\n\t\treturn false\n\t}\n\n\tif oldBlock.Hash != newBlock.PrevHash {\n\t\treturn false\n\t}\n\n\tif calculateHash(newBlock) != newBlock.Hash {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func TestMalformedPacket(t *testing.T) {\n\t// copied as bytes from Wireshark, then modified the RelayMessage option length\n\tbytes := []byte{\n\t\t0x0c, 0x00, 0x24, 0x01, 0xdb, 0x00, 0x30, 0x10, 0xb0, 0x8a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n\t\t0x00, 0x0a, 0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x0b, 0xab, 0xff, 0xfe, 0x8a,\n\t\t0x6d, 0xf2, 0x00, 0x09, 0x00, 0x50 /*was 0x32*/, 0x01, 0x8d, 0x3e, 0x24, 0x00, 0x01, 0x00, 0x0e, 0x00, 0x01,\n\t\t0x00, 0x01, 0x0c, 0x71, 0x3d, 0x0e, 0x00, 0x0b, 0xab, 0x8a, 0x6d, 0xf2, 0x00, 0x08, 0x00, 0x02,\n\t\t0x00, 0x00, 0x00, 0x03, 0x00, 0x0c, 0xee, 0xbf, 0xfb, 0x6e, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0x00, 0x06, 0x00, 0x02, 0x00, 0x17, 0x00, 0x25, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x09,\n\t\t0x00, 0x03, 0x08, 0x00, 0xf0, 0x7f, 0x06, 0xd6, 0x4c, 0x3c, 0x00, 0x12, 0x00, 0x04, 0x09, 0x01,\n\t\t0x08, 0x5a,\n\t}\n\tpacket := Packet6(bytes)\n\t_, err := packet.dhcp6message()\n\tif err == nil {\n\t\tt.Fatalf(\"Should be unable to extract dhcp6message, but did not fail\")\n\t}\n}", 
"func (b testBody) isDeleted(t *testing.T, encoding []byte, bspan dvid.Span) bool {\n\t// Get to the # spans and RLE in encoding\n\tspansEncoding := encoding[8:]\n\tvar spans dvid.Spans\n\tif err := spans.UnmarshalBinary(spansEncoding); err != nil {\n\t\tt.Fatalf(\"Error in decoding sparse volume: %v\\n\", err)\n\t\treturn false\n\t}\n\n\t// Iterate true spans to see if any are in the blocks given.\n\tfor _, span := range spans {\n\t\tbx0 := span[2] / 32\n\t\tbx1 := span[3] / 32\n\t\tby := span[1] / 32\n\t\tbz := span[0] / 32\n\n\t\twithin_x := (bx0 >= bspan[2] && bx0 <= bspan[3]) || (bx1 >= bspan[2] && bx1 <= bspan[3])\n\t\tif bz == bspan[0] && by == bspan[1] && within_x {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func discoverBlockSizeInfo(oracle EncryptionOracleFn) BlockSizeInfo {\n\t// Assume block size is 8:\n\t// =>\n\t// suffix | inputSizeToGetFullPadding\n\t// 0 | 8\n\t// 1 | 7\n\t// 2 | 6\n\t// 3 | 5\n\t// 4 | 4\n\t// 5 | 3\n\t// 6 | 2\n\t// 7 | 1\n\t// 8 | 8\n\t// 9 | 7\n\n\tplainText := []byte{}\n\tcipher := askOracle(oracle, plainText)\n\tinitialLength := len(cipher)\n\tcipherLength := initialLength\n\n\tfor cipherLength == initialLength {\n\t\tplainText = append(plainText, 'A')\n\t\tcipher = askOracle(oracle, plainText)\n\t\tcipherLength = len(cipher)\n\t}\n\n\tbs := cipherLength - initialLength\n\treturn BlockSizeInfo{\n\t\tinputSizeToGetFullPadding: len(plainText),\n\t\tblockSize: bs,\n\t}\n}", "func isTruncated(fp *os.File, since *SinceDBInfo) (truncated bool, err error) {\n\tvar (\n\t\tfi os.FileInfo\n\t)\n\tif fi, err = fp.Stat(); err != nil {\n\t\treturn\n\t}\n\t// Old offset larger than file size.\n\tif fi.Size() < since.Offset {\n\t\ttruncated = true\n\t} else {\n\t\ttruncated = false\n\t}\n\treturn\n}", "func (self *bipbuf_t) IsEmpty() bool {\n\treturn self.a_start >= self.a_end\n}", "func alignBound(window *([]*Percept), sec *int) () {\n\tif *sec >= len(*window) {\n\t\t*sec = len(*window) - 1\n\t} else if *sec < 0 {\n\t\t*sec = 0\n\t}\n\treturn\n}", "func (t kSamples) checkUpdate(d float64, row []interface{}) {\n\tindexToChange := -1\n\tvar maxDistance float64\n\tfor i, e := range t {\n\t\tif e.distance > maxDistance {\n\t\t\tmaxDistance = e.distance\n\t\t\tindexToChange = i\n\t\t}\n\t}\n\tif d < maxDistance {\n\t\tt[indexToChange].row = row\n\t\tt[indexToChange].distance = d\n\t}\n}", "func remainingPattern(inputIndex, inputSize, patternIndex, patternSize int) bool {\n\treturn inputIndex == inputSize && patternIndex < patternSize-1\n}", "func PADDB(mx, x operand.Op) { ctx.PADDB(mx, x) }", "func InstMatchEmptyWidth(i *syntax.Inst, before rune, after rune) bool", "func (decryptor *PgDecryptor) MatchZoneBlock(block []byte) {\n\tfor _, c := range block {\n\t\tif !decryptor.MatchZone(c) {\n\t\t\treturn\n\t\t}\n\t}\n}", "func CheckSizeBlocks(start, end uint64) error {\n\ttxn := globalOpt.Txn(true)\n\tdefer txn.Commit()\n\n\tlist := make([]interface{}, 0, 64)\n\n\tit, err := txn.Get(TableBlockKey, HeightBlockKey)\n\tif err != nil {\n\t\ttxn.Abort()\n\t\treturn err\n\t}\n\tfor obj := it.Next(); obj != nil; obj = it.Next() {\n\t\tv, ok := obj.(*fabclient.MiddleCommonBlock)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tif v.Number < start || v.Number > end {\n\t\t\tlist = append(list, obj)\n\t\t}\n\t}\n\n\tfor _, one := range list {\n\t\terr = txn.Delete(TableBlockKey, one)\n\t\tif err != nil {\n\t\t\ttxn.Abort()\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func padBytesNeeded(elementLen int) int {\n\treturn 4*(elementLen/4+1) - elementLen\n}", "func 
checkPallocBits(t *testing.T, got, want *PallocBits) bool {\n\td := DiffPallocBits(got, want)\n\tif len(d) != 0 {\n\t\tt.Errorf(\"%d range(s) different\", len(d))\n\t\tfor _, bits := range d {\n\t\t\tt.Logf(\"\\t@ bit index %d\", bits.I)\n\t\t\tt.Logf(\"\\t| got: %s\", StringifyPallocBits(got, bits))\n\t\t\tt.Logf(\"\\t| want: %s\", StringifyPallocBits(want, bits))\n\t\t}\n\t\treturn false\n\t}\n\treturn true\n}", "func isBlockValid(newBlock, oldBlock Block) bool {\n\tif oldBlock.Index+1 != newBlock.Index {\n\t\tfmt.Println(\"Invalid index !!\", newBlock.Index, oldBlock.Index)\n\t\treturn false\n\t}\n\n\tif oldBlock.Hash != newBlock.PrevHash {\n\t\tfmt.Println(\"Invalid prev hash !!\")\n\t\treturn false\n\t}\n\n\tif calculateBlockHash(newBlock) != newBlock.Hash {\n\t\tfmt.Println(\"Invalid hash !!\")\n\t\treturn false\n\t}\n\tif !isServerData(newBlock.Data, newBlock.Hash) {\n\t\treturn false\n\t}\n\treturn true\n}", "func checkRow(m [][]int, padding, x, y int) (flag bool) {\n\tflag = true\n\tfor k := 1; k <= padding && flag; k++ {\n\t\tif m[x+k][y] == 1 || m[x-k][y] == 1 {\n\t\t\tflag = false\n\t\t}\n\t}\n\n\treturn\n}", "func isBlockValid(newBlock, oldBlock Block) bool {\n\tif oldBlock.Index+1 != newBlock.Index {\n\t\treturn false\n\t}\n\n\tif oldBlock.Hash != newBlock.PrevHash {\n\t\treturn false\n\t}\n\n\tif calculateHash(newBlock) != newBlock.Hash {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func checkReadQuality(read *sam.Record) bool {\n\tif int(read.MapQ) < MinMapQuality || read.Len() < MinReadLength {\n\t\treturn false\n\t}\n\n\t//\t\tfor _, cigar := range read.Cigar {\n\t//\t\t\tif cigar.Type() != sam.CigarMatch && cigar.Type() != sam.CigarSoftClipped {\n\t//\t\t\t\treturn false\n\t//\t\t\t}\n\t//\t\t}\n\t\n return true\n}", "func compressFragmentTwoPass(input []byte, input_size uint, is_last bool, command_buf []uint32, literal_buf []byte, table []int, table_size uint, storage_ix *uint, storage []byte) {\n\tvar initial_storage_ix uint = *storage_ix\n\tvar table_bits uint = uint(log2FloorNonZero(table_size))\n\tvar min_match uint\n\tif table_bits <= 15 {\n\t\tmin_match = 4\n\t} else {\n\t\tmin_match = 6\n\t}\n\tcompressFragmentTwoPassImpl(input, input_size, is_last, command_buf, literal_buf, table, table_bits, min_match, storage_ix, storage)\n\n\t/* If output is larger than single uncompressed block, rewrite it. 
*/\n\tif *storage_ix-initial_storage_ix > 31+(input_size<<3) {\n\t\trewindBitPosition(initial_storage_ix, storage_ix, storage)\n\t\temitUncompressedMetaBlock(input, input_size, storage_ix, storage)\n\t}\n\n\tif is_last {\n\t\twriteBits(1, 1, storage_ix, storage) /* islast */\n\t\twriteBits(1, 1, storage_ix, storage) /* isempty */\n\t\t*storage_ix = (*storage_ix + 7) &^ 7\n\t}\n}", "func afpacketComputeSize(targetSizeMb int, snaplen int, pageSize int) (\n\tframeSize int, blockSize int, numBlocks int, err error) {\n\n\tif snaplen < pageSize {\n\t\tframeSize = pageSize / (pageSize / snaplen)\n\t} else {\n\t\tframeSize = (snaplen/pageSize + 1) * pageSize\n\t}\n\n\t// 128 is the default from the gopacket library so just use that\n\tblockSize = frameSize * 128\n\tnumBlocks = (targetSizeMb * 1024 * 1024) / blockSize\n\n\tif numBlocks == 0 {\n\t\treturn 0, 0, 0, fmt.Errorf(\"interface buffersize is too small\")\n\t}\n\n\treturn frameSize, blockSize, numBlocks, nil\n}", "func verifyBlock(tree io.ReadSeeker, layout Layout, dataBlock []byte, blockIndex int64, expectedRoot []byte) error {\n\tif len(dataBlock) != int(layout.blockSize) {\n\t\treturn fmt.Errorf(\"incorrect block size\")\n\t}\n\n\texpectedDigest := make([]byte, layout.digestSize)\n\ttreeBlock := make([]byte, layout.blockSize)\n\tvar digest []byte\n\tfor level := 0; level < layout.numLevels(); level++ {\n\t\t// Calculate hash.\n\t\tif level == 0 {\n\t\t\tdigestArray := sha256.Sum256(dataBlock)\n\t\t\tdigest = digestArray[:]\n\t\t} else {\n\t\t\t// Read a block in previous level that contains the\n\t\t\t// hash we just generated, and generate a next level\n\t\t\t// hash from it.\n\t\t\tif _, err := tree.Seek(layout.blockOffset(level-1, blockIndex), io.SeekStart); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif _, err := tree.Read(treeBlock); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdigestArray := sha256.Sum256(treeBlock)\n\t\t\tdigest = digestArray[:]\n\t\t}\n\n\t\t// Move to stored hash for the current block, read the digest\n\t\t// and store in expectedDigest.\n\t\tif _, err := tree.Seek(layout.digestOffset(level, blockIndex), io.SeekStart); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif _, err := tree.Read(expectedDigest); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif !bytes.Equal(digest, expectedDigest) {\n\t\t\treturn fmt.Errorf(\"Verification failed\")\n\t\t}\n\n\t\t// If this is the root layer, no need to generate next level\n\t\t// hash.\n\t\tif level == layout.rootLevel() {\n\t\t\tbreak\n\t\t}\n\t\tblockIndex = blockIndex / layout.hashesPerBlock()\n\t}\n\n\t// Verification for the tree succeeded. 
Now compare the root hash in the\n\t// tree with expectedRoot.\n\tif !bytes.Equal(digest[:], expectedRoot) {\n\t\treturn fmt.Errorf(\"Verification failed\")\n\t}\n\treturn nil\n}", "func CompressSnappyBlock(src []byte, dst []byte) int {\n\tdstLen := len(dst)\n\tresult := snappy.Encode(dst, src)\n\tif len(result) > dstLen {\n\t\tpanic(\"dst length is too small\")\n\t}\n\treturn len(result)\n}", "func (d *Decoder) padEntry(idx *Index, e *Entry, read int) error {\n\tif idx.Version == 4 {\n\t\treturn nil\n\t}\n\n\tentrySize := read + len(e.Name)\n\tpadLen := 8 - entrySize%8\n\t_, err := io.CopyN(ioutil.Discard, d.r, int64(padLen))\n\treturn err\n}", "func (bd *BlockDAG) checkLayerGap(parents []*hash.Hash) bool {\n\tif len(parents) == 0 {\n\t\treturn false\n\t}\n\tparentsNode := []IBlock{}\n\tfor _, v := range parents {\n\t\tib := bd.getBlock(v)\n\t\tif ib == nil {\n\t\t\treturn false\n\t\t}\n\t\tparentsNode = append(parentsNode, ib)\n\t}\n\n\tpLen := len(parentsNode)\n\tif pLen == 0 {\n\t\treturn false\n\t}\n\tvar gap float64\n\tif pLen == 1 {\n\t\treturn true\n\t} else if pLen == 2 {\n\t\tgap = math.Abs(float64(parentsNode[0].GetLayer()) - float64(parentsNode[1].GetLayer()))\n\t} else {\n\t\tvar minLayer int64 = -1\n\t\tvar maxLayer int64 = -1\n\t\tfor i := 0; i < pLen; i++ {\n\t\t\tparentLayer := int64(parentsNode[i].GetLayer())\n\t\t\tif maxLayer == -1 || parentLayer > maxLayer {\n\t\t\t\tmaxLayer = parentLayer\n\t\t\t}\n\t\t\tif minLayer == -1 || parentLayer < minLayer {\n\t\t\t\tminLayer = parentLayer\n\t\t\t}\n\t\t}\n\t\tgap = math.Abs(float64(maxLayer) - float64(minLayer))\n\t}\n\tif gap > MaxTipLayerGap {\n\t\tlog.Error(fmt.Sprintf(\"Parents gap is %f which is more than %d\", gap, MaxTipLayerGap))\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (g *gcm) update(y *gcmFieldElement, data []byte) {\n\tfullBlocks := (len(data) >> 4) << 4\n\tg.updateBlocks(y, data[:fullBlocks])\n\n\tif len(data) != fullBlocks {\n\t\tvar partialBlock [gcmBlockSize]byte\n\t\tcopy(partialBlock[:], data[fullBlocks:])\n\t\tg.updateBlocks(y, partialBlock[:])\n\t}\n}", "func (h *Header) verifyInfo(ctx context.Context, isBDB bool) error {\n\tlim := len(h.Infos)\n\ttypecheck := h.region == TagHeaderImmutable || h.region == TagHeaderImage\n\tvar prev int32\n\tstart := 1\n\tif isBDB {\n\t\tstart--\n\t}\n\n\tfor i := start; i < lim; i++ {\n\t\te, err := h.loadTag(ctx, i)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tswitch {\n\t\tcase prev > e.offset:\n\t\t\treturn fmt.Errorf(\"botched entry: prev > offset (%d > %d)\", prev, e.offset)\n\t\tcase e.Tag < TagHeaderI18nTable && !isBDB:\n\t\t\treturn fmt.Errorf(\"botched entry: bad tag %v (%[1]d < %d)\", e.Tag, TagHeaderI18nTable)\n\t\tcase e.Type < TypeMin || e.Type > TypeMax:\n\t\t\treturn fmt.Errorf(\"botched entry: bad type %v\", e.Type)\n\t\tcase e.count == 0 || int64(e.count) > h.data.Size():\n\t\t\treturn fmt.Errorf(\"botched entry: bad count %d\", e.count)\n\t\tcase (e.Type.alignment()-1)&e.offset != 0:\n\t\t\treturn fmt.Errorf(\"botched entry: weird alignment: type alignment %d, offset %d\", e.Type.alignment(), e.offset)\n\t\tcase e.offset < 0 || int64(e.offset) > h.data.Size():\n\t\t\treturn fmt.Errorf(\"botched entry: bad offset %d\", e.offset)\n\t\tcase typecheck && !checkTagType(e.Tag, e.Type):\n\t\t\treturn fmt.Errorf(\"botched entry: typecheck fail: %v is not %v\", e.Tag, e.Type)\n\t\t}\n\t}\n\treturn nil\n}", "func unmatchedOuterRow(tblPos plannercore.TblColPosInfo, waitUpdateRow []types.Datum) bool {\n\tfirstHandleIdx := 
tblPos.HandleCols.GetCol(0)\n\treturn waitUpdateRow[firstHandleIdx.Index].IsNull()\n}", "func isBlockValid(newBlock, oldBlock Block) bool {\n\tif oldBlock.Index+1 != newBlock.Index {\n\t\treturn false\n\t}\n\n\tif oldBlock.Hash != newBlock.PrevHash {\n\t\treturn false\n\t}\n\n\tif calculateBlockHash(newBlock) != newBlock.Hash {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func doFindSmallerSize(size int64, pattern string) bool {\n\ti, err := humanize.ParseBytes(pattern)\n\tfatalIf(probe.NewError(err), \"Error parsing string passed to flag smaller\")\n\n\treturn int64(i) > size\n}", "func pad(unpadded []byte, desiredLength int) []byte {\n\tif len(unpadded) == desiredLength {\n\t\treturn unpadded\n\t}\n\ttoAppend := desiredLength - len(unpadded)\n\treturn append(unpadded, bytes.Repeat([]byte{byte(0x00)}, toAppend)...)\n}", "func (da *DoubleArray) _decideBaseOffset(firstChars []uint8, existsTerminator bool, offset uint8, rootIndex uint32, baseSearchOffset uint32) (uint32, uint32) {\n for {\n if baseSearchOffset >= uint32(len(da.Base)) {\n da._resizeDoubleArray()\n }\n if da.Check[baseSearchOffset] == 0 {\n break\n }\n baseSearchOffset++\n }\n var baseOffset uint32\n if baseSearchOffset <= charIndexCount + 2 {\n baseOffset = 2\n } else {\n baseOffset = baseSearchOffset - charIndexCount\n }\n for {\n if baseOffset + charIndexCount >= uint32(len(da.Base)) {\n da._resizeDoubleArray()\n }\n if !da._checkCollision(firstChars, existsTerminator, baseOffset) {\n // 衝突しない場合\n var i uint32\n for i = 1; i < charIndexCount; i++ {\n if firstChars[i] != 0 {\n da.Check[baseOffset + i] = rootIndex\n }\n }\n if existsTerminator {\n da.Check[baseOffset + charIndexCount] = rootIndex\n }\n\t\t\t//daCount++\n\t\t\t//if daCount % 1000 == 0 {\n\t\t\t//\tfmt.Printf(\"DEBUG decideBaseOffset %d %d %d\\n\", daCount, baseOffset, baseSearchOffset)\n\t\t\t//}\n return baseOffset, baseSearchOffset\n }\n baseOffset++\n }\n}", "func unpad(in []byte) []byte {\n\tif len(in) == 0 {\n\t\treturn nil\n\t}\n\n\tpadding := in[len(in)-1]\n\tif int(padding) > len(in) {\n\t\treturn nil\n\t} else if padding == 0 {\n\t\treturn nil\n\t}\n\n\tfor i := len(in) - 1; i > len(in)-int(padding)-1; i-- {\n\t\tif in[i] != padding {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn in[:len(in)-int(padding)]\n}", "func copyOuterRows(innerColOffset, outerColOffset int, src *Chunk, numRows int, dst *Chunk) {\n\ttrace_util_0.Count(_chunk_util_00000, 17)\n\tif numRows <= 0 {\n\t\ttrace_util_0.Count(_chunk_util_00000, 20)\n\t\treturn\n\t}\n\ttrace_util_0.Count(_chunk_util_00000, 18)\n\trow := src.GetRow(0)\n\tvar srcCols []*column\n\tif innerColOffset == 0 {\n\t\ttrace_util_0.Count(_chunk_util_00000, 21)\n\t\tsrcCols = src.columns[outerColOffset:]\n\t} else {\n\t\ttrace_util_0.Count(_chunk_util_00000, 22)\n\t\t{\n\t\t\tsrcCols = src.columns[:innerColOffset]\n\t\t}\n\t}\n\ttrace_util_0.Count(_chunk_util_00000, 19)\n\tfor i, srcCol := range srcCols {\n\t\ttrace_util_0.Count(_chunk_util_00000, 23)\n\t\tdstCol := dst.columns[outerColOffset+i]\n\t\tdstCol.appendMultiSameNullBitmap(!srcCol.isNull(row.idx), numRows)\n\t\tdstCol.length += numRows\n\t\tif srcCol.isFixed() {\n\t\t\ttrace_util_0.Count(_chunk_util_00000, 24)\n\t\t\telemLen := len(srcCol.elemBuf)\n\t\t\tstart := row.idx * elemLen\n\t\t\tend := start + numRows*elemLen\n\t\t\tdstCol.data = append(dstCol.data, srcCol.data[start:end]...)\n\t\t} else {\n\t\t\ttrace_util_0.Count(_chunk_util_00000, 25)\n\t\t\t{\n\t\t\t\tstart, end := srcCol.offsets[row.idx], srcCol.offsets[row.idx+numRows]\n\t\t\t\tdstCol.data = 
append(dstCol.data, srcCol.data[start:end]...)\n\t\t\t\toffsets := dstCol.offsets\n\t\t\t\telemLen := srcCol.offsets[row.idx+1] - srcCol.offsets[row.idx]\n\t\t\t\tfor j := 0; j < numRows; j++ {\n\t\t\t\t\ttrace_util_0.Count(_chunk_util_00000, 27)\n\t\t\t\t\toffsets = append(offsets, int64(offsets[len(offsets)-1]+elemLen))\n\t\t\t\t}\n\t\t\t\ttrace_util_0.Count(_chunk_util_00000, 26)\n\t\t\t\tdstCol.offsets = offsets\n\t\t\t}\n\t\t}\n\t}\n}", "func TestBlockLayout(t *testing.T) {\n\tt.Parallel()\n\n\tvar b block\n\tb.setbit(0)\n\tb.setbit(1)\n\tb.setbit(111)\n\tb.setbit(499)\n\n\tassert.Equal(t, BlockBits, 8*binary.Size(b))\n\n\th := sha256.New()\n\tbinary.Write(h, binary.LittleEndian, b)\n\texpect := \"aa7f8c411600fa387f0c10641eab428a7ed2f27a86171ac69f0e2087b2aa9140\"\n\tassert.Equal(t, expect, hex.EncodeToString(h.Sum(nil)))\n}", "func (b *bitWriter) alignToByteBoundary() error {\n\tif b.nBits = (b.nBits + 7) &^ 7; b.nBits == 64 {\n\t\treturn b.flushBits()\n\t}\n\treturn nil\n}", "func DetectBlockSize(oracle func([]byte) []byte) uint {\n\n\tsize := len(oracle(make([]byte, 0)))\n\tdata := make([]byte, 1)\n\tfor true {\n\t\tdif := len(oracle(data)) - size\n\t\tif dif > 0 {\n\t\t\treturn uint(dif)\n\t\t} else {\n\t\t\tdata = append(data, byte(0))\n\t\t}\n\t}\n\treturn 0 // no valid block size found\n}", "func (at *AnnotatedTable) checkAlmostAsBig(ctx context.Context, other *AnnotatedTable) error {\n\tthisDetail, err := at.CachedDetail(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\totherDetail, err := other.CachedDetail(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Check that receiver table contains at least 99% as many tasks as\n\t// other table.\n\tif thisDetail.TaskFileCount < otherDetail.TaskFileCount {\n\t\tlog.Printf(\"Warning - fewer task files: %s(%d) < %s(%d) possibly due to redundant task files.\\n\",\n\t\t\tat.Table.FullyQualifiedName(), thisDetail.TaskFileCount,\n\t\t\tother.Table.FullyQualifiedName(), otherDetail.TaskFileCount)\n\t}\n\n\t// NOTE: We have discovered that in 2012, some archives contain tests that are entirely\n\t// redundant with tests in other archives. This means that some archives are completely removed\n\t// in the dedup process. Since these archives appear in the original \"base_tables\", this check\n\t// has been causing the sanity check to fail.\n\tif IncludeTaskFileCountCheck && float32(thisDetail.TaskFileCount) < taskCountRequirement*float32(otherDetail.TaskFileCount) {\n\t\treturn ErrTooFewTasks\n\t}\n\n\tif thisDetail.TestCount < otherDetail.TestCount {\n\t\tlog.Printf(\"Warning_fewer_tests: %s(%d) < %s(%d)\\n\",\n\t\t\tat.Table.FullyQualifiedName(), thisDetail.TestCount,\n\t\t\tother.Table.FullyQualifiedName(), otherDetail.TestCount)\n\t}\n\t// We are now using DISTINCT test counts, so we can use a tighter bound.\n\tif float32(thisDetail.TestCount) < testCountRequirement*float32(otherDetail.TestCount) {\n\t\treturn ErrTooFewTests\n\t}\n\treturn nil\n}", "func unpad(src []byte) ([]byte, error) {\n\tlength := len(src)\n\tunpadding := int(src[length-1])\n\n\tif unpadding > length {\n\t\treturn nil, errors.New(\"unpad error. 
This could happen when incorrect encryption key is used\")\n\t}\n\n\treturn src[:(length - unpadding)], nil\n}", "func pkcs7Unpad(data []byte, blocklen int) []byte {\n\tif blocklen <= 0 {\n\t\tlog.Fatal(\"Invalid blocklen %d\", blocklen)\n\t}\n\tif len(data)%blocklen != 0 || len(data) == 0 {\n\t\tlog.Fatal(\"Invalid data len %d\", len(data))\n\t}\n\tpadlen := int(data[len(data)-1])\n\tif padlen > blocklen || padlen == 0 {\n\t\tlog.Fatal(\"Invalid padding\")\n\t}\n\tpad := data[len(data)-padlen:]\n\tfor i := 0; i < padlen; i++ {\n\t\tif pad[i] != byte(padlen) {\n\t\t\tlog.Fatal(\"Invalid padding\")\n\t\t}\n\t}\n\treturn data[:len(data)-padlen]\n}", "func (p *TestPager) FixData() bool {\n var hasError = false\n if p.Total < 0 {\n p.Total = 0\n hasError = true\n }\n if p.Current < 0 {\n p.Current = 0\n hasError = true\n }\n if p.PageItems <= 0 {\n p.PageItems = 10\n hasError = true\n }\n return hasError\n}", "func (c *cursor) decodeBlock(position uint32) {\n\tlength := c.blockLength(position)\n\tblock := c.f.mmap[position+blockHeaderSize : position+blockHeaderSize+length]\n\tc.vals = c.vals[:0]\n\t_ = DecodeBlock(block, &c.vals)\n\n\t// only adavance the position if we're asceending.\n\t// Descending queries use the blockPositions\n\tif c.ascending {\n\t\tc.pos = position + blockHeaderSize + length\n\t}\n}", "func afpacketComputeSize(target_size_mb int, snaplen int, page_size int) (\n\tframe_size int, block_size int, num_blocks int, err error) {\n\n\tif snaplen < page_size {\n\t\tframe_size = page_size / (page_size / snaplen)\n\t} else {\n\t\tframe_size = (snaplen/page_size + 1) * page_size\n\t}\n\n\t// 128 is the default from the gopacket library so just use that\n\tblock_size = frame_size * 128\n\tnum_blocks = (target_size_mb * 1024 * 1024) / block_size\n\n\tif num_blocks == 0 {\n\t\treturn 0, 0, 0, fmt.Errorf(\"Buffer size too small\")\n\t}\n\n\treturn frame_size, block_size, num_blocks, nil\n}", "func (t Table) Pack() (C, V, r []int) {\n\trows := len(t)\n\ttotal := 0\n\tmaxCols := 0\n\n\t// Step 1 - count nonzero positions\n\tnz := make([][]int, rows)\n\tfor i, row := range t {\n\t\ttotal += len(row)\n\t\tif len(row) > maxCols {\n\t\t\tmaxCols = len(row)\n\t\t}\n\t\tfor j, val := range row {\n\t\t\tif val != 0 {\n\t\t\t\tnz[i] = append(nz[i], j)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Step 2 - sort rows\n\tsorted := make([]int, rows)\n\tfor i := range sorted {\n\t\tsorted[i] = i\n\t}\n\tsort.Slice(sorted, func(i, j int) bool {\n\t\tx, y := sorted[i], sorted[j]\n\t\tlx, ly := len(nz[x]), len(nz[y])\n\t\treturn lx > ly || lx == ly && t.less(x, y)\n\t})\n\n\t// Step 3 - first-fit\n\tentry := NewBitSet(uint(total + 1))\n\tused := NewBitSet(uint(total + maxCols))\n\tmin := 0 // first position where entry[min] = false\n\tmax := -1 // last position where entry[max] = true\n\tr = make([]int, rows)\n\n\tprev := -1\n\tfor _, i := range sorted {\n\t\tif len(nz[i]) == 0 { // all zero row\n\t\t\tr[i] = max + 1\n\t\t\tcontinue\n\t\t}\n\t\tif prev >= 0 && len(nz[prev]) == len(nz[i]) && !t.less(prev, i) { // duplicate row\n\t\t\tr[i] = r[prev]\n\t\t\tcontinue\n\t\t}\n\t\tprev = i\n\t\tri := min - nz[i][0]\n\tcheck:\n\t\tif used.Test(uint(ri + maxCols)) {\n\t\t\tri++\n\t\t\tgoto check\n\t\t}\n\t\tfor _, j := range nz[i] {\n\t\t\tif entry.Test(uint(ri + j)) {\n\t\t\t\tri++\n\t\t\t\tgoto check\n\t\t\t}\n\t\t}\n\t\t// ri is ok\n\t\tused.Set(uint(ri + maxCols))\n\t\tfor _, j := range nz[i] {\n\t\t\tentry.Set(uint(ri + j))\n\t\t\tif ri+j > max {\n\t\t\t\tmax = ri + j\n\t\t\t}\n\t\t}\n\t\tfor entry.Test(uint(min)) 
{\n\t\t\tmin++\n\t\t}\n\t\tr[i] = ri\n\t}\n\n\t// Generate C, V\n\tC = make([]int, max+1)\n\tV = make([]int, max+1)\n\tfor i := range V {\n\t\tV[i] = -1\n\t}\n\tfor i, row := range t {\n\t\tri := r[i]\n\t\tfor _, j := range nz[i] {\n\t\t\tC[ri+j] = row[j]\n\t\t\tV[ri+j] = j\n\t\t}\n\t}\n\treturn C, V, r\n}", "func packetChecksumOK(packet []byte) bool {\n\n\tif len(packet) < 0x22 {\n\t\treturn false\n\t}\n\n\t// checksum in data packet\n\tpacketsum := uint16(packet[0x21])<<8 | uint16(packet[0x20])\n\n\t// calculated checksum\n\tsum := checksum(packet)\n\tsum -= uint16(packet[0x20]) // remove checksum value bytes\n\tsum -= uint16(packet[0x21])\n\n\treturn sum == packetsum\n}", "func (w *Window) Check(index uint64) bool {\n\t// check if too old\n\tif index+WindowSize < w.highest {\n\t\treturn false\n\t}\n\n\t// bits outside the block size represent which block the index is in\n\tindexBlock := index >> blockBitsLog\n\n\t// move window if new index is higher\n\tif index > w.highest {\n\t\tcurrTopBlock := w.highest >> blockBitsLog\n\t\t// how many blocks ahead is indexBlock?\n\t\t// cap it at a full circle around the array, at that point we clear the\n\t\t// whole thing\n\t\tnewBlocks := min(indexBlock-currTopBlock, numBlocks)\n\t\t// clear each new block\n\t\tfor i := uint64(1); i <= newBlocks; i++ {\n\t\t\t// mod index so it wraps around\n\t\t\tw.blocks[(currTopBlock+i)%numBlocks] = 0\n\t\t}\n\t\tw.highest = index\n\t}\n\n\t// we didn't mod until now because we needed to know the difference between\n\t// a lower index and wrapped higher index\n\t// we need to keep the index inside the array now\n\tindexBlock %= numBlocks\n\n\t// bits inside the block represent where in the block the bit is\n\t// mask it with the block size\n\tindexBit := index & uint64(blockBits-1)\n\n\t// finally check the index\n\n\t// save existing block to see if it changes\n\toldBlock := w.blocks[indexBlock]\n\t// create updated block\n\tnewBlock := oldBlock | (1 << indexBit)\n\t// set block to new value\n\tw.blocks[indexBlock] = newBlock\n\n\t// if the bit wasn't already 1, the values should be different and this should return true\n\treturn oldBlock != newBlock\n}", "func (s *Stack) Align(offset int) {\n\tif s.Bottom%hostarch.Addr(offset) != 0 {\n\t\ts.Bottom -= (s.Bottom % hostarch.Addr(offset))\n\t}\n}", "func SmithWatermanFull(seqPair *SeqPair, wm *WeightMatrix, gapPenalty int) *AlignResult {\n dp := make([][]int, len(seqPair.S1) + 1)\n\n for i := range dp {\n dp[i] = make([]int, len(seqPair.S2) + 1)\n }\n\n maxScore := 0\n maxPos := Point{}\n\n // Main part\n\n for row := 0; row < len(seqPair.S1); row += 1 {\n for col := 0; col < len(seqPair.S2); col += 1 {\n // dp indices are actually (row + 1, col + 1) because of dummy zero-indexed elements\n dp[row + 1][col + 1] = util.Max4(\n 0,\n dp[row + 1][col] + gapPenalty, /* left */\n dp[row][col + 1] + gapPenalty, /* top */\n dp[row][col] + seqPair.WeightIn(wm, row, col)) /* top-left */\n\n if maxScore < dp[row + 1][col + 1] {\n maxScore = dp[row + 1][col + 1]\n maxPos = Point{ col + 1, row + 1 }\n }\n }\n }\n\n //fmt.Println(\"TABLE:\")\n //for i, lst := range dp[1:] {\n // fmt.Printf(\"%-2v: %-3v\\n\", i, lst)\n //}\n //fmt.Println()\n\n // Recovery\n\n s1Builder := strings.Builder{}\n s2Builder := strings.Builder{}\n\n curPos := maxPos\n\n if curPos.X > 0 {\n for i := len(seqPair.S2) - 1; i > curPos.X - 1; i -= 1 {\n s1Builder.WriteByte('-')\n s2Builder.WriteByte(seqPair.S2[i])\n }\n }\n\n if curPos.Y > 0 {\n for i := len(seqPair.S1) - 1; i > curPos.Y - 1; i -= 1 {\n 
s1Builder.WriteByte(seqPair.S1[i])\n s2Builder.WriteByte('-')\n }\n }\n\n for curPos.X > 0 && curPos.Y > 0 {\n l := dp[curPos.Y][curPos.X - 1] + gapPenalty /* left */\n t := dp[curPos.Y - 1][curPos.X] + gapPenalty /* top */\n tl := dp[curPos.Y - 1][curPos.X - 1] + seqPair.WeightIn(wm, curPos.Y - 1, curPos.X - 1) /* top-left */\n\n max := util.Max4(0, l, t, tl)\n\n if max == 0 {\n break\n } else if max == l {\n s1Builder.WriteByte('-')\n s2Builder.WriteByte(seqPair.S2[curPos.X - 1])\n curPos.X -= 1\n } else if max == t {\n s1Builder.WriteByte(seqPair.S1[curPos.Y - 1])\n s2Builder.WriteByte('-')\n curPos.Y -= 1\n } else { /* max == tl */\n s1Builder.WriteByte(seqPair.S1[curPos.Y - 1])\n s2Builder.WriteByte(seqPair.S2[curPos.X - 1])\n curPos.X -= 1\n curPos.Y -= 1\n }\n }\n\n for curPos.X > 0 {\n s1Builder.WriteByte('-')\n s2Builder.WriteByte(seqPair.S2[curPos.X - 1])\n curPos.X -= 1\n }\n\n for curPos.Y > 0 {\n s1Builder.WriteByte(seqPair.S1[curPos.Y - 1])\n s2Builder.WriteByte('-')\n curPos.Y -= 1\n }\n\n s1Align := util.ReverseString(s1Builder.String())\n s2Align := util.ReverseString(s2Builder.String())\n\n s1AlignColored := strings.Builder{}\n s2AlignColored := strings.Builder{}\n\n for i := range s1Align {\n if s1Align[i] == '-' {\n s1AlignColored.WriteString(util.Colorify(\"-\", util.ColorBlue))\n s2AlignColored.WriteByte(s2Align[i])\n } else if s2Align[i] == '-' {\n s1AlignColored.WriteByte(s1Align[i])\n s2AlignColored.WriteString(util.Colorify(\"-\", util.ColorBlue))\n } else if s1Align[i] == s2Align[i] {\n s1AlignColored.WriteString(util.Colorify(string(s1Align[i]), util.ColorGreen))\n s2AlignColored.WriteString(util.Colorify(string(s2Align[i]), util.ColorGreen))\n } else {\n s1AlignColored.WriteString(util.Colorify(string(s1Align[i]), util.ColorRed))\n s2AlignColored.WriteString(util.Colorify(string(s2Align[i]), util.ColorRed))\n }\n }\n\n // Return result\n return &AlignResult{\n Score: maxScore,\n Align: s1AlignColored.String() + \"\\n\" + s2AlignColored.String(),\n }\n}", "func canMakePaliQueries(s string, queries [][]int) []bool {\n\tn := len(queries)\n\n\tcnt := make([]int, 1, n+1)\n\tc := 0\n\tfor _, l := range s {\n\t\tc ^= 1 << uint(l-'a')\n\t\tcnt = append(cnt, c)\n\t}\n\n\tres := make([]bool, n)\n\tfor i, q := range queries {\n\t\tlo, hi, k := q[0], q[1], q[2]\n\t\tif k >= 13 {\n\t\t\tres[i] = true\n\t\t\tcontinue\n\t\t}\n\t\tremains := bits(cnt[hi+1] ^ cnt[lo])\n\t\tres[i] = remains/2 <= k\n\t}\n\n\treturn res\n}", "func pkcs7Pad(b []byte, blocksize int) ([]byte, error) {\n\tif blocksize <= 0 {\n\t\treturn nil, fmt.Errorf(\"invalid block size\")\n\t}\n\tif b == nil || len(b) == 0 {\n\t\treturn nil, fmt.Errorf(\"invalid pkcs7 data format\")\n\n\t}\n\tn := blocksize - (len(b) % blocksize)\n\tpb := make([]byte, len(b)+n)\n\tcopy(pb, b)\n\tcopy(pb[len(b):], bytes.Repeat([]byte{byte(n)}, n))\n\treturn pb, nil\n}", "func (n Disk) partitionsMisaligned() bool {\n\tfor _, p := range n.Partitions {\n\t\tif (p.Start & (2048 - 1)) != 0 {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func encodeBlock(dst, src []byte) (d int) {\n\tif len(src) < minNonLiteralBlockSize {\n\t\treturn 0\n\t}\n\treturn encodeBlockGo(dst, src)\n}", "func isValidBlock(newBlock, oldBlock Block) bool {\n\tif oldBlock.Index+1 != newBlock.Index {\n\t\treturn false\n\t}\n\n\tif oldBlock.Hash != newBlock.PrevHash {\n\t\treturn false\n\t}\n\n\tif calculateBlockHash(newBlock) != newBlock.Hash {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (ss *statSegmentV2) adjust(data unsafe.Pointer) unsafe.Pointer 
{\n\theader := ss.loadSharedHeader(ss.sharedHeader)\n\tadjusted := unsafe.Pointer(uintptr(unsafe.Pointer(&ss.sharedHeader[0])) +\n\t\tuintptr(*(*uint64)(data)) - uintptr(*(*uint64)(unsafe.Pointer(&header.base))))\n\tif uintptr(unsafe.Pointer(&ss.sharedHeader[len(ss.sharedHeader)-1])) <= uintptr(adjusted) ||\n\t\tuintptr(unsafe.Pointer(&ss.sharedHeader[0])) >= uintptr(adjusted) {\n\t\treturn nil\n\t}\n\treturn adjusted\n}", "func Encode(dst, src []byte) (compressedSize int, error error) {\n\tif len(src) >= MaxInputSize {\n\t\treturn 0, ErrTooLarge\n\t}\n\n\tif n := CompressBound(len(src)); len(dst) < n {\n\t\treturn 0, ErrEncodeTooSmall\n\t}\n\n\thashTable := hashPool.Get().([]uint32)\n\tfor i := range hashTable {\n\t\thashTable[i] = 0\n\t}\n\te := encoder{src: src, dst: dst, hashTable: hashTable}\n\tdefer func() {\n\t\thashPool.Put(hashTable)\n\t}()\n\t// binary.LittleEndian.PutUint32(dst, uint32(len(src)))\n\t// e.dpos = 0\n\n\tvar (\n\t\tstep uint32 = 1\n\t\tlimit = incompressible\n\t)\n\n\tfor {\n\t\tif int(e.pos)+12 >= len(e.src) {\n\t\t\te.writeLiterals(uint32(len(e.src))-e.anchor, 0, e.anchor)\n\t\t\treturn int(e.dpos), nil\n\t\t}\n\n\t\tsequence := uint32(e.src[e.pos+3])<<24 | uint32(e.src[e.pos+2])<<16 | uint32(e.src[e.pos+1])<<8 | uint32(e.src[e.pos+0])\n\n\t\thash := (sequence * 2654435761) >> hashShift\n\t\tref := e.hashTable[hash] + uninitHash\n\t\te.hashTable[hash] = e.pos - uninitHash\n\n\t\tif ((e.pos-ref)>>16) != 0 || uint32(e.src[ref+3])<<24|uint32(e.src[ref+2])<<16|uint32(e.src[ref+1])<<8|uint32(e.src[ref+0]) != sequence {\n\t\t\tif e.pos-e.anchor > limit {\n\t\t\t\tlimit <<= 1\n\t\t\t\tstep += 1 + (step >> 2)\n\t\t\t}\n\t\t\te.pos += step\n\t\t\tcontinue\n\t\t}\n\n\t\tif step > 1 {\n\t\t\te.hashTable[hash] = ref - uninitHash\n\t\t\te.pos -= step - 1\n\t\t\tstep = 1\n\t\t\tcontinue\n\t\t}\n\t\tlimit = incompressible\n\n\t\tln := e.pos - e.anchor\n\t\tback := e.pos - ref\n\n\t\tanchor := e.anchor\n\n\t\te.pos += minMatch\n\t\tref += minMatch\n\t\te.anchor = e.pos\n\n\t\tfor int(e.pos) < len(e.src)-5 && e.src[e.pos] == e.src[ref] {\n\t\t\te.pos++\n\t\t\tref++\n\t\t}\n\n\t\tmlLen := e.pos - e.anchor\n\n\t\te.writeLiterals(ln, mlLen, anchor)\n\t\te.dst[e.dpos] = uint8(back)\n\t\te.dst[e.dpos+1] = uint8(back >> 8)\n\t\te.dpos += 2\n\n\t\tif mlLen > mlMask-1 {\n\t\t\tmlLen -= mlMask\n\t\t\tfor mlLen > 254 {\n\t\t\t\tmlLen -= 255\n\n\t\t\t\te.dst[e.dpos] = 255\n\t\t\t\te.dpos++\n\t\t\t}\n\n\t\t\te.dst[e.dpos] = byte(mlLen)\n\t\t\te.dpos++\n\t\t}\n\n\t\te.anchor = e.pos\n\t}\n}", "func (g *GroupedAVP) Padding() int {\n\treturn 0\n}", "func TestNewPacketFrom(t *testing.T) {\n\n\tt.Log(\"Start TestNewPacketFrom +++++++++++++\")\n\t// Get those go-to queries\n\ttcases := tmake()\n\n\tfor _, tcase := range tcases {\n\t\tt.Log(\"Testing for: \", tcase.Serialized)\n\t\tns := NewMySQLPacketFrom(0, tcase.ns.Serialized[HEADER_SIZE+1:])\n\t\tif ns.Length != tcase.ns.Length {\n\t\t\tt.Log(\"Length expected\", tcase.ns.Length, \"instead got\", ns.Length)\n\t\t}\n\t\tif ns.Sqid != tcase.ns.Sqid {\n\t\t\tt.Log(\"Length expected\", tcase.ns.Sqid, \"instead got\", ns.Sqid)\n\t\t}\n\t\tif ns.Cmd != tcase.ns.Cmd {\n\t\t\tt.Log(\"Command expected\", tcase.ns.Cmd, \"instead got\", ns.Cmd)\n\t\t\tt.Fail()\n\t\t}\n\t\tif !reflect.DeepEqual(ns.Serialized, tcase.ns.Serialized) {\n\t\t\tt.Log(\"Serialized expected\", tcase.ns.Serialized, \"instead got\", ns.Serialized)\n\t\t\tt.Fail()\n\t\t}\n\t\tt.Log(\"Done testing for: \", tcase.Serialized)\n\t}\n\n\tt.Log(\"End TestNewPacketFrom 
+++++++++++++\")\n\n}", "func (dec *XMASDecoder) CheckAtPosition(pos int) (bool, error) {\n\tidx := pos + dec.PreambleSize\n\ttarget := dec.Stream[idx]\n\n\tif pos < 0 || idx >= len(dec.Stream) {\n\t\treturn false, errors.New(\"out of bounds\")\n\t}\n\n\tbeginIdx := idx - dec.PreambleSize\n\tfinalIdx := idx - 1\n\n\tpass := false\n\tfor i := beginIdx; i <= finalIdx; i++ {\n\t\touterVal := dec.Stream[i]\n\t\tfor j := beginIdx; j <= finalIdx; j++ {\n\t\t\tif i == j {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tinnerVal := dec.Stream[j]\n\n\t\t\tif outerVal+innerVal == target {\n\t\t\t\tpass = true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn pass, nil\n}", "func isDiffVarintSnappyStreamedEncodedPostings(input []byte) bool {\n\treturn bytes.HasPrefix(input, []byte(codecHeaderStreamedSnappy))\n}", "func (a *ArrayDataSlab) CanLendToLeft(size uint32) bool {\n\tif len(a.elements) == 0 {\n\t\t// TODO return EmptyDataSlabError\n\t\tpanic(fmt.Sprintf(\"empty data slab %d\", a.header.id))\n\t}\n\tif len(a.elements) < 2 {\n\t\treturn false\n\t}\n\tif a.header.size-size < uint32(minThreshold) {\n\t\treturn false\n\t}\n\tlendSize := uint32(0)\n\tfor i := 0; i < len(a.elements); i++ {\n\t\tlendSize += a.elements[i].ByteSize()\n\t\tif a.header.size-lendSize < uint32(minThreshold) {\n\t\t\treturn false\n\t\t}\n\t\tif lendSize >= size {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func PackedNibbleLookup(dst, src []byte, tablePtr *[16]byte) {\n\t// This takes ~15% longer than the unsafe function on the short-array\n\t// benchmark.\n\tdstLen := len(dst)\n\tnSrcFullByte := dstLen >> 1\n\tsrcOdd := dstLen & 1\n\tif len(src) != nSrcFullByte+srcOdd {\n\t\tpanic(\"PackedNibbleLookup() requires len(src) == (len(dst) + 1) / 2.\")\n\t}\n\tif nSrcFullByte < 16 {\n\t\tfor srcPos := 0; srcPos < nSrcFullByte; srcPos++ {\n\t\t\tsrcByte := src[srcPos]\n\t\t\tdst[2*srcPos] = tablePtr[srcByte&15]\n\t\t\tdst[2*srcPos+1] = tablePtr[srcByte>>4]\n\t\t}\n\t} else {\n\t\tsrcHeader := (*reflect.SliceHeader)(unsafe.Pointer(&src))\n\t\tdstHeader := (*reflect.SliceHeader)(unsafe.Pointer(&dst))\n\t\tpackedNibbleLookupOddSSSE3Asm(unsafe.Pointer(dstHeader.Data), unsafe.Pointer(srcHeader.Data), unsafe.Pointer(tablePtr), nSrcFullByte)\n\t}\n\tif srcOdd == 1 {\n\t\tsrcByte := src[nSrcFullByte]\n\t\tdst[2*nSrcFullByte] = tablePtr[srcByte&15]\n\t}\n}" ]
[ "0.625556", "0.5852425", "0.5724083", "0.5428828", "0.526153", "0.51679826", "0.51656187", "0.5112957", "0.505683", "0.48105946", "0.4784534", "0.4780916", "0.47005382", "0.46983063", "0.4685418", "0.466283", "0.46618646", "0.4618533", "0.45821616", "0.45800024", "0.45759624", "0.45676714", "0.4561276", "0.45587534", "0.4550772", "0.45343986", "0.44959575", "0.44945973", "0.4491839", "0.44644785", "0.4462923", "0.44489175", "0.44439232", "0.44433558", "0.444153", "0.44390145", "0.44374076", "0.44351593", "0.44315675", "0.44294444", "0.44220218", "0.44204494", "0.44198596", "0.44096306", "0.44081265", "0.440228", "0.43925664", "0.4391132", "0.4389216", "0.43736815", "0.43697414", "0.43666705", "0.43557122", "0.43457636", "0.43418494", "0.43405718", "0.4335573", "0.43324137", "0.4332349", "0.4330549", "0.43270376", "0.4326864", "0.4321924", "0.43198034", "0.43146905", "0.43058747", "0.43032646", "0.43004116", "0.4299808", "0.4299005", "0.42980802", "0.42948124", "0.42939413", "0.42915207", "0.42879894", "0.428731", "0.42869425", "0.42818016", "0.42798343", "0.42728284", "0.4261773", "0.4251763", "0.42493764", "0.423462", "0.42334384", "0.42273766", "0.42244604", "0.4222694", "0.42212653", "0.4221005", "0.4217038", "0.42142728", "0.42116982", "0.42109743", "0.4207406", "0.42065033", "0.42036697", "0.41987336", "0.41970855", "0.41959918" ]
0.60605675
1
Check that we correctly pad a compressed query to the nearest block.
func TestAddEdnsPaddingCompressedQuery(t *testing.T) { if len(compressedQueryBytes)%PaddingBlockSize == 0 { t.Errorf("compressedQueryBytes does not require padding, so this test is invalid") } padded, err := AddEdnsPadding(compressedQueryBytes) if err != nil { panic(err) } if len(padded)%PaddingBlockSize != 0 { t.Errorf("AddEdnsPadding failed to correctly pad compressed query") } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestDnsMessageCompressedQueryConfidenceCheck(t *testing.T) {\n\tm := mustUnpack(compressedQueryBytes)\n\tpackedBytes := mustPack(m)\n\tif len(packedBytes) != len(compressedQueryBytes) {\n\t\tt.Errorf(\"Packed query has different size than original:\\n %v\\n %v\", packedBytes, compressedQueryBytes)\n\t}\n}", "func TestDnsMessageUncompressedQueryConfidenceCheck(t *testing.T) {\n\tm := mustUnpack(uncompressedQueryBytes)\n\tpackedBytes := mustPack(m)\n\tif len(packedBytes) >= len(uncompressedQueryBytes) {\n\t\tt.Errorf(\"Compressed query is not smaller than uncompressed query\")\n\t}\n}", "func TestAddEdnsPaddingCompressedPaddedQuery(t *testing.T) {\n\tpaddedQuery := simpleQuery\n\tpaddedQuery.Additionals = make([]dnsmessage.Resource, len(simpleQuery.Additionals))\n\tcopy(paddedQuery.Additionals, simpleQuery.Additionals)\n\n\tpaddedQuery.Additionals = append(paddedQuery.Additionals,\n\t\tdnsmessage.Resource{\n\t\t\tHeader: dnsmessage.ResourceHeader{\n\t\t\t\tName: dnsmessage.MustNewName(\".\"),\n\t\t\t\tClass: dnsmessage.ClassINET,\n\t\t\t\tTTL: 0,\n\t\t\t},\n\t\t\tBody: &dnsmessage.OPTResource{\n\t\t\t\tOptions: []dnsmessage.Option{\n\t\t\t\t\t{\n\t\t\t\t\t\tCode: OptResourcePaddingCode,\n\t\t\t\t\t\tData: make([]byte, 5),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\toriginalOnWire := mustPack(&paddedQuery)\n\n\tpaddedOnWire, err := AddEdnsPadding(mustPack(&paddedQuery))\n\tif err != nil {\n\t\tt.Errorf(\"Failed to pad padded query: %v\", err)\n\t}\n\n\tif !bytes.Equal(originalOnWire, paddedOnWire) {\n\t\tt.Errorf(\"AddEdnsPadding tampered with a query that was already padded\")\n\t}\n}", "func TestAddEdnsPaddingUncompressedQuery(t *testing.T) {\n\tif len(uncompressedQueryBytes)%PaddingBlockSize == 0 {\n\t\tt.Errorf(\"uncompressedQueryBytes does not require padding, so this test is invalid\")\n\t}\n\tpadded, err := AddEdnsPadding(uncompressedQueryBytes)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif len(padded)%PaddingBlockSize != 0 {\n\t\tt.Errorf(\"AddEdnsPadding failed to correctly pad uncompressed query\")\n\t}\n}", "func TestAddEdnsPaddingCompressedOptQuery(t *testing.T) {\n\toptQuery := simpleQuery\n\toptQuery.Additionals = make([]dnsmessage.Resource, len(simpleQuery.Additionals))\n\tcopy(optQuery.Additionals, simpleQuery.Additionals)\n\n\toptQuery.Additionals = append(optQuery.Additionals,\n\t\tdnsmessage.Resource{\n\t\t\tHeader: dnsmessage.ResourceHeader{\n\t\t\t\tName: dnsmessage.MustNewName(\".\"),\n\t\t\t\tClass: dnsmessage.ClassINET,\n\t\t\t\tTTL: 0,\n\t\t\t},\n\t\t\tBody: &dnsmessage.OPTResource{\n\t\t\t\tOptions: []dnsmessage.Option{},\n\t\t\t},\n\t\t},\n\t)\n\tpaddedOnWire, err := AddEdnsPadding(mustPack(&optQuery))\n\tif err != nil {\n\t\tt.Errorf(\"Failed to pad query with OPT but no padding: %v\", err)\n\t}\n\tif len(paddedOnWire)%PaddingBlockSize != 0 {\n\t\tt.Errorf(\"AddEdnsPadding failed to correctly pad query with OPT but no padding\")\n\t}\n}", "func (b *blockEnc) matchOffset(offset, lits uint32) uint32 {\n\t// Check if offset is one of the recent offsets.\n\t// Adjusts the output offset accordingly.\n\t// Gives a tiny bit of compression, typically around 1%.\n\tif true {\n\t\tif lits > 0 {\n\t\t\tswitch offset {\n\t\t\tcase b.recentOffsets[0]:\n\t\t\t\toffset = 1\n\t\t\tcase b.recentOffsets[1]:\n\t\t\t\tb.recentOffsets[1] = b.recentOffsets[0]\n\t\t\t\tb.recentOffsets[0] = offset\n\t\t\t\toffset = 2\n\t\t\tcase b.recentOffsets[2]:\n\t\t\t\tb.recentOffsets[2] = b.recentOffsets[1]\n\t\t\t\tb.recentOffsets[1] = b.recentOffsets[0]\n\t\t\t\tb.recentOffsets[0] 
= offset\n\t\t\t\toffset = 3\n\t\t\tdefault:\n\t\t\t\tb.recentOffsets[2] = b.recentOffsets[1]\n\t\t\t\tb.recentOffsets[1] = b.recentOffsets[0]\n\t\t\t\tb.recentOffsets[0] = offset\n\t\t\t\toffset += 3\n\t\t\t}\n\t\t} else {\n\t\t\tswitch offset {\n\t\t\tcase b.recentOffsets[1]:\n\t\t\t\tb.recentOffsets[1] = b.recentOffsets[0]\n\t\t\t\tb.recentOffsets[0] = offset\n\t\t\t\toffset = 1\n\t\t\tcase b.recentOffsets[2]:\n\t\t\t\tb.recentOffsets[2] = b.recentOffsets[1]\n\t\t\t\tb.recentOffsets[1] = b.recentOffsets[0]\n\t\t\t\tb.recentOffsets[0] = offset\n\t\t\t\toffset = 2\n\t\t\tcase b.recentOffsets[0] - 1:\n\t\t\t\tb.recentOffsets[2] = b.recentOffsets[1]\n\t\t\t\tb.recentOffsets[1] = b.recentOffsets[0]\n\t\t\t\tb.recentOffsets[0] = offset\n\t\t\t\toffset = 3\n\t\t\tdefault:\n\t\t\t\tb.recentOffsets[2] = b.recentOffsets[1]\n\t\t\t\tb.recentOffsets[1] = b.recentOffsets[0]\n\t\t\t\tb.recentOffsets[0] = offset\n\t\t\t\toffset += 3\n\t\t\t}\n\t\t}\n\t} else {\n\t\toffset += 3\n\t}\n\treturn offset\n}", "func canonicalPadding(b []byte) error {\n\tswitch {\n\tcase b[0]&0x80 == 0x80:\n\t\treturn errNegativeValue\n\tcase len(b) > 1 && b[0] == 0x00 && b[1]&0x80 != 0x80:\n\t\treturn errExcessivelyPaddedValue\n\tdefault:\n\t\treturn nil\n\t}\n}", "func IsCompressed(id uint8) bool {\n\treturn id >= 20\n}", "func checksizeAndPad(plaintext []byte) []byte {\n\n\t// calculate modulus of plaintext to blowfish's cipher block size\n\t// if result is not 0, then we need to pad\n\n\tmodulus := len(plaintext) % blowfish.BlockSize\n\tif modulus != 0 {\n\t\t// calc bytes we need to pad to make plaintext a multiple of block size\n\t\tpadlen := blowfish.BlockSize - modulus\n\n\t\t// add required padding\n\t\tfor i := 0; i < padlen; i++ {\n\t\t\tplaintext = append(plaintext, 0)\n\t\t}\n\t}\n\n\treturn plaintext\n}", "func CompressSnappyBlock(src []byte, dst []byte) int {\n\tdstLen := len(dst)\n\tresult := snappy.Encode(dst, src)\n\tif len(result) > dstLen {\n\t\tpanic(\"dst length is too small\")\n\t}\n\treturn len(result)\n}", "func detectPrefixInfo() (int, int) {\n\t//this is simply a block of 32 A's\n\t// we will brute force a prefix length until we get two repeating blocks\n\tblockOfAs := []byte{65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65}\n\n\tfor len := 0; len < 16; len++ {\n\t\tprefix := make([]byte, len)\n\t\tfor i := range prefix {\n\t\t\tprefix[i] = 'A'\n\t\t}\n\t\tciphertext := blackBox(append(prefix, blockOfAs...))\n\t\tchunks, _ := common.Chunks(ciphertext, 16)\n\t\tif common.HasDuplicateBlocks(chunks) {\n\t\t\t// if we have two duplicate blocks, then we know how much data we need to prepend for block alignment\n\t\t\t// now we need to know which is the first block that has our data, so basically the index of the first repeating chunk\n\t\t\tfor blockNum := range chunks {\n\t\t\t\tvar left, right [16]byte\n\t\t\t\tcopy(left[:], chunks[blockNum][:16])\n\t\t\t\tcopy(right[:], chunks[blockNum+1][:16])\n\t\t\t\tif left == right {\n\t\t\t\t\treturn len, blockNum\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn -1, -1\n}", "func compressFragmentTwoPass(input []byte, input_size uint, is_last bool, command_buf []uint32, literal_buf []byte, table []int, table_size uint, storage_ix *uint, storage []byte) {\n\tvar initial_storage_ix uint = *storage_ix\n\tvar table_bits uint = uint(log2FloorNonZero(table_size))\n\tvar min_match uint\n\tif table_bits <= 15 {\n\t\tmin_match = 4\n\t} else {\n\t\tmin_match = 
6\n\t}\n\tcompressFragmentTwoPassImpl(input, input_size, is_last, command_buf, literal_buf, table, table_bits, min_match, storage_ix, storage)\n\n\t/* If output is larger than single uncompressed block, rewrite it. */\n\tif *storage_ix-initial_storage_ix > 31+(input_size<<3) {\n\t\trewindBitPosition(initial_storage_ix, storage_ix, storage)\n\t\temitUncompressedMetaBlock(input, input_size, storage_ix, storage)\n\t}\n\n\tif is_last {\n\t\twriteBits(1, 1, storage_ix, storage) /* islast */\n\t\twriteBits(1, 1, storage_ix, storage) /* isempty */\n\t\t*storage_ix = (*storage_ix + 7) &^ 7\n\t}\n}", "func QEncodeIfNeeded(src []byte, offset int) (dst []byte) {\n\tsafe := true\n\tfor i, sl := 0, len(src); i < sl && safe; i++ {\n\t\tsafe = ' ' <= src[i] && src[i] <= '~'\n\t}\n\tif safe {\n\t\treturn src\n\t}\n\tdst, _ = QEncode(src, offset)\n\treturn dst\n}", "func blockPadding(offset int64) (n int64) {\n\treturn -offset & (blockSize - 1)\n}", "func encodeBlockSnappy(dst, src []byte) (d int) {\n\tif len(src) < minNonLiteralBlockSize {\n\t\treturn 0\n\t}\n\treturn encodeBlockSnappyGo(dst, src)\n}", "func isDiffVarintSnappyEncodedPostings(input []byte) bool {\n\treturn bytes.HasPrefix(input, []byte(codecHeaderSnappy))\n}", "func (w *Writer) checkAlign(n *node, start int, comma, cs []byte) bool {\n\tc := n.genTables(w.SEN)\n\tif c == nil || w.Width < start+c.size {\n\t\treturn true\n\t}\n\tfor i, m := range n.members {\n\t\tif 0 < i {\n\t\t\tw.buf = append(w.buf, comma...)\n\t\t}\n\t\tw.buf = append(w.buf, []byte(cs)...)\n\t\tswitch m.kind {\n\t\tcase arrayNode:\n\t\t\tw.alignArray(m, c, comma, cs)\n\t\tcase mapNode:\n\t\t\tw.alignMap(m, c, comma, cs)\n\t\t}\n\t}\n\treturn false\n}", "func queryBlockEnc(blockStr string, prefix int64, blockId int) QueryBlockCipher {\n\tvar ret QueryBlockCipher\n\texp := getHashedValue(blockStr, prefix, blockId) // get the hash value in power part of ciphertext\n\tsubIndex, _ := strconv.Atoi(new(big.Int).Mod(exp, big.NewInt(subIndexSize)).String()) // calculate the sub-index value (G_k mod subIndexSize)\n\tret.subIndex = uint8(subIndex)\n\n\t// generate the ciphertext\n\tret.cipher = exp.Bytes()\n\treturn ret\n}", "func (t Table) Pack() (C, V, r []int) {\n\trows := len(t)\n\ttotal := 0\n\tmaxCols := 0\n\n\t// Step 1 - count nonzero positions\n\tnz := make([][]int, rows)\n\tfor i, row := range t {\n\t\ttotal += len(row)\n\t\tif len(row) > maxCols {\n\t\t\tmaxCols = len(row)\n\t\t}\n\t\tfor j, val := range row {\n\t\t\tif val != 0 {\n\t\t\t\tnz[i] = append(nz[i], j)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Step 2 - sort rows\n\tsorted := make([]int, rows)\n\tfor i := range sorted {\n\t\tsorted[i] = i\n\t}\n\tsort.Slice(sorted, func(i, j int) bool {\n\t\tx, y := sorted[i], sorted[j]\n\t\tlx, ly := len(nz[x]), len(nz[y])\n\t\treturn lx > ly || lx == ly && t.less(x, y)\n\t})\n\n\t// Step 3 - first-fit\n\tentry := NewBitSet(uint(total + 1))\n\tused := NewBitSet(uint(total + maxCols))\n\tmin := 0 // first position where entry[min] = false\n\tmax := -1 // last position where entry[max] = true\n\tr = make([]int, rows)\n\n\tprev := -1\n\tfor _, i := range sorted {\n\t\tif len(nz[i]) == 0 { // all zero row\n\t\t\tr[i] = max + 1\n\t\t\tcontinue\n\t\t}\n\t\tif prev >= 0 && len(nz[prev]) == len(nz[i]) && !t.less(prev, i) { // duplicate row\n\t\t\tr[i] = r[prev]\n\t\t\tcontinue\n\t\t}\n\t\tprev = i\n\t\tri := min - nz[i][0]\n\tcheck:\n\t\tif used.Test(uint(ri + maxCols)) {\n\t\t\tri++\n\t\t\tgoto check\n\t\t}\n\t\tfor _, j := range nz[i] {\n\t\t\tif entry.Test(uint(ri + j)) 
{\n\t\t\t\tri++\n\t\t\t\tgoto check\n\t\t\t}\n\t\t}\n\t\t// ri is ok\n\t\tused.Set(uint(ri + maxCols))\n\t\tfor _, j := range nz[i] {\n\t\t\tentry.Set(uint(ri + j))\n\t\t\tif ri+j > max {\n\t\t\t\tmax = ri + j\n\t\t\t}\n\t\t}\n\t\tfor entry.Test(uint(min)) {\n\t\t\tmin++\n\t\t}\n\t\tr[i] = ri\n\t}\n\n\t// Generate C, V\n\tC = make([]int, max+1)\n\tV = make([]int, max+1)\n\tfor i := range V {\n\t\tV[i] = -1\n\t}\n\tfor i, row := range t {\n\t\tri := r[i]\n\t\tfor _, j := range nz[i] {\n\t\t\tC[ri+j] = row[j]\n\t\t\tV[ri+j] = j\n\t\t}\n\t}\n\treturn C, V, r\n}", "func remainingPattern(inputIndex, inputSize, patternIndex, patternSize int) bool {\n\treturn inputIndex == inputSize && patternIndex < patternSize-1\n}", "func finalizeFragment(buf []byte, to util.File, toOffset int64, c Compressor) (raw int, compressed bool, err error) {\n\t// compress the block if needed\n\tif c != nil {\n\t\tout, err := c.compress(buf)\n\t\tif err != nil {\n\t\t\treturn 0, compressed, fmt.Errorf(\"error compressing fragment block: %v\", err)\n\t\t}\n\t\tif len(out) < len(buf) {\n\t\t\tbuf = out\n\t\t\tcompressed = true\n\t\t}\n\t}\n\tif _, err := to.WriteAt(buf, toOffset); err != nil {\n\t\treturn 0, compressed, err\n\t}\n\treturn len(buf), compressed, nil\n}", "func (enc *Encoding) decode(dst, src []byte) (n int, end bool, err error) {\n\t// Lift the nil check outside of the loop.\n\t_ = enc.decodeMap\n\n\tdsti := 0\n\tolen := len(src)\n\n\tfor len(src) > 0 && !end {\n\t\t// Decode quantum using the base32 alphabet\n\t\tvar dbuf [8]byte\n\t\tdlen := 8\n\n\t\tfor j := 0; j < 8; {\n\n\t\t\tif len(src) == 0 {\n\t\t\t\tif enc.padChar != NoPadding {\n\t\t\t\t\t// We have reached the end and are missing padding\n\t\t\t\t\treturn n, false, CorruptInputError(olen - len(src) - j)\n\t\t\t\t}\n\t\t\t\t// We have reached the end and are not expecting any padding\n\t\t\t\tdlen, end = j, true\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tin := src[0]\n\t\t\tsrc = src[1:]\n\t\t\tif in == byte(enc.padChar) && j >= 2 && len(src) < 8 {\n\t\t\t\t// We've reached the end and there's padding\n\t\t\t\tif len(src)+j < 8-1 {\n\t\t\t\t\t// not enough padding\n\t\t\t\t\treturn n, false, CorruptInputError(olen)\n\t\t\t\t}\n\t\t\t\tfor k := 0; k < 8-1-j; k++ {\n\t\t\t\t\tif len(src) > k && src[k] != byte(enc.padChar) {\n\t\t\t\t\t\t// incorrect padding\n\t\t\t\t\t\treturn n, false, CorruptInputError(olen - len(src) + k - 1)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tdlen, end = j, true\n\t\t\t\t// 7, 5 and 2 are not valid padding lengths, and so 1, 3 and 6 are not\n\t\t\t\t// valid dlen values. 
See RFC 4648 Section 6 \"Base 32 Encoding\" listing\n\t\t\t\t// the five valid padding lengths, and Section 9 \"Illustrations and\n\t\t\t\t// Examples\" for an illustration for how the 1st, 3rd and 6th base32\n\t\t\t\t// src bytes do not yield enough information to decode a dst byte.\n\t\t\t\tif dlen == 1 || dlen == 3 || dlen == 6 {\n\t\t\t\t\treturn n, false, CorruptInputError(olen - len(src) - 1)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tdbuf[j] = enc.decodeMap[in]\n\t\t\tif dbuf[j] == 0xFF {\n\t\t\t\treturn n, false, CorruptInputError(olen - len(src) - 1)\n\t\t\t}\n\t\t\tj++\n\t\t}\n\n\t\t// Pack 8x 5-bit source blocks into 5 byte destination\n\t\t// quantum\n\t\tswitch dlen {\n\t\tcase 8:\n\t\t\tdst[dsti+4] = dbuf[6]<<5 | dbuf[7]\n\t\t\tn++\n\t\t\tfallthrough\n\t\tcase 7:\n\t\t\tdst[dsti+3] = dbuf[4]<<7 | dbuf[5]<<2 | dbuf[6]>>3\n\t\t\tn++\n\t\t\tfallthrough\n\t\tcase 5:\n\t\t\tdst[dsti+2] = dbuf[3]<<4 | dbuf[4]>>1\n\t\t\tn++\n\t\t\tfallthrough\n\t\tcase 4:\n\t\t\tdst[dsti+1] = dbuf[1]<<6 | dbuf[2]<<1 | dbuf[3]>>4\n\t\t\tn++\n\t\t\tfallthrough\n\t\tcase 2:\n\t\t\tdst[dsti+0] = dbuf[0]<<3 | dbuf[1]>>2\n\t\t\tn++\n\t\t}\n\t\tdsti += 5\n\t}\n\treturn n, end, nil\n}", "func isBlockValid(newBlock *Block, oldBlock *Block) bool {\n\tif oldBlock.Index+1 != newBlock.Index {\n\t\treturn false\n\t}\n\n\tif oldBlock.Hash != newBlock.PrevHash {\n\t\treturn false\n\t}\n\n\tif calculateHash(newBlock) != newBlock.Hash {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func compressBlock(block *image.RGBA) []byte {\n\n\tvar minR, maxR, minG, maxG byte = 255, 0, 255, 0\n\tfor y := 0; y < 4; y++ {\n\t\tfor x := 0; x < 4; x++ {\n\t\t\tc := block.RGBAAt(x, y)\n\n\t\t\tif c.R < minR {\n\t\t\t\tminR = c.R\n\t\t\t}\n\t\t\tif c.R > maxR {\n\t\t\t\tmaxR = c.R\n\t\t\t}\n\n\t\t\tif c.G < minG {\n\t\t\t\tminG = c.G\n\t\t\t}\n\t\t\tif c.G > maxG {\n\t\t\t\tmaxG = c.G\n\t\t\t}\n\t\t}\n\t}\n\n\tpalR := generatePalette(normalize(minR), normalize(maxR))\n\tpalG := generatePalette(normalize(minG), normalize(maxG))\n\tnearest := func(pal [8]float64, v byte) byte {\n\t\tni := 0\n\t\tfor i := 0; i < 8; i++ {\n\t\t\tif math.Abs(pal[i]-normalize(v)) < math.Abs(pal[ni]-normalize(v)) {\n\t\t\t\tni = i\n\t\t\t}\n\t\t}\n\t\treturn byte(ni)\n\t}\n\n\t//Compare red and green values and select closest in palette\n\trIndexU, gIndexU := uint64(0), uint64(0)\n\tfor y := 0; y < 4; y++ {\n\t\tfor x := 0; x < 4; x++ {\n\t\t\tc := block.RGBAAt(x, y)\n\t\t\trIndexU = (rIndexU << 3) | uint64(nearest(palR, c.R))\n\t\t\tgIndexU = (gIndexU << 3) | uint64(nearest(palG, c.G))\n\t\t}\n\t}\n\n\trIxBytes, gIxBytes := make([]byte, 8), make([]byte, 8)\n\tbinary.BigEndian.PutUint64(rIxBytes, rIndexU)\n\tbinary.BigEndian.PutUint64(gIxBytes, gIndexU)\n\n\tblockBytes := make([]byte, 16)\n\tblockBytes[0] = denormalize(palR[0])\n\tblockBytes[1] = denormalize(palR[1])\n\tcopy(blockBytes[2:8], rIxBytes[2:8])\n\n\tblockBytes[8] = denormalize(palG[0])\n\tblockBytes[9] = denormalize(palG[1])\n\tcopy(blockBytes[10:], gIxBytes[2:8])\n\n\treturn blockBytes\n}", "func (q *BytesQueue) canInsertBeforeHead(need int) bool {\n\tif q.full {\n\t\treturn false\n\t}\n\tif q.tail >= q.head {\n\t\treturn q.head-leftMarginIndex == need || q.head-leftMarginIndex >= need+minimumHeaderSize\n\t}\n\treturn q.head-q.tail == need || q.head-q.tail >= need+minimumHeaderSize\n}", "func TestHiddenWithPK1(t *testing.T) {\n\tdefer testutils.AfterTest(t)()\n\ttestutils.EnsureNoLeak(t)\n\tctx := context.Background()\n\n\ttae := testutil.InitTestDB(ctx, ModuleName, t, nil)\n\tdefer 
tae.Close()\n\tschema := catalog.MockSchemaAll(13, 2)\n\tschema.BlockMaxRows = 10\n\tschema.SegmentMaxBlocks = 2\n\tbat := catalog.MockBatch(schema, int(schema.BlockMaxRows*4))\n\tdefer bat.Close()\n\tbats := bat.Split(10)\n\n\ttxn, _, rel := testutil.CreateRelationNoCommit(t, tae, testutil.DefaultTestDB, schema, true)\n\terr := rel.Append(context.Background(), bats[0])\n\t{\n\t\toffsets := make([]uint32, 0)\n\t\tit := rel.MakeBlockIt()\n\t\tfor it.Valid() {\n\t\t\tblk := it.GetBlock()\n\t\t\tview, err := blk.GetColumnDataById(context.Background(), schema.PhyAddrKey.Idx)\n\t\t\tassert.NoError(t, err)\n\t\t\tdefer view.Close()\n\t\t\tfp := blk.Fingerprint()\n\t\t\t_ = view.GetData().Foreach(func(v any, _ bool, _ int) (err error) {\n\t\t\t\trid := v.(types.Rowid)\n\t\t\t\tbid, offset := rid.Decode()\n\t\t\t\tt.Logf(\"bid=%s,offset=%d\", bid.String(), offset)\n\t\t\t\tassert.Equal(t, fp.BlockID, bid)\n\t\t\t\toffsets = append(offsets, offset)\n\t\t\t\treturn\n\t\t\t}, nil)\n\t\t\tit.Next()\n\t\t}\n\t\t// sort.Slice(offsets, func(i, j int) bool { return offsets[i] < offsets[j] })\n\t\t// assert.Equal(t, []uint32{0, 1, 2, 3}, offsets)\n\t}\n\tassert.NoError(t, err)\n\tassert.NoError(t, txn.Commit(context.Background()))\n\n\ttxn, rel = testutil.GetDefaultRelation(t, tae, schema.Name)\n\t{\n\t\tblk := testutil.GetOneBlock(rel)\n\t\tview, err := blk.GetColumnDataByName(context.Background(), catalog.PhyAddrColumnName)\n\t\tassert.NoError(t, err)\n\t\tdefer view.Close()\n\t\toffsets := make([]uint32, 0)\n\t\tfp := blk.Fingerprint()\n\t\tt.Log(fp.String())\n\t\t_ = view.GetData().Foreach(func(v any, _ bool, _ int) (err error) {\n\t\t\trid := v.(types.Rowid)\n\t\t\tbid, offset := rid.Decode()\n\t\t\tt.Logf(\",bid=%s,offset=%d\", bid, offset)\n\t\t\tassert.Equal(t, fp.BlockID, bid)\n\t\t\toffsets = append(offsets, offset)\n\t\t\treturn\n\t\t}, nil)\n\t\tsort.Slice(offsets, func(i, j int) bool { return offsets[i] < offsets[j] })\n\t\tassert.Equal(t, []uint32{0, 1, 2, 3}, offsets)\n\t}\n\n\tassert.NoError(t, err)\n\tassert.NoError(t, txn.Commit(context.Background()))\n\n\ttxn, rel = testutil.GetDefaultRelation(t, tae, schema.Name)\n\terr = rel.Append(context.Background(), bats[1])\n\tassert.NoError(t, err)\n\terr = rel.Append(context.Background(), bats[2])\n\tassert.NoError(t, err)\n\terr = rel.Append(context.Background(), bats[3])\n\tassert.NoError(t, err)\n\terr = rel.Append(context.Background(), bats[4])\n\tassert.NoError(t, err)\n\terr = rel.Append(context.Background(), bats[5])\n\tassert.NoError(t, err)\n\tassert.NoError(t, txn.Commit(context.Background()))\n\n\ttestutil.CompactBlocks(t, 0, tae, \"db\", schema, false)\n\n\ttxn, rel = testutil.GetDefaultRelation(t, tae, schema.Name)\n\tvar segMeta *catalog.SegmentEntry\n\t{\n\t\tit := rel.MakeBlockIt()\n\t\tfor it.Valid() {\n\t\t\tblk := it.GetBlock()\n\t\t\tview, err := blk.GetColumnDataByName(context.Background(), catalog.PhyAddrColumnName)\n\t\t\tassert.NoError(t, err)\n\t\t\tdefer view.Close()\n\t\t\toffsets := make([]uint32, 0)\n\t\t\tmeta := blk.GetMeta().(*catalog.BlockEntry)\n\t\t\tt.Log(meta.String())\n\t\t\t_ = view.GetData().Foreach(func(v any, _ bool, _ int) (err error) {\n\t\t\t\trid := v.(types.Rowid)\n\t\t\t\tbid, offset := rid.Decode()\n\t\t\t\t// t.Logf(\"sid=%d,bid=%d,offset=%d\", sid, bid, offset)\n\t\t\t\tassert.Equal(t, meta.ID, bid)\n\t\t\t\toffsets = append(offsets, offset)\n\t\t\t\treturn\n\t\t\t}, nil)\n\t\t\tsort.Slice(offsets, func(i, j int) bool { return offsets[i] < offsets[j] })\n\t\t\tif meta.IsAppendable() 
{\n\t\t\t\tassert.Equal(t, []uint32{0, 1, 2, 3}, offsets)\n\t\t\t} else {\n\t\t\t\tsegMeta = meta.GetSegment()\n\t\t\t\tassert.Equal(t, []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, offsets)\n\t\t\t}\n\t\t\tit.Next()\n\t\t}\n\t}\n\n\tassert.NoError(t, txn.Commit(context.Background()))\n\t{\n\t\tseg := segMeta.GetSegmentData()\n\t\tfactory, taskType, scopes, err := seg.BuildCompactionTaskFactory()\n\t\tassert.NoError(t, err)\n\t\ttask, err := tae.Runtime.Scheduler.ScheduleMultiScopedTxnTask(tasks.WaitableCtx, taskType, scopes, factory)\n\t\tassert.NoError(t, err)\n\t\terr = task.WaitDone()\n\t\tassert.NoError(t, err)\n\t}\n\n\ttxn, rel = testutil.GetDefaultRelation(t, tae, schema.Name)\n\t{\n\t\tit := rel.MakeBlockIt()\n\t\tfor it.Valid() {\n\t\t\tblk := it.GetBlock()\n\t\t\tview, err := blk.GetColumnDataByName(context.Background(), catalog.PhyAddrColumnName)\n\t\t\tassert.NoError(t, err)\n\t\t\tdefer view.Close()\n\t\t\toffsets := make([]uint32, 0)\n\t\t\tmeta := blk.GetMeta().(*catalog.BlockEntry)\n\t\t\tt.Log(meta.String())\n\t\t\tt.Log(meta.GetSegment().String())\n\t\t\t_ = view.GetData().Foreach(func(v any, _ bool, _ int) (err error) {\n\t\t\t\trid := v.(types.Rowid)\n\t\t\t\tbid, offset := rid.Decode()\n\t\t\t\t// t.Logf(\"sid=%d,bid=%d,offset=%d\", sid, bid, offset)\n\t\t\t\tassert.Equal(t, meta.ID, bid)\n\t\t\t\toffsets = append(offsets, offset)\n\t\t\t\treturn\n\t\t\t}, nil)\n\t\t\tsort.Slice(offsets, func(i, j int) bool { return offsets[i] < offsets[j] })\n\t\t\tif meta.IsAppendable() {\n\t\t\t\tassert.Equal(t, []uint32{0, 1, 2, 3}, offsets)\n\t\t\t} else {\n\t\t\t\tassert.Equal(t, []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, offsets)\n\t\t\t}\n\t\t\tit.Next()\n\t\t}\n\t}\n\n\tassert.NoError(t, txn.Commit(context.Background()))\n\tt.Log(tae.Catalog.SimplePPString(common.PPL1))\n}", "func (c *Codec) tryMatch(idx int, prefix, ol oligo.Oligo, olen int, mdblks []uint64, data [][]byte, difficulty int) (err int) {\n\terr = -1\n\tdata[idx] = nil\n\n\t// Do a common sense check before we even try to match:\n\t// we can have up to (blknum-idx)*(Nerrdata + 1) insertions or deletions for\n\t// the rest of the oligo. 
If the difference between olen and the actual\n\t// oligo length is bigger, there is no point of going that route, we'll fail anyway\n\t// This is very conservative and will probably easily pass in the first recursion\n\t// steps, but it may save us some time once we get deeper.\n\td := ol.Len() - olen\n\tif d < 0 {\n\t\td = -d\n\t}\n\n\tif d > (c.blknum - idx)*(Nerrdata + Nerrmd) {\n\t\treturn -1\n\t}\n\n\t// try without errors\n\terr = c.tryMd(idx, ol.Slice(13, 17), ol.Slice(17, 0), olen - 17, mdblks, data, difficulty)\n\tif err >= 0 {\n\t\t// we try to decode the data block only if we didn't assume it have errors\n\t\tif ol.Len() < 17 {\n\t\t\t// if the block is too short, don't even try to decode it\n\t\t\treturn\n\t\t}\n\n\t\tv, errr := l0.Decode(prefix, ol.Slice(0, 17), c.crit)\n\t\tif errr != nil {\n\t\t\treturn\n\t\t}\n\n\t\tpbit := int(v & 1)\n\t\tv >>= 1\n\n\t\tvar parityok bool\n\t\tif PARITY_BUG {\n\t\t\tparityok = (v + uint64(pbit)) % 2 == 0\n\t\t} else {\n\t\t\tparityok = (bits.OnesCount64(v) + pbit) % 2 == 0\n\t\t}\n\n\t\tif parityok {\n\t\t\td := make([]byte, 4)\n\t\t\td[0] = byte(v)\n\t\t\td[1] = byte(v >> 8)\n\t\t\td[2] = byte(v >> 16)\n\t\t\td[3] = byte(v >> 24)\n\t\t\tdata[idx] = d\n\t\t}\n\n\t\treturn\n\t}\n\n\t// iterate through all possible errors\n\tfor derr := 1; derr < Nerrdata; derr++ {\n\t\t// data deletes\n\t\t// (we assume that the deletes are not in the last derr nts)\n\t\t// TODO: We should calculate the prefix correctly by assuming there are\n\t\t// errors\n\t\tprefix := ol.Slice(13 - derr, 17 - derr)\n\t\terr = c.tryMd(idx, prefix, ol.Slice(17 - derr, 0), olen - 17, mdblks, data, difficulty)\n\t\tif err >= 0 {\n\t\t\terr += derr\n\t\t\tbreak\n\t\t}\n\n\t\t// data inserts\n\t\t// TODO: We should calculate the prefix correctly by assuming there are\n\t\t// errors\n\t\tprefix = ol.Slice(13 + derr, 17 + derr)\n\t\terr = c.tryMd(idx, prefix, ol.Slice(17 + derr, 0), olen - 17, mdblks, data, difficulty)\n\t\tif err >= 0 {\n\t\t\terr += derr\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn\n}", "func (bd *BlockDAG) checkLayerGap(parents []*hash.Hash) bool {\n\tif len(parents) == 0 {\n\t\treturn false\n\t}\n\tparentsNode := []IBlock{}\n\tfor _, v := range parents {\n\t\tib := bd.getBlock(v)\n\t\tif ib == nil {\n\t\t\treturn false\n\t\t}\n\t\tparentsNode = append(parentsNode, ib)\n\t}\n\n\tpLen := len(parentsNode)\n\tif pLen == 0 {\n\t\treturn false\n\t}\n\tvar gap float64\n\tif pLen == 1 {\n\t\treturn true\n\t} else if pLen == 2 {\n\t\tgap = math.Abs(float64(parentsNode[0].GetLayer()) - float64(parentsNode[1].GetLayer()))\n\t} else {\n\t\tvar minLayer int64 = -1\n\t\tvar maxLayer int64 = -1\n\t\tfor i := 0; i < pLen; i++ {\n\t\t\tparentLayer := int64(parentsNode[i].GetLayer())\n\t\t\tif maxLayer == -1 || parentLayer > maxLayer {\n\t\t\t\tmaxLayer = parentLayer\n\t\t\t}\n\t\t\tif minLayer == -1 || parentLayer < minLayer {\n\t\t\t\tminLayer = parentLayer\n\t\t\t}\n\t\t}\n\t\tgap = math.Abs(float64(maxLayer) - float64(minLayer))\n\t}\n\tif gap > MaxTipLayerGap {\n\t\tlog.Error(fmt.Sprintf(\"Parents gap is %f which is more than %d\", gap, MaxTipLayerGap))\n\t\treturn false\n\t}\n\n\treturn true\n}", "func isBlockValid(newBlock, oldBlock Block) bool {\n\tif oldBlock.Index+1 != newBlock.Index {\n\t\treturn false\n\t}\n\n\tif oldBlock.Hash != newBlock.PrevHash {\n\t\treturn false\n\t}\n\n\tif calculateHash(newBlock) != newBlock.Hash {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func ShouldBeCompressed(recoveryFlag int) bool {\n\treturn recoveryFlag >= 31\n}", "func (dc 
*Decompressor) tryMergeBlocks(ctx context.Context, ch <-chan *blockDesc, min *blockDesc) bool {\n\t// wait for the second consecutive block.\n\tfor {\n\t\tfor len(*dc.heap) < 1 {\n\t\t\tselect {\n\t\t\tcase block, ok := <-ch:\n\t\t\t\tif !ok {\n\t\t\t\t\t// channel has been closed.\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\theap.Push(dc.heap, block)\n\t\t\tcase <-ctx.Done():\n\t\t\t\terr := ctx.Err()\n\t\t\t\tdc.trace(\"tryMergeBlocks: %v\", err)\n\t\t\t\tdc.pwr.CloseWithError(err)\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\tif (*dc.heap)[0].order == min.order+1 {\n\t\t\tbreak\n\t\t}\n\t}\n\tnext := (*dc.heap)[0]\n\tbwr := &bitstream.BitWriter{}\n\t// Note that the first block has an offset in the first byte and a size in\n\t// bits and hence need the sum of those to accurately reflect the size of\n\t// the first block in terms of appending to it.\n\tbwr.Init(min.Data, min.SizeInBits+min.BitOffset, len(min.Data)+len(next.Data)+len(blockMagic)+1)\n\tbwr.Append(blockMagic[:], 0, len(blockMagic)*8)\n\tbwr.Append(next.Data, next.BitOffset, next.SizeInBits)\n\tmin.Data, min.SizeInBits = bwr.Data()\n\n\tmin.decompress()\n\tif min.err != nil {\n\t\treturn false\n\t}\n\t// The merge succeeded, remove the block that was merged from the heap.\n\theap.Remove(dc.heap, 0)\n\treturn true\n\n}", "func isBlockValid(newBlock, oldBlock Block) bool {\n\tif oldBlock.Index+1 != newBlock.Index {\n\t\treturn false\n\t}\n\n\tif oldBlock.Hash != newBlock.PrevHash {\n\t\treturn false\n\t}\n\n\tif calculateBlockHash(newBlock) != newBlock.Hash {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func isBlockValid(newBlock, oldBlock Block) bool {\n\tif oldBlock.Index+1 != newBlock.Index {\n\t\tfmt.Println(\"Invalid index !!\", newBlock.Index, oldBlock.Index)\n\t\treturn false\n\t}\n\n\tif oldBlock.Hash != newBlock.PrevHash {\n\t\tfmt.Println(\"Invalid prev hash !!\")\n\t\treturn false\n\t}\n\n\tif calculateBlockHash(newBlock) != newBlock.Hash {\n\t\tfmt.Println(\"Invalid hash !!\")\n\t\treturn false\n\t}\n\tif !isServerData(newBlock.Data, newBlock.Hash) {\n\t\treturn false\n\t}\n\treturn true\n}", "func (q *BytesQueue) canInsertAfterTail(need int) bool {\n\tif q.full {\n\t\treturn false\n\t}\n\tif q.tail >= q.head {\n\t\treturn q.capacity-q.tail >= need\n\t}\n\t// 1. there is exactly need bytes between head and tail, so we do not need\n\t// to reserve extra space for a potential empty entry when realloc this queue\n\t// 2. 
still have unused space between tail and head, then we must reserve\n\t// at least headerEntrySize bytes so we can put an empty entry\n\treturn q.head-q.tail == need || q.head-q.tail >= need+minimumHeaderSize\n}", "func SmithWatermanFull(seqPair *SeqPair, wm *WeightMatrix, gapPenalty int) *AlignResult {\n dp := make([][]int, len(seqPair.S1) + 1)\n\n for i := range dp {\n dp[i] = make([]int, len(seqPair.S2) + 1)\n }\n\n maxScore := 0\n maxPos := Point{}\n\n // Main part\n\n for row := 0; row < len(seqPair.S1); row += 1 {\n for col := 0; col < len(seqPair.S2); col += 1 {\n // dp indices are actually (row + 1, col + 1) because of dummy zero-indexed elements\n dp[row + 1][col + 1] = util.Max4(\n 0,\n dp[row + 1][col] + gapPenalty, /* left */\n dp[row][col + 1] + gapPenalty, /* top */\n dp[row][col] + seqPair.WeightIn(wm, row, col)) /* top-left */\n\n if maxScore < dp[row + 1][col + 1] {\n maxScore = dp[row + 1][col + 1]\n maxPos = Point{ col + 1, row + 1 }\n }\n }\n }\n\n //fmt.Println(\"TABLE:\")\n //for i, lst := range dp[1:] {\n // fmt.Printf(\"%-2v: %-3v\\n\", i, lst)\n //}\n //fmt.Println()\n\n // Recovery\n\n s1Builder := strings.Builder{}\n s2Builder := strings.Builder{}\n\n curPos := maxPos\n\n if curPos.X > 0 {\n for i := len(seqPair.S2) - 1; i > curPos.X - 1; i -= 1 {\n s1Builder.WriteByte('-')\n s2Builder.WriteByte(seqPair.S2[i])\n }\n }\n\n if curPos.Y > 0 {\n for i := len(seqPair.S1) - 1; i > curPos.Y - 1; i -= 1 {\n s1Builder.WriteByte(seqPair.S1[i])\n s2Builder.WriteByte('-')\n }\n }\n\n for curPos.X > 0 && curPos.Y > 0 {\n l := dp[curPos.Y][curPos.X - 1] + gapPenalty /* left */\n t := dp[curPos.Y - 1][curPos.X] + gapPenalty /* top */\n tl := dp[curPos.Y - 1][curPos.X - 1] + seqPair.WeightIn(wm, curPos.Y - 1, curPos.X - 1) /* top-left */\n\n max := util.Max4(0, l, t, tl)\n\n if max == 0 {\n break\n } else if max == l {\n s1Builder.WriteByte('-')\n s2Builder.WriteByte(seqPair.S2[curPos.X - 1])\n curPos.X -= 1\n } else if max == t {\n s1Builder.WriteByte(seqPair.S1[curPos.Y - 1])\n s2Builder.WriteByte('-')\n curPos.Y -= 1\n } else { /* max == tl */\n s1Builder.WriteByte(seqPair.S1[curPos.Y - 1])\n s2Builder.WriteByte(seqPair.S2[curPos.X - 1])\n curPos.X -= 1\n curPos.Y -= 1\n }\n }\n\n for curPos.X > 0 {\n s1Builder.WriteByte('-')\n s2Builder.WriteByte(seqPair.S2[curPos.X - 1])\n curPos.X -= 1\n }\n\n for curPos.Y > 0 {\n s1Builder.WriteByte(seqPair.S1[curPos.Y - 1])\n s2Builder.WriteByte('-')\n curPos.Y -= 1\n }\n\n s1Align := util.ReverseString(s1Builder.String())\n s2Align := util.ReverseString(s2Builder.String())\n\n s1AlignColored := strings.Builder{}\n s2AlignColored := strings.Builder{}\n\n for i := range s1Align {\n if s1Align[i] == '-' {\n s1AlignColored.WriteString(util.Colorify(\"-\", util.ColorBlue))\n s2AlignColored.WriteByte(s2Align[i])\n } else if s2Align[i] == '-' {\n s1AlignColored.WriteByte(s1Align[i])\n s2AlignColored.WriteString(util.Colorify(\"-\", util.ColorBlue))\n } else if s1Align[i] == s2Align[i] {\n s1AlignColored.WriteString(util.Colorify(string(s1Align[i]), util.ColorGreen))\n s2AlignColored.WriteString(util.Colorify(string(s2Align[i]), util.ColorGreen))\n } else {\n s1AlignColored.WriteString(util.Colorify(string(s1Align[i]), util.ColorRed))\n s2AlignColored.WriteString(util.Colorify(string(s2Align[i]), util.ColorRed))\n }\n }\n\n // Return result\n return &AlignResult{\n Score: maxScore,\n Align: s1AlignColored.String() + \"\\n\" + s2AlignColored.String(),\n }\n}", "func Encode(dst, src []byte) (compressedSize int, error error) {\n\tif len(src) >= 
MaxInputSize {\n\t\treturn 0, ErrTooLarge\n\t}\n\n\tif n := CompressBound(len(src)); len(dst) < n {\n\t\treturn 0, ErrEncodeTooSmall\n\t}\n\n\thashTable := hashPool.Get().([]uint32)\n\tfor i := range hashTable {\n\t\thashTable[i] = 0\n\t}\n\te := encoder{src: src, dst: dst, hashTable: hashTable}\n\tdefer func() {\n\t\thashPool.Put(hashTable)\n\t}()\n\t// binary.LittleEndian.PutUint32(dst, uint32(len(src)))\n\t// e.dpos = 0\n\n\tvar (\n\t\tstep uint32 = 1\n\t\tlimit = incompressible\n\t)\n\n\tfor {\n\t\tif int(e.pos)+12 >= len(e.src) {\n\t\t\te.writeLiterals(uint32(len(e.src))-e.anchor, 0, e.anchor)\n\t\t\treturn int(e.dpos), nil\n\t\t}\n\n\t\tsequence := uint32(e.src[e.pos+3])<<24 | uint32(e.src[e.pos+2])<<16 | uint32(e.src[e.pos+1])<<8 | uint32(e.src[e.pos+0])\n\n\t\thash := (sequence * 2654435761) >> hashShift\n\t\tref := e.hashTable[hash] + uninitHash\n\t\te.hashTable[hash] = e.pos - uninitHash\n\n\t\tif ((e.pos-ref)>>16) != 0 || uint32(e.src[ref+3])<<24|uint32(e.src[ref+2])<<16|uint32(e.src[ref+1])<<8|uint32(e.src[ref+0]) != sequence {\n\t\t\tif e.pos-e.anchor > limit {\n\t\t\t\tlimit <<= 1\n\t\t\t\tstep += 1 + (step >> 2)\n\t\t\t}\n\t\t\te.pos += step\n\t\t\tcontinue\n\t\t}\n\n\t\tif step > 1 {\n\t\t\te.hashTable[hash] = ref - uninitHash\n\t\t\te.pos -= step - 1\n\t\t\tstep = 1\n\t\t\tcontinue\n\t\t}\n\t\tlimit = incompressible\n\n\t\tln := e.pos - e.anchor\n\t\tback := e.pos - ref\n\n\t\tanchor := e.anchor\n\n\t\te.pos += minMatch\n\t\tref += minMatch\n\t\te.anchor = e.pos\n\n\t\tfor int(e.pos) < len(e.src)-5 && e.src[e.pos] == e.src[ref] {\n\t\t\te.pos++\n\t\t\tref++\n\t\t}\n\n\t\tmlLen := e.pos - e.anchor\n\n\t\te.writeLiterals(ln, mlLen, anchor)\n\t\te.dst[e.dpos] = uint8(back)\n\t\te.dst[e.dpos+1] = uint8(back >> 8)\n\t\te.dpos += 2\n\n\t\tif mlLen > mlMask-1 {\n\t\t\tmlLen -= mlMask\n\t\t\tfor mlLen > 254 {\n\t\t\t\tmlLen -= 255\n\n\t\t\t\te.dst[e.dpos] = 255\n\t\t\t\te.dpos++\n\t\t\t}\n\n\t\t\te.dst[e.dpos] = byte(mlLen)\n\t\t\te.dpos++\n\t\t}\n\n\t\te.anchor = e.pos\n\t}\n}", "func fixLen(fixme []byte) []byte {\n l := (len(fixme) - 1) // skip the Q in \"Q\\x00\\x00\\x01\\x01SELECT...\"\n binary.BigEndian.PutUint32(fixme[1:], uint32(l))\n return fixme\n}", "func block(h *[4][16]uint32, base uintptr, offsets *[16]uint32, mask uint16)", "func (uc *Cypher) pkcs7unpad(padded []byte, blockSize int) []byte {\n\n\tdataLen := len(padded)\n\tpaddingCount := int(padded[dataLen-1])\n\n\tif paddingCount > blockSize || paddingCount <= 0 {\n\t\treturn padded //data is not padded (or not padded correctly), return as is\n\t}\n\n\tpadding := padded[dataLen-paddingCount : dataLen-1]\n\n\tfor _, b := range padding {\n\t\tif int(b) != paddingCount {\n\t\t\treturn padded //data is not padded (or not padded correcly), return as is\n\t\t}\n\t}\n\n\treturn padded[:len(padded)-paddingCount] //return data - padding\n}", "func padBlock(b []byte) []byte {\n\tpad := BlockSize - uint64(len(b))\n\tfor i := uint64(0); i < pad; i++ {\n\t\tb = append(b, byte(pad))\n\t}\n\treturn b\n}", "func encodeBlock(dst, src []byte) (d int) {\n\tif len(src) < minNonLiteralBlockSize {\n\t\treturn 0\n\t}\n\treturn encodeBlockGo(dst, src)\n}", "func doFindSmallerSize(size int64, pattern string) bool {\n\ti, err := humanize.ParseBytes(pattern)\n\tfatalIf(probe.NewError(err), \"Error parsing string passed to flag smaller\")\n\n\treturn int64(i) > size\n}", "func TestCompression(t *testing.T) {\n\tvec := NewVector()\n\tfor i := 0; i < 1e5; i++ {\n\t\tv := int(rand.Uint32())\n\t\tvec.Add(v)\n\t}\n\n\tsizeofUint := 
int(unsafe.Sizeof(uint(0)))\n\n\trawsize := float64(sizeofUint * 1e5)\n\tvecsize := float64(vec.Size())\n\n\tpercentage := ((rawsize - vecsize) / rawsize) * 100\n\tfmt.Printf(\"=== COMPRESSION: %.2f%%\\n\", percentage)\n}", "func canCompress(value value) bool { return value&0x7fffffff == value }", "func align(n, b int) int {\n\tbsz := b - 1 // blocksize\n\treturn (n + bsz) &^ bsz\n}", "func isValidBlock(newBlock, oldBlock Block) bool {\n\tif oldBlock.Index+1 != newBlock.Index {\n\t\treturn false\n\t}\n\n\tif oldBlock.Hash != newBlock.PrevHash {\n\t\treturn false\n\t}\n\n\tif calculateBlockHash(newBlock) != newBlock.Hash {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func checkPallocBits(t *testing.T, got, want *PallocBits) bool {\n\td := DiffPallocBits(got, want)\n\tif len(d) != 0 {\n\t\tt.Errorf(\"%d range(s) different\", len(d))\n\t\tfor _, bits := range d {\n\t\t\tt.Logf(\"\\t@ bit index %d\", bits.I)\n\t\t\tt.Logf(\"\\t| got: %s\", StringifyPallocBits(got, bits))\n\t\t\tt.Logf(\"\\t| want: %s\", StringifyPallocBits(want, bits))\n\t\t}\n\t\treturn false\n\t}\n\treturn true\n}", "func (dec *XMASDecoder) CheckAtPosition(pos int) (bool, error) {\n\tidx := pos + dec.PreambleSize\n\ttarget := dec.Stream[idx]\n\n\tif pos < 0 || idx >= len(dec.Stream) {\n\t\treturn false, errors.New(\"out of bounds\")\n\t}\n\n\tbeginIdx := idx - dec.PreambleSize\n\tfinalIdx := idx - 1\n\n\tpass := false\n\tfor i := beginIdx; i <= finalIdx; i++ {\n\t\touterVal := dec.Stream[i]\n\t\tfor j := beginIdx; j <= finalIdx; j++ {\n\t\t\tif i == j {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tinnerVal := dec.Stream[j]\n\n\t\t\tif outerVal+innerVal == target {\n\t\t\t\tpass = true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn pass, nil\n}", "func (t kSamples) checkUpdate(d float64, row []interface{}) {\n\tindexToChange := -1\n\tvar maxDistance float64\n\tfor i, e := range t {\n\t\tif e.distance > maxDistance {\n\t\t\tmaxDistance = e.distance\n\t\t\tindexToChange = i\n\t\t}\n\t}\n\tif d < maxDistance {\n\t\tt[indexToChange].row = row\n\t\tt[indexToChange].distance = d\n\t}\n}", "func shouldKeepLooking(inputIndex, inputSize int, inputR []rune, next rune) bool {\n\treturn inputIndex < inputSize && inputR[inputIndex] != next\n}", "func checkOffset(queens []core.VarId, store *core.Store, offset int) {\n\theadQueen := queens[0]\n\tprop := propagator.CreateXplusCneqY(headQueen,\n\t\toffset, queens[core.AbsInt(offset)])\n\tstore.AddPropagator(prop)\n}", "func (db *dgrambuf_t) _canhold(sz int) bool {\n\tif (db.head-db.tail) == uint(len(db.dgrams)) ||\n\t\tdb.cbuf.Left() < sz {\n\t\treturn false\n\t}\n\treturn true\n}", "func (c *Cache) blockAlign(offset int64) int64 {\n\treturn offset / int64(c.blockSize) * int64(c.blockSize)\n}", "func checkReadQuality(read *sam.Record) bool {\n\tif int(read.MapQ) < MinMapQuality || read.Len() < MinReadLength {\n\t\treturn false\n\t}\n\n\t//\t\tfor _, cigar := range read.Cigar {\n\t//\t\t\tif cigar.Type() != sam.CigarMatch && cigar.Type() != sam.CigarSoftClipped {\n\t//\t\t\t\treturn false\n\t//\t\t\t}\n\t//\t\t}\n\t\n return true\n}", "func buildSlidingAttackMask(sqStart Square, mask uint64, deltas []int) uint64 {\n\tvar results uint64\n\tfor _, delta := range deltas {\n\t\tsq := sqStart\n\t\tfor {\n\t\t\tprevSquare := sq\n\t\t\tif int(sq.Address)+delta < 0 || int(sq.Address)+delta >= 64 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tsq.Address = uint8(int(sq.Address) + delta)\n\t\t\tif SquareDistance(prevSquare, sq) > 3 {\n\t\t\t\t// Dont wrap around edges\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tresults |= sq.mask()\n\t\t\tif 
mask&sq.mask() != 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn results\n}", "func checkRow(m [][]int, padding, x, y int) (flag bool) {\n\tflag = true\n\tfor k := 1; k <= padding && flag; k++ {\n\t\tif m[x+k][y] == 1 || m[x-k][y] == 1 {\n\t\t\tflag = false\n\t\t}\n\t}\n\n\treturn\n}", "func contains(shorter, longer *TrieKey, prematchedBits uint) (matches, exact bool, common, child uint) {\n\t// Two variables important in finding which child to descend into\n\tvar pivotByte, numBytes uint\n\tpivotMask := byte(0x80)\n\n\t// calculate `exact`, `common`, and `child` at the end with defer\n\tdefer func() {\n\t\tif !matches {\n\t\t\tvar s, l byte\n\n\t\t\t// We know both of these slices are large enough to index with\n\t\t\t// `numBytes` because `matches` is false and therefore it must have\n\t\t\t// been a previous comparison of these bytes that got us here.\n\t\t\tfor i := prematchedBits / 8; i <= numBytes; i++ {\n\t\t\t\ts, l = shorter.Bits[i], longer.Bits[i]\n\t\t\t\tif s == l {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tcommon = 8*i + uint(bits.LeadingZeros8(s^l))\n\n\t\t\t\t// Whether `longer` goes on the left (0) or right (1)\n\t\t\t\tif longer.Bits[i] < shorter.Bits[i] {\n\t\t\t\t\tchild = 0\n\t\t\t\t} else {\n\t\t\t\t\tchild = 1\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tcommon = shorter.Length\n\t\texact = shorter.Length == longer.Length\n\t\tif !exact {\n\t\t\t// Whether `longer` goes on the left (0) or right (1)\n\t\t\tif longer.Bits[pivotByte]&pivotMask == 0 {\n\t\t\t\tchild = 0\n\t\t\t} else {\n\t\t\t\tchild = 1\n\t\t\t}\n\t\t}\n\t}()\n\n\t// Prefix length of 0 matches everything!\n\tif shorter.Length == 0 {\n\t\tmatches = true\n\t\treturn\n\t}\n\n\t// The bits to compare in the two keys always follows the following pattern:\n\t// 1. any number of leading \"full\" bytes which must match exactly\n\t// 2. 
0 or 1 \"partial\" byte in the least significant (last) position which\n\t// must match up to the number of partial bits (1-7 bits).\n\t//\n\t// The strategy here is to compare the bytes from the least significant\n\t// (last) to the most significant (first) to avoid redundantly comparing\n\t// bytes that might have already matched higher in the tree.\n\n\t// Calculate number of bytes (including possible least-significant partial)\n\t// Decrement this as we compare bytes up to the most significant.\n\tnumBytes = bitsToBytes(shorter.Length)\n\n\t// Figure out how many bits are in the partial byte (0 means no partial)\n\tmaskLen := shorter.Length % 8\n\n\t// If the last byte is partial, compare using a bitmask\n\tif maskLen > 0 {\n\t\tvar mask byte\n\t\tmask = 0xff << (8 - maskLen)\n\n\t\t// decrement before comparing since the slices are indexed from 0\n\t\tnumBytes--\n\t\tif shorter.Bits[numBytes]&mask != longer.Bits[numBytes]&mask {\n\t\t\tmatches = false\n\t\t\treturn\n\t\t}\n\n\t\tpivotMask >>= maskLen\n\t}\n\n\tpivotByte = numBytes\n\n\t// The other bytes are all full and can be compared simply\n\tfor numBytes > (prematchedBits / 8) {\n\t\t// decrement before comparing since the slices are indexed from 0\n\t\tnumBytes--\n\t\tif shorter.Bits[numBytes] != longer.Bits[numBytes] {\n\t\t\tmatches = false\n\t\t\treturn\n\t\t}\n\t}\n\n\tmatches = true\n\treturn\n}", "func (w *Window) Check(index uint64) bool {\n\t// check if too old\n\tif index+WindowSize < w.highest {\n\t\treturn false\n\t}\n\n\t// bits outside the block size represent which block the index is in\n\tindexBlock := index >> blockBitsLog\n\n\t// move window if new index is higher\n\tif index > w.highest {\n\t\tcurrTopBlock := w.highest >> blockBitsLog\n\t\t// how many blocks ahead is indexBlock?\n\t\t// cap it at a full circle around the array, at that point we clear the\n\t\t// whole thing\n\t\tnewBlocks := min(indexBlock-currTopBlock, numBlocks)\n\t\t// clear each new block\n\t\tfor i := uint64(1); i <= newBlocks; i++ {\n\t\t\t// mod index so it wraps around\n\t\t\tw.blocks[(currTopBlock+i)%numBlocks] = 0\n\t\t}\n\t\tw.highest = index\n\t}\n\n\t// we didn't mod until now because we needed to know the difference between\n\t// a lower index and wrapped higher index\n\t// we need to keep the index inside the array now\n\tindexBlock %= numBlocks\n\n\t// bits inside the block represent where in the block the bit is\n\t// mask it with the block size\n\tindexBit := index & uint64(blockBits-1)\n\n\t// finally check the index\n\n\t// save existing block to see if it changes\n\toldBlock := w.blocks[indexBlock]\n\t// create updated block\n\tnewBlock := oldBlock | (1 << indexBit)\n\t// set block to new value\n\tw.blocks[indexBlock] = newBlock\n\n\t// if the bit wasn't already 1, the values should be different and this should return true\n\treturn oldBlock != newBlock\n}", "func checkPkcs7Padding(buf []byte) ([]byte, error) {\n\tif len(buf) < 16 {\n\t\treturn nil, fmt.Errorf(\"Invalid padded buffer\")\n\t}\n\n\tpadLen := int(buf[len(buf)-1])\n\tif padLen < 1 || padLen > 16 {\n\t\treturn nil, fmt.Errorf(\"Invalid padded buffer\")\n\t}\n\n\tif padLen > len(buf) {\n\t\treturn nil, fmt.Errorf(\"Invalid padded buffer\")\n\t}\n\n\tfor pos := len(buf) - padLen; pos < len(buf); pos++ {\n\t\tif int(buf[pos]) != padLen {\n\t\t\treturn nil, fmt.Errorf(\"Invalid padded buffer\")\n\t\t}\n\t}\n\n\treturn buf[:len(buf)-padLen], nil\n}", "func canMakePaliQueries(s string, queries [][]int) []bool {\n\tn := len(queries)\n\n\tcnt := make([]int, 1, n+1)\n\tc 
:= 0\n\tfor _, l := range s {\n\t\tc ^= 1 << uint(l-'a')\n\t\tcnt = append(cnt, c)\n\t}\n\n\tres := make([]bool, n)\n\tfor i, q := range queries {\n\t\tlo, hi, k := q[0], q[1], q[2]\n\t\tif k >= 13 {\n\t\t\tres[i] = true\n\t\t\tcontinue\n\t\t}\n\t\tremains := bits(cnt[hi+1] ^ cnt[lo])\n\t\tres[i] = remains/2 <= k\n\t}\n\n\treturn res\n}", "func (da *DoubleArray) _decideBaseOffset(firstChars []uint8, existsTerminator bool, offset uint8, rootIndex uint32, baseSearchOffset uint32) (uint32, uint32) {\n for {\n if baseSearchOffset >= uint32(len(da.Base)) {\n da._resizeDoubleArray()\n }\n if da.Check[baseSearchOffset] == 0 {\n break\n }\n baseSearchOffset++\n }\n var baseOffset uint32\n if baseSearchOffset <= charIndexCount + 2 {\n baseOffset = 2\n } else {\n baseOffset = baseSearchOffset - charIndexCount\n }\n for {\n if baseOffset + charIndexCount >= uint32(len(da.Base)) {\n da._resizeDoubleArray()\n }\n if !da._checkCollision(firstChars, existsTerminator, baseOffset) {\n // 衝突しない場合\n var i uint32\n for i = 1; i < charIndexCount; i++ {\n if firstChars[i] != 0 {\n da.Check[baseOffset + i] = rootIndex\n }\n }\n if existsTerminator {\n da.Check[baseOffset + charIndexCount] = rootIndex\n }\n\t\t\t//daCount++\n\t\t\t//if daCount % 1000 == 0 {\n\t\t\t//\tfmt.Printf(\"DEBUG decideBaseOffset %d %d %d\\n\", daCount, baseOffset, baseSearchOffset)\n\t\t\t//}\n return baseOffset, baseSearchOffset\n }\n baseOffset++\n }\n}", "func discoverBlockSizeInfo(oracle EncryptionOracleFn) BlockSizeInfo {\n\t// Assume block size is 8:\n\t// =>\n\t// suffix | inputSizeToGetFullPadding\n\t// 0 | 8\n\t// 1 | 7\n\t// 2 | 6\n\t// 3 | 5\n\t// 4 | 4\n\t// 5 | 3\n\t// 6 | 2\n\t// 7 | 1\n\t// 8 | 8\n\t// 9 | 7\n\n\tplainText := []byte{}\n\tcipher := askOracle(oracle, plainText)\n\tinitialLength := len(cipher)\n\tcipherLength := initialLength\n\n\tfor cipherLength == initialLength {\n\t\tplainText = append(plainText, 'A')\n\t\tcipher = askOracle(oracle, plainText)\n\t\tcipherLength = len(cipher)\n\t}\n\n\tbs := cipherLength - initialLength\n\treturn BlockSizeInfo{\n\t\tinputSizeToGetFullPadding: len(plainText),\n\t\tblockSize: bs,\n\t}\n}", "func PADDB(mx, x operand.Op) { ctx.PADDB(mx, x) }", "func TestFuzzBlockHash(t *testing.T) {\n\tf := fuzz.New()\n\tf.NilChance(0)\n\tfor i := 0; i < 10000; i++ {\n\t\tvar testBlock Block\n\t\tf.Fuzz(&testBlock)\n\t\ttestBlock.Justify = CreateQuorumCert(&testBlock)\n\t\tnumSigs, _ := rand.Int(rand.Reader, big.NewInt(10))\n\t\tfor j := int64(0); j < numSigs.Int64(); j++ {\n\t\t\tvar sig PartialSig\n\t\t\tf.Fuzz(&sig)\n\t\t\tid, _ := rand.Int(rand.Reader, big.NewInt(1000))\n\t\t\trID := config.ReplicaID(id.Int64())\n\t\t\tsig.ID = rID\n\t\t\tsig.R, _ = rand.Int(rand.Reader, big.NewInt(math.MaxInt64))\n\t\t\tsig.S, _ = rand.Int(rand.Reader, big.NewInt(math.MaxInt64))\n\t\t\ttestBlock.Justify.Sigs[rID] = sig\n\t\t}\n\t\thash1 := testBlock.Hash()\n\t\thash2 := testBlock.Hash()\n\t\tif !bytes.Equal(hash1[:], hash2[:]) {\n\t\t\tt.Fatalf(\"Non-determinism in hash function detected:\\nBlock: %s\\nHash1: %s\\nHash2: %s\", testBlock, hash1, hash2)\n\t\t}\n\t}\n}", "func isLeadingNumZeroes(hash []byte) bool {\n\tif numLeadingZeroes == 0 {\n\t\treturn true\n\t} else {\n\t\ti := 0\n\t\tnumZeroes := numLeadingZeroes\n\t\tfor {\n\t\t\t// numZeroes <= 8, byte at hash[i] will determine validity\n\t\t\tif numZeroes-8 <= 0 {\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\t// numZeroes is greater than 8, byte at hash[i] must be zero\n\t\t\t\tif hash[i] != 0 {\n\t\t\t\t\treturn false\n\t\t\t\t} else 
{\n\t\t\t\t\ti++\n\t\t\t\t\tnumZeroes -= 8\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// returns true if byte at hash[i] has the the minimum number of leading zeroes\n\t\t// if numZeroes is 8: hash[i] < 2^(8-8) == hash[1] < 1 == hash[i] must be (0000 0000)b.\n\t\t// if numZeroes is 1: hash[i] < 2^(8-1) == hash[1] < (1000 0000)b == hash[i] <= (0111 1111)b\n\t\treturn float64(hash[i]) < math.Pow(2, float64(8-numZeroes))\n\t}\n}", "func checkPallocSum(t testing.TB, got, want PallocSum) {\n\tif got.Start() != want.Start() {\n\t\tt.Errorf(\"inconsistent start: got %d, want %d\", got.Start(), want.Start())\n\t}\n\tif got.Max() != want.Max() {\n\t\tt.Errorf(\"inconsistent max: got %d, want %d\", got.Max(), want.Max())\n\t}\n\tif got.End() != want.End() {\n\t\tt.Errorf(\"inconsistent end: got %d, want %d\", got.End(), want.End())\n\t}\n}", "func (this *DatastoreOperations) CompactIfNeeded(state *DatastoreState, minSize int64, minGrowthRatio float64, minUnusedSizeRatio float64) (bool, error) {\n\t// Store the start time of the operation\n\tstartTime := MonoUnixTimeMilli()\n\n\t// Get the current size of the index\n\tcurrentSize := state.Size()\n\n\t// Continue only if current size is at least the minimum size to perform compaction checks\n\tif currentSize < minSize {\n\t\treturn false, nil\n\t}\n\n\t// Continue only if file size has grown a sufficient amount since last check\n\tif float64(currentSize) < float64(state.HeadEntryValue.LastCompactionCheckSize)*minGrowthRatio {\n\t\treturn false, nil\n\t}\n\n\t// Create a key index and add all entries to it\n\tkeyIndex := NewDatastoreKeyIndex()\n\terr := keyIndex.AddFromEntryStream(NewPrefetchingReaderAt(state.File), 0, currentSize)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t// Get compacted size and calculate unused size\n\tcompactedSize := keyIndex.GetCompactedSize()\n\tunusedSize := currentSize - compactedSize\n\n\t// If the compacted size is below the threshold for a file rewrite\n\tif float64(unusedSize)/float64(currentSize) < minUnusedSizeRatio {\n\t\t// Update in-place and persist the updated head entry\n\t\terr = state.UpdateHeadEntry(&HeadEntryValue{\n\t\t\tVersion: this.State.HeadEntryValue.Version,\n\t\t\tLastCompactionTime: this.State.HeadEntryValue.LastCompactionTime,\n\t\t\tLastCompactionCheckTime: MonoUnixTimeMicro(),\n\t\t\tLastCompactionCheckSize: currentSize,\n\t\t\tLastCompactionCheckUnusedSize: unusedSize,\n\t\t})\n\n\t\t// Return with any error that occurred, or nil\n\t\treturn false, err\n\t}\n\n\t// Create a timestamp for the compacted datastore head entry\n\tcompactionTimestamp := MonoUnixTimeMicro()\n\n\t// Create a new head entry that preserves the original creation time\n\tcompactedDatastoreHeadEntry := CreateSerializedHeadEntry(&HeadEntryValue{\n\t\tVersion: DatastoreVersion,\n\t\tLastCompactionTime: compactionTimestamp,\n\t\tLastCompactionCheckTime: compactionTimestamp,\n\t\tLastCompactionCheckSize: compactedSize,\n\t\tLastCompactionCheckUnusedSize: 0,\n\t}, state.CreationTime)\n\n\t// Create a reader for the compacted datastore\n\tcompactedDatastoreReader := io.MultiReader(\n\t\tbytes.NewReader(compactedDatastoreHeadEntry),\n\t\tkeyIndex.CreateReaderForCompactedRanges(state.File, HeadEntrySize))\n\n\t// Rewrite the file with the compacted ranges\n\terr = CreateOrRewriteFileSafe(this.FilePath, compactedDatastoreReader)\n\n\t// If an error occurred while rewriting the file\n\tif err != nil {\n\t\t// Return the error\n\t\treturn false, err\n\t}\n\n\t// Reload the datastore\n\tnewState, err := this.Load()\n\n\t// If an error 
occurred while loading the rewritten file\n\tif err != nil {\n\t\t// Return the error\n\t\treturn false, err\n\t}\n\n\t// Atomically replace the current state object with the new state object\n\tthis.ReplaceState(newState)\n\n\t// Log message\n\tthis.ParentServer.Logf(1, \"Compacted datastore '%s' from %d to %d bytes in %dms\", this.Name, currentSize, compactedSize, MonoUnixTimeMilli()-startTime)\n\n\t// Return without error\n\treturn true, nil\n}", "func isNewBlockValid(newBlock Block) bool {\n\tlastBlock := Blockchain[len(Blockchain)-1]\n\tif lastBlock.Index+1 == newBlock.Index && makeLastBlockHeaderHash(lastBlock) == newBlock.PrevHashHeader && newBlock.blockHash == makeBlockHash(newBlock) {\n\t\treturn true\n\t}\n\treturn false\n}", "func didCompress(input []byte) bool {\n\tvar output bytes.Buffer\n\n\tw := zlib.NewWriter(&output)\n\t_, err := w.Write(input)\n\tw.Close()\n\n\treturn err == nil && len(input) > output.Len()\n}", "func encodeBlockBetterSnappy(dst, src []byte) (d int) {\n\treturn encodeBlockBetterSnappyGo(dst, src)\n}", "func (c *Codec) tryMd(idx int, prefix, ol oligo.Oligo, olen int, mdblks []uint64, data [][]byte, difficulty int) (err int) {\n\t// TODO: this works for only single error allowed at the moment\n\t// should probably be made more general\n\n\tmdsz := c.mdsz\n\tif idx >= c.blknum - c.rsnum {\n\t\tmdsz = 5\t// RS erasure blocks are always 5 nts\n\t}\n\n\tif ol.Len() < mdsz {\n\t\treturn -1\n\t}\n\n\t// No error\n\terr = c.tryIt(idx, prefix, ol.Slice(0, mdsz), ol.Slice(mdsz, 0), olen - mdsz, mdblks, data, difficulty)\n\tif err >= 0 {\n\t\treturn\n\t}\n\n\t// FIXME: The code below is not fully tested yet\n\n\t// One error\n\t// Delete\n\t// Iterate through all positions, and insert all possible nts\n\tif difficulty > 2 && ol.Len() + 1 >= mdsz {\n\t\tfor p := 0; p < mdsz - 1; p++ {\n\t\t\tvar sol oligo.Oligo\n\t\t\tif p == 0 {\n\t\t\t\tsol = long.New(0)\n\t\t\t} else {\n\t\t\t\tsol = ol.Slice(0, p)\n\t\t\t}\n\t\t\teol := ol.Slice(p + 1, mdsz)\n\n\t\t\tfor n := 0; n < len(singleNts); n++ {\n\t\t\t\tmdol, _ := long.Copy(sol)\n\t\t\t\tmdol.Append(singleNts[n])\n\t\t\t\tmdol.Append(eol)\n\n\t\t\t\terr = c.tryIt(idx, prefix, mdol, ol.Slice(mdsz - 1, 0), olen - mdsz, mdblks, data, difficulty)\n\t\t\t\tif err >= 0 {\n\t\t\t\t\terr++\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Insert\n\t// Iterate through all positions and remove one nt\n\tif difficulty > 2 && ol.Len() > mdsz {\n\t\tfor p := 0; p < mdsz + 1; p++ {\n\t\t\tvar mdol oligo.Oligo\n\n\t\t\tif p == 0 {\n\t\t\t\tmdol = long.New(0)\n\t\t\t} else {\n\t\t\t\tmdol = ol.Slice(0, p)\n\t\t\t}\n\n\t\t\tmdol.Append(ol.Slice(p+1, mdsz + 1))\n\t\t\terr = c.tryIt(idx, prefix, mdol, ol.Slice(mdsz + 1, 0), olen - mdsz, mdblks, data, difficulty)\n\t\t\tif err >= 0 {\n\t\t\t\terr++\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\t// Substitution\n\t// Iterate through all positions and replace the nt with the rest of the possible values\n\tif difficulty > 2 && ol.Len() >= mdsz {\n\t\tfor p := 0; p < mdsz; p++ {\n\t\t\tvar sol oligo.Oligo\n\t\t\tif p == 0 {\n\t\t\t\tsol = long.New(0)\n\t\t\t} else {\n\t\t\t\tsol = ol.Slice(0, p)\n\t\t\t}\n\t\t\teol := ol.Slice(p + 1, mdsz)\n\t\t\tnt := ol.At(p)\n\t\t\tfor n := 0; n < len(singleNts); n++ {\n\t\t\t\tif n == nt {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tmdol, _ := long.Copy(sol)\n\t\t\t\tmdol.Append(singleNts[n])\n\t\t\t\tmdol.Append(eol)\n\n\t\t\t\terr = c.tryIt(idx, prefix, mdol, ol.Slice(mdsz, 0), olen - mdsz, mdblks, data, difficulty)\n\t\t\t\tif err >= 0 
{\n\t\t\t\t\terr++\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// we've gone through all and no luck\n\treturn -1\n}", "func (j *SeqChunkProvider) checkForPreviouslyAllocatedChunks(\n\tseqMetadata *SequenceMetadata, c *CellInfoAnnotation, progress *jobspb.Progress,\n) (bool, error) {\n\tvar found bool\n\tfileProgress := progress.GetImport().SequenceDetails[c.sourceID]\n\tif fileProgress.SeqIdToChunks == nil {\n\t\treturn found, nil\n\t}\n\tvar allocatedSeqChunks *jobspb.SequenceDetails_SequenceChunks\n\tvar ok bool\n\tif allocatedSeqChunks, ok = fileProgress.SeqIdToChunks[int32(seqMetadata.id)]; !ok {\n\t\treturn found, nil\n\t}\n\n\tfor _, chunk := range allocatedSeqChunks.Chunks {\n\t\t// We have found the chunk of sequence values that was assigned to the\n\t\t// swath of rows encompassing rowID.\n\t\tif chunk.ChunkStartRow <= c.rowID && chunk.NextChunkStartRow > c.rowID {\n\t\t\trelativeRowIndex := c.rowID - chunk.ChunkStartRow\n\t\t\tseqMetadata.curVal = chunk.ChunkStartVal +\n\t\t\t\tseqMetadata.seqDesc.GetSequenceOpts().Increment*(seqMetadata.instancesPerRow*relativeRowIndex)\n\t\t\tfound = true\n\t\t\treturn found, nil\n\t\t}\n\t}\n\treturn found, nil\n}", "func (s IntegrationSuite) TestCheckSchemaCompression(t *testing.T) {\n\tdir := getDir(t, \"testdata/validcfg\")\n\n\t// Ignore all linters except for the compression one\n\tforceOnlyRulesWarning(dir.Config, \"compression\")\n\topts, err := OptionsForDir(dir)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error from OptionsForDir: %v\", err)\n\t}\n\tlogicalSchema := dir.LogicalSchemas[0]\n\twsOpts, err := workspace.OptionsForDir(dir, s.d.Instance)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error from workspace.OptionsForDir: %v\", err)\n\t}\n\twsSchema, err := workspace.ExecLogicalSchema(logicalSchema, wsOpts)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error from workspace.ExecLogicalSchema: %v\", err)\n\t}\n\n\t// Count the InnoDB tables in the dir, for use in computing the expected\n\t// warning annotation count below\n\tvar innoTableCount int\n\tfor _, tbl := range wsSchema.Tables {\n\t\tif tbl.Engine == \"InnoDB\" {\n\t\t\tinnoTableCount++\n\t\t}\n\t}\n\n\t// Perform tests with various permutations of allow-list and flavor, and\n\t// confirm the number of annotations matches expectations. 
Note that the only\n\t// compressed tables in the dir are the two in testdata/validcfg/compression.sql;\n\t// one uses KEY_BLOCK_SIZE=2, and the other effectively uses 8 by way of\n\t// defaulting to half the page size.\n\tcases := []struct {\n\t\tallowList []string\n\t\tflavor tengo.Flavor\n\t\texpectedWarningCount int\n\t}{\n\t\t{[]string{\"8kb\"}, s.d.Flavor(), innoTableCount - 1},\n\t\t{[]string{\"page\", \"8kb\"}, tengo.FlavorMySQL57, innoTableCount - 1},\n\t\t{[]string{\"page\"}, tengo.FlavorMariaDB103, innoTableCount},\n\t\t{[]string{\"none\"}, s.d.Flavor(), 2},\n\t\t{[]string{\"none\", \"4kb\"}, s.d.Flavor(), 2},\n\t\t{[]string{\"none\", \"4kb\", \"page\"}, s.d.Flavor(), 2},\n\t\t{[]string{\"none\", \"invalid-value\"}, s.d.Flavor(), 2},\n\t\t{[]string{\"invalid-value\"}, s.d.Flavor(), innoTableCount},\n\t}\n\tfor n, c := range cases {\n\t\topts.RuleConfig[\"compression\"] = c.allowList\n\t\topts.Flavor = c.flavor\n\t\tresult := CheckSchema(wsSchema, opts)\n\t\tif result.WarningCount != c.expectedWarningCount {\n\t\t\tt.Errorf(\"cases[%d] expected warning count %d, instead found %d\", n, c.expectedWarningCount, result.WarningCount)\n\t\t}\n\t}\n\n\t// If the Dockerized test instance's Flavor supports page compression, verify\n\t// that the regexp used by tableCompressionMode() works properly.\n\t// Store a mapping of table name -> expected 2nd return value of tableCompressionMode().\n\tvar tableExpectedClause map[string]string\n\tif s.d.Flavor().Min(tengo.FlavorMySQL57) {\n\t\tdir = getDir(t, \"testdata/pagecomprmysql\")\n\t\ttableExpectedClause = map[string]string{\n\t\t\t\"page_comp_zlib\": \"COMPRESSION='zlib'\",\n\t\t\t\"page_comp_lz4\": \"COMPRESSION='lz4'\",\n\t\t\t\"page_comp_none\": \"\",\n\t\t}\n\t} else if s.d.Flavor().Min(tengo.FlavorMariaDB102) {\n\t\tdir = getDir(t, \"testdata/pagecomprmaria\")\n\t\ttableExpectedClause = map[string]string{\n\t\t\t\"page_comp_1\": \"`PAGE_COMPRESSED`=1\",\n\t\t\t\"page_comp_on\": \"`PAGE_COMPRESSED`='on'\",\n\t\t\t\"page_comp_0\": \"\",\n\t\t\t\"page_comp_off\": \"\",\n\t\t}\n\t}\n\tif tableExpectedClause != nil {\n\t\tlogicalSchema := dir.LogicalSchemas[0]\n\t\twsOpts, err := workspace.OptionsForDir(dir, s.d.Instance)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Unexpected error from workspace.OptionsForDir: %v\", err)\n\t\t}\n\t\twsSchema, err := workspace.ExecLogicalSchema(logicalSchema, wsOpts)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Unexpected error from workspace.ExecLogicalSchema: %v\", err)\n\t\t}\n\t\tif len(wsSchema.Failures) > 0 {\n\t\t\tt.Fatalf(\"%d of the CREATEs in %s unexpectedly failed: %+v\", len(wsSchema.Failures), dir, wsSchema.Failures)\n\t\t}\n\t\tfor _, tbl := range wsSchema.Tables {\n\t\t\texpectedClause, ok := tableExpectedClause[tbl.Name]\n\t\t\tif !ok {\n\t\t\t\tt.Fatalf(\"Unexpectedly found table %s in dir %s, not present in tableExpectedClause mapping for flavor %s\", tbl.Name, dir, s.d.Flavor())\n\t\t\t}\n\t\t\tvar expectedMode string\n\t\t\tif expectedClause == \"\" {\n\t\t\t\texpectedMode = \"none\"\n\t\t\t} else {\n\t\t\t\texpectedMode = \"page\"\n\t\t\t}\n\t\t\tactualMode, actualClause := tableCompressionMode(tbl)\n\t\t\tif actualMode != expectedMode || actualClause != expectedClause {\n\t\t\t\tt.Errorf(\"Unexpected return value from tableCompressionMode(%s): got %q,%q; expected %q,%q\", tbl.Name, actualMode, actualClause, expectedMode, expectedClause)\n\t\t\t}\n\t\t}\n\t}\n}", "func encodeBlockBetter(dst, src []byte) (d int) {\n\treturn encodeBlockBetterGo(dst, src)\n}", "func (b testBody) isDeleted(t 
*testing.T, encoding []byte, bspan dvid.Span) bool {\n\t// Get to the # spans and RLE in encoding\n\tspansEncoding := encoding[8:]\n\tvar spans dvid.Spans\n\tif err := spans.UnmarshalBinary(spansEncoding); err != nil {\n\t\tt.Fatalf(\"Error in decoding sparse volume: %v\\n\", err)\n\t\treturn false\n\t}\n\n\t// Iterate true spans to see if any are in the blocks given.\n\tfor _, span := range spans {\n\t\tbx0 := span[2] / 32\n\t\tbx1 := span[3] / 32\n\t\tby := span[1] / 32\n\t\tbz := span[0] / 32\n\n\t\twithin_x := (bx0 >= bspan[2] && bx0 <= bspan[3]) || (bx1 >= bspan[2] && bx1 <= bspan[3])\n\t\tif bz == bspan[0] && by == bspan[1] && within_x {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func compactSigCheck(t *testing.T, sig []byte) {\n\tt.Helper()\n\tb := int(sig[32])\n\tif b < 0 {\n\t\tt.Errorf(\"highest bit is negative: %d\", b)\n\t}\n\tif ((b >> 7) == 1) != ((b & 0x80) == 0x80) {\n\t\tt.Errorf(\"highest bit: %d bit >> 7: %d\", b, b>>7)\n\t}\n\tif (b & 0x80) == 0x80 {\n\t\tt.Errorf(\"highest bit: %d bit & 0x80: %d\", b, b&0x80)\n\t}\n}", "func (m *CPUMiner) solveBlock(block *types.Block, ticker *time.Ticker, quit chan struct{}) bool {\n\theader := &block.BlockHeader\n\tseed, err := m.chain.CalcNextSeed(&header.PreviousBlockHash)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tfor i := uint64(0); i <= maxNonce; i++ {\n\t\tselect {\n\t\tcase <-quit:\n\t\t\treturn false\n\t\tcase <-ticker.C:\n\t\t\tif m.chain.BestBlockHeight() >= header.Height {\n\t\t\t\treturn false\n\t\t\t}\n\t\tdefault:\n\t\t}\n\n\t\theader.Nonce = i\n\t\theaderHash := header.Hash()\n\t\tif difficulty.CheckProofOfWork(&headerHash, seed, header.Bits) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func compressBlock(data []byte) []byte {\n\t// Preallocate the output slice on the optimistic assumption that\n\t// the output won't be bigger than the input.\n\tret := make([]byte, 0, len(data))\n\tfor i := 0; i < len(data); i++ {\n\t\t// Last byte in the input? Encode it and be done.\n\t\tif i == len(data)-1 {\n\t\t\tret = append(ret, byteLiteral(data[i])...)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Have we seen a run already? If so then encode it.\n\t\tl, offset := findRun(data[i:], data[0:i])\n\t\tif l >= 3 {\n\t\t\t// 10 bytes is our maximum run length.\n\t\t\tif l > 10 {\n\t\t\t\tl = 10\n\t\t\t}\n\t\t\tword := uint16(offset<<3+(l-3)) | 0x8000\n\t\t\tret = append(ret, byte(word>>8), byte(word&0xff))\n\n\t\t\ti += (l - 1)\n\t\t\tcontinue\n\t\t}\n\n\t\t// space + printable? Add in the special byte and be done.\n\t\tif data[i] == ' ' && (data[i+1] >= 0x40 && data[i+1] <= 0x7f) {\n\t\t\tret = append(ret, 0x80^data[i+1])\n\t\t\ti++\n\t\t\tcontinue\n\t\t}\n\n\t\t// A literal character? Then just pass it on to the output stream.\n\t\tif (data[i] >= 0x09 && data[i] <= 0x7f) || data[i] == 0 {\n\t\t\tret = append(ret, data[i])\n\t\t\tcontinue\n\t\t}\n\n\t\t// Not a literal. 
In that case we need to blob a range of bytes --\n\t\t// send out a chunk as big as we can.\n\t\tmax := len(data) - i\n\t\tif max > 8 {\n\t\t\tmax = 8\n\t\t}\n\t\tret = append(ret, byte(max))\n\t\tret = append(ret, data[i:i+max]...)\n\t\ti += (max - 1)\n\t\tcontinue\n\t}\n\n\treturn ret\n}", "func (e *encoder) writeBlock(b *block, q quantIndex, prevDC int32) int32 {\n\tfdct(b)\n\t// Emit the DC delta.\n\tdc := div(b[0], 8*int32(e.quant[q][0]))\n\te.emitHuffRLE(huffIndex(2*q+0), 0, dc-prevDC)\n\t// Emit the AC components.\n\th, runLength := huffIndex(2*q+1), int32(0)\n\tfor zig := 1; zig < blockSize; zig++ {\n\t\tac := div(b[unzig[zig]], 8*int32(e.quant[q][zig]))\n\t\t// steganography\n\t\tif len(e.data) > 0 && q == 0 && (ac < -1 || ac > 1) {\n\t\t\tneg := ac < 0\n\t\t\tif neg {\n\t\t\t\tac = -ac\n\t\t\t}\n\t\t\t// set LSB of ac using clear + or\n\t\t\tac = (ac &^ 1) | int32(e.data[0]>>e.databit)&1\n\t\t\tif neg {\n\t\t\t\tac = -ac\n\t\t\t}\n\n\t\t\t// increment bit counter\n\t\t\tif e.databit++; e.databit == 8 {\n\t\t\t\te.data = e.data[1:]\n\t\t\t\te.databit = 0\n\t\t\t}\n\t\t}\n\t\tif ac == 0 {\n\t\t\trunLength++\n\t\t} else {\n\t\t\tfor runLength > 15 {\n\t\t\t\te.emitHuff(h, 0xf0)\n\t\t\t\trunLength -= 16\n\t\t\t}\n\t\t\te.emitHuffRLE(h, runLength, ac)\n\t\t\trunLength = 0\n\t\t}\n\t}\n\tif runLength > 0 {\n\t\te.emitHuff(h, 0x00)\n\t}\n\treturn dc\n}", "func GetMsgCompressedFlag(header uint64) bool {\n return (header & (1 << msgCompressedOffset)) != 0\n}", "func (b *bitWriter) flushAlign() {\n\tnbBytes := (b.nBits + 7) >> 3\n\tfor i := uint8(0); i < nbBytes; i++ {\n\t\tb.out = append(b.out, byte(b.bitContainer>>(i*8)))\n\t}\n\tb.nBits = 0\n\tb.bitContainer = 0\n}", "func (r *nvPairReader) skipToAlign() {\n\tvar alignment int\n\tswitch r.nvlist.encoding {\n\tcase EncodingNative:\n\t\talignment = 8\n\tcase EncodingXDR:\n\t\talignment = 4\n\tdefault:\n\t\tpanic(\"Invalid encoding inside parser\")\n\t}\n\tif (r.currentByte-r.startByte)%alignment != 0 {\n\t\tr.currentByte += alignment - ((r.currentByte - r.startByte) % alignment)\n\t}\n}", "func packetChecksumOK(packet []byte) bool {\n\n\tif len(packet) < 0x22 {\n\t\treturn false\n\t}\n\n\t// checksum in data packet\n\tpacketsum := uint16(packet[0x21])<<8 | uint16(packet[0x20])\n\n\t// calculated checksum\n\tsum := checksum(packet)\n\tsum -= uint16(packet[0x20]) // remove checksum value bytes\n\tsum -= uint16(packet[0x21])\n\n\treturn sum == packetsum\n}", "func TestBlockLayout(t *testing.T) {\n\tt.Parallel()\n\n\tvar b block\n\tb.setbit(0)\n\tb.setbit(1)\n\tb.setbit(111)\n\tb.setbit(499)\n\n\tassert.Equal(t, BlockBits, 8*binary.Size(b))\n\n\th := sha256.New()\n\tbinary.Write(h, binary.LittleEndian, b)\n\texpect := \"aa7f8c411600fa387f0c10641eab428a7ed2f27a86171ac69f0e2087b2aa9140\"\n\tassert.Equal(t, expect, hex.EncodeToString(h.Sum(nil)))\n}", "func (q queryManager) checkQueryNeedsTransaction(qp dbquery.QueryParsed) (bool, error) {\n\n\tif qp.IsSelect() {\n\t\treturn false, nil\n\t}\n\t// transaction for any update\n\treturn true, nil\n}", "func CheckSizeBlocks(start, end uint64) error {\n\ttxn := globalOpt.Txn(true)\n\tdefer txn.Commit()\n\n\tlist := make([]interface{}, 0, 64)\n\n\tit, err := txn.Get(TableBlockKey, HeightBlockKey)\n\tif err != nil {\n\t\ttxn.Abort()\n\t\treturn err\n\t}\n\tfor obj := it.Next(); obj != nil; obj = it.Next() {\n\t\tv, ok := obj.(*fabclient.MiddleCommonBlock)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tif v.Number < start || v.Number > end {\n\t\t\tlist = append(list, obj)\n\t\t}\n\t}\n\n\tfor _, one := 
range list {\n\t\terr = txn.Delete(TableBlockKey, one)\n\t\tif err != nil {\n\t\t\ttxn.Abort()\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func MaxEncodedLen(ct CompressionType, srcLen uint64) (uint64, bool) {\n\tif ct == Snappy {\n\t\tif srcLen > MaxBlockLen(ct) {\n\t\t\treturn 0, false\n\t\t}\n\t\tsz := snappy.MaxEncodedLen(int(srcLen))\n\t\tif sz == -1 {\n\t\t\treturn 0, false\n\t\t}\n\t\treturn uint64(sz), true\n\t}\n\tpanic(\"not supported compression type\")\n}", "func copyOuterRows(innerColOffset, outerColOffset int, src *Chunk, numRows int, dst *Chunk) {\n\ttrace_util_0.Count(_chunk_util_00000, 17)\n\tif numRows <= 0 {\n\t\ttrace_util_0.Count(_chunk_util_00000, 20)\n\t\treturn\n\t}\n\ttrace_util_0.Count(_chunk_util_00000, 18)\n\trow := src.GetRow(0)\n\tvar srcCols []*column\n\tif innerColOffset == 0 {\n\t\ttrace_util_0.Count(_chunk_util_00000, 21)\n\t\tsrcCols = src.columns[outerColOffset:]\n\t} else {\n\t\ttrace_util_0.Count(_chunk_util_00000, 22)\n\t\t{\n\t\t\tsrcCols = src.columns[:innerColOffset]\n\t\t}\n\t}\n\ttrace_util_0.Count(_chunk_util_00000, 19)\n\tfor i, srcCol := range srcCols {\n\t\ttrace_util_0.Count(_chunk_util_00000, 23)\n\t\tdstCol := dst.columns[outerColOffset+i]\n\t\tdstCol.appendMultiSameNullBitmap(!srcCol.isNull(row.idx), numRows)\n\t\tdstCol.length += numRows\n\t\tif srcCol.isFixed() {\n\t\t\ttrace_util_0.Count(_chunk_util_00000, 24)\n\t\t\telemLen := len(srcCol.elemBuf)\n\t\t\tstart := row.idx * elemLen\n\t\t\tend := start + numRows*elemLen\n\t\t\tdstCol.data = append(dstCol.data, srcCol.data[start:end]...)\n\t\t} else {\n\t\t\ttrace_util_0.Count(_chunk_util_00000, 25)\n\t\t\t{\n\t\t\t\tstart, end := srcCol.offsets[row.idx], srcCol.offsets[row.idx+numRows]\n\t\t\t\tdstCol.data = append(dstCol.data, srcCol.data[start:end]...)\n\t\t\t\toffsets := dstCol.offsets\n\t\t\t\telemLen := srcCol.offsets[row.idx+1] - srcCol.offsets[row.idx]\n\t\t\t\tfor j := 0; j < numRows; j++ {\n\t\t\t\t\ttrace_util_0.Count(_chunk_util_00000, 27)\n\t\t\t\t\toffsets = append(offsets, int64(offsets[len(offsets)-1]+elemLen))\n\t\t\t\t}\n\t\t\t\ttrace_util_0.Count(_chunk_util_00000, 26)\n\t\t\t\tdstCol.offsets = offsets\n\t\t\t}\n\t\t}\n\t}\n}", "func (a *ArrayDataSlab) CanLendToLeft(size uint32) bool {\n\tif len(a.elements) == 0 {\n\t\t// TODO return EmptyDataSlabError\n\t\tpanic(fmt.Sprintf(\"empty data slab %d\", a.header.id))\n\t}\n\tif len(a.elements) < 2 {\n\t\treturn false\n\t}\n\tif a.header.size-size < uint32(minThreshold) {\n\t\treturn false\n\t}\n\tlendSize := uint32(0)\n\tfor i := 0; i < len(a.elements); i++ {\n\t\tlendSize += a.elements[i].ByteSize()\n\t\tif a.header.size-lendSize < uint32(minThreshold) {\n\t\t\treturn false\n\t\t}\n\t\tif lendSize >= size {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (x *Big) isCompact() bool { return x.compact != c.Inflated }", "func (idx *Tree) PrepareUpdate(key []byte) (found bool, op *UpdateOperation) {\n\tid := idx.allocatorQueue.get()\n\top = newUpdateOperation(idx, idx.allocators[id], true)\n\treturn op.prepareUpdate(key), op\n}", "func OverflowShardBits(rowID int64, shardRowIDBits uint64) bool {\n\ttrace_util_0.Count(_tables_00000, 320)\n\tmask := (1<<shardRowIDBits - 1) << (64 - shardRowIDBits - 1)\n\treturn rowID&int64(mask) > 0\n}", "func (p *packPlan) markBox(col, row, width uint8) error {\n\n\tisEmpty, err := p.isEmptyCell(col, row)\n\tswitch {\n\tcase err != nil:\n\t\treturn err\n\tcase !isEmpty:\n\t\treturn errCellNotEmpty\n\t}\n\n\tisEmpty, err = p.isEmptyCell(col, row+width-1)\n\tswitch {\n\tcase err 
!= nil:\n\t\treturn err\n\tcase !isEmpty:\n\t\treturn fmt.Errorf(\"packPlan.markBox(%d,%d,%d) failed, ending cell is not empty.\", col, row, width)\n\t}\n\n\tfor i := row; i < row+width; i++ {\n\t\tif ok, _ := p.isEmptyCell(col, i); ok {\n\t\t\tp.setCell(col, i, byte('#'))\n\t\t}\n\t}\n\treturn nil\n}", "func (r *ShardReader) maybeReadNextCoordBlock() bool {\n\tfr := r.fieldReaders[gbam.FieldCoord]\n\tif fr.fb.remaining > 0 {\n\t\treturn true\n\t}\n\tif !fr.readNextBlock() {\n\t\treturn false\n\t}\n\tif fr.field == gbam.FieldCoord {\n\t\tstart := fr.fb.index.StartAddr\n\t\tr.addrGenerator.LastRec = biopb.Coord{start.RefId, start.Pos, start.Seq - 1}\n\t}\n\treturn true\n}", "func AddBlock(block *types.Block, db *types.DB) {\n\ttxCheck := func(txs []*types.Tx) bool {\n\t\t// start = copy.deepcopy(txs)\n\t\tvar start = txs\n\t\tvar txsSource []*types.Tx\n\t\tvar startCopy []*types.Tx\n\n\t\tfor !reflect.DeepEqual(start, startCopy) {\n\t\t\t// Block passes this test\n\t\t\tif start == nil {\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\t// startCopy = copy.deepcopy(start)\n\t\t\tstartCopy = start\n\t\t\tlast := start[len(start)-1]\n\n\t\t\t// transactions.tx_check[start[-1]['type']](start[-1], out, DB)\n\t\t\tfn := transactionVerify[last.Type]\n\t\t\tif fn(last, txsSource, db) {\n\t\t\t\t// start.pop()\n\t\t\t\tstart = start[:len(start)-1]\n\t\t\t\ttxsSource = append(txsSource, last)\n\t\t\t} else {\n\t\t\t\t// Block is invalid\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\t// Block is invalid\n\t\treturn true\n\t}\n\n\t// if \"error\" in block: return False\n\tif block.Error != nil {\n\t\treturn\n\t}\n\n\t// if \"length\" not in block: return False\n\t// NOTE: block.Length not being set means it takes its \"zero value\".\n\t// This shouldn't be a problem, check out next if stmt.\n\tif block.Length == 0 {\n\t\treturn\n\t}\n\n\tlength := db.Length\n\tif block.Length != length+1 {\n\t\treturn\n\t}\n\n\tif block.DiffLength != HexSum(db.DiffLength, HexInv(block.Target)) {\n\t\treturn\n\t}\n\n\tif length >= 0 && tools.DetHash(db.GetBlock(length)) != block.PrevHash {\n\t\treturn\n\t}\n\n\t// a = copy.deepcopy(block)\n\t// a.pop(\"nonce\")\n\tblockCopy := block\n\tblockCopy.Nonce = nil\n\n\t//if \"target\" not in block.keys(): return False\n\tif block.Target == \"\" {\n\t\treturn\n\t}\n\n\thalfWay := &types.HalfWay{\n\t\tNonce: block.Nonce,\n\t\tHalfHash: tools.DetHash(blockCopy),\n\t}\n\n\tif tools.DetHash(halfWay) > block.Target {\n\t\treturn\n\t}\n\n\tif block.Target != Target(db, block.Length) {\n\t\treturn\n\t}\n\n\t// TODO: Figure out why 8 (length)?\n\tearliestMedian := median(RecentBlockTimes(db, config.Get().Mmm, 8))\n\t// `float64` (unix epoch) back to `time.Time`\n\tsec, nsec := math.Modf(earliestMedian)\n\tearliest := time.Unix(int64(sec), int64(nsec*1e9))\n\n\t// if block.Time > time.time(): return false\n\t// if block.Time < earliest: return false\n\tif block.Time.After(time.Now()) || block.Time.Before(earliest) {\n\t\treturn\n\t}\n\n\tif txCheck(block.Txs) {\n\t\treturn\n\t}\n\n\t// block_check was unnecessary because it was only called once\n\t// and it only returned true at its end\n\n\t// if block_check(block, db):\n\tlog.Println(\"add_block:\", block)\n\tdb.Put(strconv.Itoa(block.Length), block)\n\n\tdb.Length = block.Length\n\tdb.DiffLength = block.DiffLength\n\n\torphans := db.Txs\n\tdb.Txs = nil\n\n\tfor _, tx := range block.Txs {\n\t\tdb.AddBlock = true\n\t\tfn := transactionUpdate[tx.Type]\n\t\tfn(tx, db)\n\t}\n\n\tfor _, tx := range orphans {\n\t\tAddTx(tx, db)\n\t}\n}", "func 
compactPrefix() []byte { return []byte{0, 3, 0, 0} }", "func (decryptor *PgDecryptor) MatchZoneBlock(block []byte) {\n\tfor _, c := range block {\n\t\tif !decryptor.MatchZone(c) {\n\t\t\treturn\n\t\t}\n\t}\n}", "func (c *cache) hasCapacity(data []byte) bool {\n\tif len(c.buf)-c.cursor < len(data) {\n\t\treturn false\n\t}\n\treturn true\n}", "func simplifyBlock(sdom SparseTree, ft *factsTable, b *Block) {\n\tfor _, v := range b.Values {\n\t\tswitch v.Op {\n\t\tcase OpSlicemask:\n\t\t\t// Replace OpSlicemask operations in b with constants where possible.\n\t\t\tx, delta := isConstDelta(v.Args[0])\n\t\t\tif x == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// slicemask(x + y)\n\t\t\t// if x is larger than -y (y is negative), then slicemask is -1.\n\t\t\tlim, ok := ft.limits[x.ID]\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif lim.umin > uint64(-delta) {\n\t\t\t\tif v.Args[0].Op == OpAdd64 {\n\t\t\t\t\tv.reset(OpConst64)\n\t\t\t\t} else {\n\t\t\t\t\tv.reset(OpConst32)\n\t\t\t\t}\n\t\t\t\tif b.Func.pass.debug > 0 {\n\t\t\t\t\tb.Func.Warnl(v.Pos, \"Proved slicemask not needed\")\n\t\t\t\t}\n\t\t\t\tv.AuxInt = -1\n\t\t\t}\n\t\tcase OpCtz8, OpCtz16, OpCtz32, OpCtz64:\n\t\t\t// On some architectures, notably amd64, we can generate much better\n\t\t\t// code for CtzNN if we know that the argument is non-zero.\n\t\t\t// Capture that information here for use in arch-specific optimizations.\n\t\t\tx := v.Args[0]\n\t\t\tlim, ok := ft.limits[x.ID]\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif lim.umin > 0 || lim.min > 0 || lim.max < 0 {\n\t\t\t\tif b.Func.pass.debug > 0 {\n\t\t\t\t\tb.Func.Warnl(v.Pos, \"Proved %v non-zero\", v.Op)\n\t\t\t\t}\n\t\t\t\tv.Op = ctzNonZeroOp[v.Op]\n\t\t\t}\n\n\t\tcase OpLsh8x8, OpLsh8x16, OpLsh8x32, OpLsh8x64,\n\t\t\tOpLsh16x8, OpLsh16x16, OpLsh16x32, OpLsh16x64,\n\t\t\tOpLsh32x8, OpLsh32x16, OpLsh32x32, OpLsh32x64,\n\t\t\tOpLsh64x8, OpLsh64x16, OpLsh64x32, OpLsh64x64,\n\t\t\tOpRsh8x8, OpRsh8x16, OpRsh8x32, OpRsh8x64,\n\t\t\tOpRsh16x8, OpRsh16x16, OpRsh16x32, OpRsh16x64,\n\t\t\tOpRsh32x8, OpRsh32x16, OpRsh32x32, OpRsh32x64,\n\t\t\tOpRsh64x8, OpRsh64x16, OpRsh64x32, OpRsh64x64,\n\t\t\tOpRsh8Ux8, OpRsh8Ux16, OpRsh8Ux32, OpRsh8Ux64,\n\t\t\tOpRsh16Ux8, OpRsh16Ux16, OpRsh16Ux32, OpRsh16Ux64,\n\t\t\tOpRsh32Ux8, OpRsh32Ux16, OpRsh32Ux32, OpRsh32Ux64,\n\t\t\tOpRsh64Ux8, OpRsh64Ux16, OpRsh64Ux32, OpRsh64Ux64:\n\t\t\t// Check whether, for a << b, we know that b\n\t\t\t// is strictly less than the number of bits in a.\n\t\t\tby := v.Args[1]\n\t\t\tlim, ok := ft.limits[by.ID]\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbits := 8 * v.Args[0].Type.Size()\n\t\t\tif lim.umax < uint64(bits) || (lim.max < bits && ft.isNonNegative(by)) {\n\t\t\t\tv.AuxInt = 1 // see shiftIsBounded\n\t\t\t\tif b.Func.pass.debug > 0 {\n\t\t\t\t\tb.Func.Warnl(v.Pos, \"Proved %v bounded\", v.Op)\n\t\t\t\t}\n\t\t\t}\n\t\tcase OpDiv16, OpDiv32, OpDiv64, OpMod16, OpMod32, OpMod64:\n\t\t\t// On amd64 and 386 fix-up code can be avoided if we know\n\t\t\t// the divisor is not -1 or the dividend > MinIntNN.\n\t\t\tdivr := v.Args[1]\n\t\t\tdivrLim, divrLimok := ft.limits[divr.ID]\n\t\t\tdivd := v.Args[0]\n\t\t\tdivdLim, divdLimok := ft.limits[divd.ID]\n\t\t\tif (divrLimok && (divrLim.max < -1 || divrLim.min > -1)) ||\n\t\t\t\t(divdLimok && divdLim.min > mostNegativeDividend[v.Op]) {\n\t\t\t\tv.AuxInt = 1 // see NeedsFixUp in genericOps - v.AuxInt = 0 means we have not proved\n\t\t\t\t// that the divisor is not -1 and the dividend is not the most negative,\n\t\t\t\t// so we need to add fix-up 
code.\n\t\t\t\tif b.Func.pass.debug > 0 {\n\t\t\t\t\tb.Func.Warnl(v.Pos, \"Proved %v does not need fix-up\", v.Op)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif b.Kind != BlockIf {\n\t\treturn\n\t}\n\n\t// Consider outgoing edges from this block.\n\tparent := b\n\tfor i, branch := range [...]branch{positive, negative} {\n\t\tchild := parent.Succs[i].b\n\t\tif getBranch(sdom, parent, child) != unknown {\n\t\t\t// For edges to uniquely dominated blocks, we\n\t\t\t// already did this when we visited the child.\n\t\t\tcontinue\n\t\t}\n\t\t// For edges to other blocks, this can trim a branch\n\t\t// even if we couldn't get rid of the child itself.\n\t\tft.checkpoint()\n\t\taddBranchRestrictions(ft, parent, branch)\n\t\tunsat := ft.unsat\n\t\tft.restore()\n\t\tif unsat {\n\t\t\t// This branch is impossible, so remove it\n\t\t\t// from the block.\n\t\t\tremoveBranch(parent, branch)\n\t\t\t// No point in considering the other branch.\n\t\t\t// (It *is* possible for both to be\n\t\t\t// unsatisfiable since the fact table is\n\t\t\t// incomplete. We could turn this into a\n\t\t\t// BlockExit, but it doesn't seem worth it.)\n\t\t\tbreak\n\t\t}\n\t}\n}", "func XXX_HACK_autodetect_libpcap_layout(reader io.ReaderAt, hdrlay *PacketHeaderLayout) {\r\n\tbuf := make([]byte, 24)\r\n\treader.ReadAt(buf, 0)\r\n\tif 0xa1b23c4d == binary.LittleEndian.Uint32(buf[0:4]) &&\r\n\t\t228 == binary.LittleEndian.Uint32(buf[20:24]) {\r\n\t\tBuildPacketHeaderLayout(hdrlay, HDRLAY_LIBPCAP)\r\n\t}\r\n}" ]
[ "0.6187572", "0.6138", "0.5824444", "0.5796465", "0.56046355", "0.5274121", "0.51186985", "0.5001094", "0.49921867", "0.49766874", "0.49304855", "0.49192142", "0.49119055", "0.4871706", "0.47838402", "0.47368175", "0.4720097", "0.47121912", "0.46999508", "0.46996126", "0.4683411", "0.46701735", "0.4662143", "0.46606755", "0.4657012", "0.46375975", "0.46066955", "0.4577664", "0.45490447", "0.4531025", "0.4516164", "0.45046774", "0.44954237", "0.44933987", "0.4492165", "0.44847843", "0.44801518", "0.44775346", "0.44727427", "0.44714898", "0.446794", "0.44673347", "0.4466228", "0.44616365", "0.44607875", "0.44594583", "0.4457736", "0.44502574", "0.44468826", "0.4445699", "0.44403055", "0.4435149", "0.44343573", "0.4431707", "0.44304013", "0.44242737", "0.4412388", "0.44108912", "0.44107306", "0.440886", "0.4399436", "0.43862325", "0.43862215", "0.43696964", "0.43663877", "0.43621024", "0.43498838", "0.4347471", "0.43464053", "0.43440163", "0.43412176", "0.4340699", "0.43383336", "0.4333146", "0.43216312", "0.43134627", "0.43124536", "0.43055758", "0.43033502", "0.43010622", "0.42960098", "0.4295968", "0.42919198", "0.42906722", "0.4289187", "0.4289013", "0.42865345", "0.42832014", "0.42818576", "0.4279068", "0.42773166", "0.42763865", "0.4274165", "0.4273929", "0.42738786", "0.42732507", "0.42666185", "0.42591214", "0.42550448", "0.42515978" ]
0.6087205
2
Try to pad a query that already contains an OPT record, but no padding option.
func TestAddEdnsPaddingCompressedOptQuery(t *testing.T) { optQuery := simpleQuery optQuery.Additionals = make([]dnsmessage.Resource, len(simpleQuery.Additionals)) copy(optQuery.Additionals, simpleQuery.Additionals) optQuery.Additionals = append(optQuery.Additionals, dnsmessage.Resource{ Header: dnsmessage.ResourceHeader{ Name: dnsmessage.MustNewName("."), Class: dnsmessage.ClassINET, TTL: 0, }, Body: &dnsmessage.OPTResource{ Options: []dnsmessage.Option{}, }, }, ) paddedOnWire, err := AddEdnsPadding(mustPack(&optQuery)) if err != nil { t.Errorf("Failed to pad query with OPT but no padding: %v", err) } if len(paddedOnWire)%PaddingBlockSize != 0 { t.Errorf("AddEdnsPadding failed to correctly pad query with OPT but no padding") } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func pad(unpadded []byte, desiredLength int) []byte {\n\tif len(unpadded) == desiredLength {\n\t\treturn unpadded\n\t}\n\ttoAppend := desiredLength - len(unpadded)\n\treturn append(unpadded, bytes.Repeat([]byte{byte(0x00)}, toAppend)...)\n}", "func WithPaddingAllowed() ParserOption {\n\treturn func(p *Parser) {\n\t\tp.decodePaddingAllowed = true\n\t}\n}", "func padWithSpace(source string, prefix, suffix int) string {\n\tif source == \"\" {\n\t\treturn source\n\t}\n\treturn strings.Repeat(\" \", prefix) + source + strings.Repeat(\" \", suffix)\n}", "func padWithSpace(source string, prefix, suffix int) string {\n\tif source == \"\" {\n\t\treturn source\n\t}\n\n\treturn strings.Repeat(\" \", prefix) + source + strings.Repeat(\" \", suffix)\n}", "func TestAddEdnsPaddingCompressedPaddedQuery(t *testing.T) {\n\tpaddedQuery := simpleQuery\n\tpaddedQuery.Additionals = make([]dnsmessage.Resource, len(simpleQuery.Additionals))\n\tcopy(paddedQuery.Additionals, simpleQuery.Additionals)\n\n\tpaddedQuery.Additionals = append(paddedQuery.Additionals,\n\t\tdnsmessage.Resource{\n\t\t\tHeader: dnsmessage.ResourceHeader{\n\t\t\t\tName: dnsmessage.MustNewName(\".\"),\n\t\t\t\tClass: dnsmessage.ClassINET,\n\t\t\t\tTTL: 0,\n\t\t\t},\n\t\t\tBody: &dnsmessage.OPTResource{\n\t\t\t\tOptions: []dnsmessage.Option{\n\t\t\t\t\t{\n\t\t\t\t\t\tCode: OptResourcePaddingCode,\n\t\t\t\t\t\tData: make([]byte, 5),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\toriginalOnWire := mustPack(&paddedQuery)\n\n\tpaddedOnWire, err := AddEdnsPadding(mustPack(&paddedQuery))\n\tif err != nil {\n\t\tt.Errorf(\"Failed to pad padded query: %v\", err)\n\t}\n\n\tif !bytes.Equal(originalOnWire, paddedOnWire) {\n\t\tt.Errorf(\"AddEdnsPadding tampered with a query that was already padded\")\n\t}\n}", "func PADDB(mx, x operand.Op) { ctx.PADDB(mx, x) }", "func (g *GroupedAVP) Padding() int {\n\treturn 0\n}", "func TestAddEdnsPaddingUncompressedQuery(t *testing.T) {\n\tif len(uncompressedQueryBytes)%PaddingBlockSize == 0 {\n\t\tt.Errorf(\"uncompressedQueryBytes does not require padding, so this test is invalid\")\n\t}\n\tpadded, err := AddEdnsPadding(uncompressedQueryBytes)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif len(padded)%PaddingBlockSize != 0 {\n\t\tt.Errorf(\"AddEdnsPadding failed to correctly pad uncompressed query\")\n\t}\n}", "func (e *EDNS0_PADDING) Option() uint16 { return EDNS0PADDING }", "func (enc Encoding) WithPadding(padding rune) *Encoding {\n\tswitch {\n\tcase padding < NoPadding || padding == '\\r' || padding == '\\n' || padding > 0xff:\n\t\tpanic(\"invalid padding\")\n\tcase padding != NoPadding && enc.decodeMap[byte(padding)] != invalidIndex:\n\t\tpanic(\"padding contained in alphabet\")\n\t}\n\tenc.padChar = padding\n\treturn &enc\n}", "func UseDataPadding(p uint64) Option {\n\treturn func(o *Options) {\n\t\to.DataPadding = p\n\t}\n}", "func buildPadStr(str string, padStr string, padLen int, padLeft bool, padRight bool) string {\n\n\t// When padded length is less then the current string size\n\tif padLen < utf8.RuneCountInString(str) {\n\t\treturn str\n\t}\n\n\tpadLen -= utf8.RuneCountInString(str)\n\n\ttargetLen := padLen\n\n\ttargetLenLeft := targetLen\n\ttargetLenRight := targetLen\n\tif padLeft && padRight {\n\t\ttargetLenLeft = padLen / 2\n\t\ttargetLenRight = padLen - targetLenLeft\n\t}\n\n\tstrToRepeatLen := utf8.RuneCountInString(padStr)\n\n\trepeatTimes := int(math.Ceil(float64(targetLen) / float64(strToRepeatLen)))\n\trepeatedString := strings.Repeat(padStr, repeatTimes)\n\n\tleftSide := \"\"\n\tif 
padLeft {\n\t\tleftSide = repeatedString[0:targetLenLeft]\n\t}\n\n\trightSide := \"\"\n\tif padRight {\n\t\trightSide = repeatedString[0:targetLenRight]\n\t}\n\n\treturn leftSide + str + rightSide\n}", "func padID(id string) string {\n\texpectedLen := 0\n\tswitch {\n\tcase len(id) < 16:\n\t\texpectedLen = 16\n\tcase len(id) > 16 && len(id) < 32:\n\t\texpectedLen = 32\n\tdefault:\n\t\treturn id\n\t}\n\n\treturn pads[expectedLen-len(id)] + id\n}", "func PADDD(mx, x operand.Op) { ctx.PADDD(mx, x) }", "func pad(b *bytes.Buffer, str string) {\n\tif b.Len() == 0 {\n\t\treturn\n\t}\n\tb.WriteString(str)\n}", "func Pad(prefix string, reqd int) string {\n\treturn strings.Repeat(\"0\", 5-len(prefix)) + prefix\n}", "func (c *Context) PADDB(mx, x operand.Op) {\n\tc.addinstruction(x86.PADDB(mx, x))\n}", "func ClearOPT(msg *dns.Msg) *dns.Msg {\n\textra := make([]dns.RR, len(msg.Extra))\n\tcopy(extra, msg.Extra)\n\n\tmsg.Extra = []dns.RR{}\n\n\tfor _, rr := range extra {\n\t\tswitch rr.(type) {\n\t\tcase *dns.OPT:\n\t\t\tcontinue\n\t\tdefault:\n\t\t\tmsg.Extra = append(msg.Extra, rr)\n\t\t}\n\t}\n\n\treturn msg\n}", "func TestAddEdnsPaddingCompressedQuery(t *testing.T) {\n\tif len(compressedQueryBytes)%PaddingBlockSize == 0 {\n\t\tt.Errorf(\"compressedQueryBytes does not require padding, so this test is invalid\")\n\t}\n\tpadded, err := AddEdnsPadding(compressedQueryBytes)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif len(padded)%PaddingBlockSize != 0 {\n\t\tt.Errorf(\"AddEdnsPadding failed to correctly pad compressed query\")\n\t}\n}", "func (_Flopper *FlopperSession) Pad() (*big.Int, error) {\n\treturn _Flopper.Contract.Pad(&_Flopper.CallOpts)\n}", "func paddedAppend(size uint, dst, src []byte) []byte {\n\tfor i := 0; i < int(size)-len(src); i++ {\n\t\tdst = append(dst, 0)\n\t}\n\treturn append(dst, src...)\n}", "func fixPadding(b64 string) string {\n\tcount := 4 - len(b64)%4\n\tif count > 0 && count < 4 {\n\t\treturn b64 + strings.Repeat(\"=\", count)\n\t}\n\treturn b64\n}", "func stringPad(str string, padd int) string {\n\t// Add padding\n\tif len(str) < padd {\n\t\tstr = str + strings.Repeat(\" \", padd-len(str))\n\t\t// Clip\n\t} else {\n\t\tstr = str[:padd-2] + \" \"\n\t}\n\treturn str\n}", "func padding(size int) string {\n\tresult := \"\"\n\tfor i := 0; i < size; i++ {\n\t\tresult += \" \"\n\t}\n\treturn result\n}", "func (_Flopper *FlopperCallerSession) Pad() (*big.Int, error) {\n\treturn _Flopper.Contract.Pad(&_Flopper.CallOpts)\n}", "func Pad(s, pad string, width int) string {\n\tgap := width - DisplayWidth(s)\n\tif gap > 0 {\n\t\tgapLeft := int(math.Ceil(float64(gap / 2)))\n\t\tgapRight := gap - gapLeft\n\t\treturn strings.Repeat(string(pad), gapLeft) + s + strings.Repeat(string(pad), gapRight)\n\t}\n\treturn s\n}", "func Pad(s, pad string, width int) string {\n\tgap := width - DisplayWidth(s)\n\tif gap > 0 {\n\t\tgapLeft := int(math.Ceil(float64(gap / 2)))\n\t\tgapRight := gap - gapLeft\n\t\treturn strings.Repeat(string(pad), gapLeft) + s + strings.Repeat(string(pad), gapRight)\n\t}\n\treturn s\n}", "func UseIndexPadding(p uint64) Option {\n\treturn func(o *Options) {\n\t\to.IndexPadding = p\n\t}\n}", "func (_Flopper *FlopperCaller) Pad(opts *bind.CallOpts) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _Flopper.contract.Call(opts, out, \"pad\")\n\treturn *ret0, err\n}", "func getPadding(packetLen int) int {\n\tif packetLen%4 == 0 {\n\t\treturn 0\n\t}\n\treturn 4 - (packetLen % 4)\n}", "func pad(in []byte, length int) []byte {\n\tpadding := length - 
(len(in) % length)\n\tif padding == 0 {\n\t\tpadding = length\n\t}\n\tfor i := 0; i < padding; i++ {\n\t\tin = append(in, byte(padding))\n\t}\n\treturn in\n}", "func (c *Collection) buildJustTooBigDBPrefix() []byte {\n\treturn append(c.prefix, prefixCollectionsData+1)\n}", "func pad(d []byte, n int) []byte {\n\td = append(d, make([]byte, n)...)\n\treturn d\n}", "func pad(s string, w int, c int) string {\n\tif w <= len(s) {\n\t\treturn s\n\t}\n\treturn s + strings.Repeat(string(c), w-len(s))\n}", "func StringPadding(str string, le int) string {\n\tl := le - len(str)\n\tif l > 0 {\n\t\tfor i := 0; i < l; i++ {\n\t\t\tstr = str + \" \"\n\t\t}\n\t}\n\treturn str\n}", "func padToLength(source string, prefix int) string {\n\treturn fmt.Sprintf(fmt.Sprintf(\"%%-%ds\", prefix), source)\n}", "func pad(msg string) string {\n\twidth := defaultWidth\n\tsize, err := ts.GetSize()\n\tif err == nil {\n\t\t// If `ts.GetSize()` was successful, set the width to the number\n\t\t// of columns present in the terminal LFS is attached to.\n\t\t// Otherwise, fall-back to `defaultWidth`.\n\t\twidth = size.Col()\n\t}\n\n\t// Pad the string with whitespace so that printing at the start of the\n\t// line removes all traces from the last print.removes all traces from\n\t// the last print.\n\tpadding := strings.Repeat(\" \", maxInt(0, width-len(msg)))\n\n\treturn msg + padding\n}", "func (t DNSOverTCP) RequiresPadding() bool {\n\treturn t.requiresPadding\n}", "func (p *Patch) SetPadding(value mat.AABB) {\n\tp.Padding = value\n\tp.SetRegion(p.Region)\n}", "func applyAdditionalQueryOptions(queryString string, queryLimit, querySkip int) (string, error) {\n\tconst jsonQueryFields = \"fields\"\n\tconst jsonQueryLimit = \"limit\"\n\tconst jsonQuerySkip = \"skip\"\n\t//create a generic map for the query json\n\tjsonQueryMap := make(map[string]interface{})\n\t//unmarshal the selector json into the generic map\n\tdecoder := json.NewDecoder(bytes.NewBuffer([]byte(queryString)))\n\tdecoder.UseNumber()\n\terr := decoder.Decode(&jsonQueryMap)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif fieldsJSONArray, ok := jsonQueryMap[jsonQueryFields]; ok {\n\t\tswitch fieldsJSONArray.(type) {\n\t\tcase []interface{}:\n\t\t\t//Add the \"_id\" field, these are needed by default\n\t\t\tjsonQueryMap[jsonQueryFields] = append(fieldsJSONArray.([]interface{}),\n\t\t\t\tidField)\n\t\tdefault:\n\t\t\treturn \"\", fmt.Errorf(\"fields definition must be an array\")\n\t\t}\n\t}\n\t// Add limit\n\t// This will override any limit passed in the query.\n\t// Explicit paging not yet supported.\n\tjsonQueryMap[jsonQueryLimit] = queryLimit\n\t// Add skip\n\t// This will override any skip passed in the query.\n\t// Explicit paging not yet supported.\n\tjsonQueryMap[jsonQuerySkip] = querySkip\n\t//Marshal the updated json query\n\teditedQuery, err := json.Marshal(jsonQueryMap)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tlogger.Debugf(\"Rewritten query: %s\", editedQuery)\n\treturn string(editedQuery), nil\n}", "func (c *Context) PADDD(mx, x operand.Op) {\n\tc.addinstruction(x86.PADDD(mx, x))\n}", "func canonicalPadding(b []byte) error {\n\tswitch {\n\tcase b[0]&0x80 == 0x80:\n\t\treturn errNegativeValue\n\tcase len(b) > 1 && b[0] == 0x00 && b[1]&0x80 != 0x80:\n\t\treturn errExcessivelyPaddedValue\n\tdefault:\n\t\treturn nil\n\t}\n}", "func PADDL(mx, x operand.Op) { ctx.PADDL(mx, x) }", "func padData(rawData []byte) []byte {\n\tneedPadding := aes.BlockSize - ((len(rawData) + 2) % aes.BlockSize)\n\n\tvar dataBuf bytes.Buffer\n\tdataBuf.Grow(2 + 
len(rawData) + (aes.BlockSize % (len(rawData) + 2)))\n\n\tdataBuf.Write([]byte(\"|\"))\n\tdataBuf.Write(rawData)\n\tdataBuf.Write([]byte(\"|\"))\n\n\tfor i := 0; i < needPadding; i++ {\n\t\tdataBuf.Write([]byte(\" \"))\n\t}\n\n\treturn dataBuf.Bytes()\n}", "func setupPadding() {\n\n\tpaddingMap[0] = \"10101010101010101010101010101010\"\n\tpaddingMap[1] = \"0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f\"\n\tpaddingMap[2] = \"0e0e0e0e0e0e0e0e0e0e0e0e0e0e\"\n\tpaddingMap[3] = \"0d0d0d0d0d0d0d0d0d0d0d0d0d\"\n\tpaddingMap[4] = \"0c0c0c0c0c0c0c0c0c0c0c0c\"\n\tpaddingMap[5] = \"0b0b0b0b0b0b0b0b0b0b0b\"\n\tpaddingMap[6] = \"0a0a0a0a0a0a0a0a0a0a\"\n\tpaddingMap[7] = \"090909090909090909\"\n\tpaddingMap[8] = \"0808080808080808\"\n\tpaddingMap[9] = \"07070707070707\"\n\tpaddingMap[10] = \"060606060606\"\n\tpaddingMap[11] = \"0505050505\"\n\tpaddingMap[12] = \"04040404\"\n\tpaddingMap[13] = \"030303\"\n\tpaddingMap[14] = \"0202\"\n\tpaddingMap[15] = \"01\"\n}", "func padSpaces(s string, length int, align TableAlignment) string {\n\tsl := charLen(s)\n\n\t// Ensure that padding size is equal or greater\n\t// than the length of the string to pad to avoid\n\t// a panic later due to a negative repeat count.\n\tif sl > length {\n\t\treturn s\n\t}\n\tswitch align {\n\tcase AlignRight:\n\t\treturn strings.Repeat(\" \", length-sl) + s\n\tcase AlignCenter:\n\t\tlen := float64(length-sl) / float64(2)\n\t\tpad := strings.Repeat(\" \", int(math.Ceil(len/float64(1))))\n\t\treturn pad[:int(math.Floor(float64(len)))] + s + pad[:int(math.Ceil(float64(len)))]\n\tdefault:\n\t\t// AlignLeft.\n\t\treturn s + strings.Repeat(\" \", length-sl)\n\t}\n}", "func NoTruncation() QueryOption {\n\treturn func(q *queryOptions) error {\n\t\tq.requestProperties.Options[NoTruncationValue] = true\n\t\treturn nil\n\t}\n}", "func (w *Wrapper) buildQueryOptions() (before string, after string) {\n\tfor _, v := range w.queryOptions {\n\t\tswitch v {\n\t\tcase \"ALL\", \"DISTINCT\", \"SQL_CACHE\", \"SQL_NO_CACHE\", \"DISTINCTROW\", \"HIGH_PRIORITY\", \"STRAIGHT_JOIN\", \"SQL_SMALL_RESULT\", \"SQL_BIG_RESULT\", \"SQL_BUFFER_RESULT\", \"SQL_CALC_FOUND_ROWS\", \"LOW_PRIORITY\", \"QUICK\", \"IGNORE\", \"DELAYED\":\n\t\t\tbefore += fmt.Sprintf(\"%s, \", v)\n\t\tcase \"FOR UPDATE\", \"LOCK IN SHARE MODE\":\n\t\t\tafter += fmt.Sprintf(\"%s, \", v)\n\t\t}\n\t}\n\tif before != \"\" {\n\t\tbefore = fmt.Sprintf(\"%s \", trim(before))\n\t}\n\tif after != \"\" {\n\t\tafter = fmt.Sprintf(\"%s \", trim(after))\n\t}\n\treturn\n}", "func PadSpace(s string, size int) string {\n\tfor i := len(s); i < size; i++ {\n\t\ts = s + \" \"\n\t}\n\treturn s\n}", "func updateRowPad(columnPad []int, columnList []string) []string {\n\t// Make sure padding list is at least as long as row list\n\tst := columnList\n\tfixLen := len(columnPad) - len(st)\n\tif fixLen < 0 {\n\t\t// Get last value of column pad and simply re-use it to fill columnPad up\n\t\tvalueToUse := columnPad[len(columnPad)-1]\n\t\tfor i := 0; i < fixLen*(-1); i++ {\n\t\t\tcolumnPad = append(columnPad, valueToUse)\n\t\t}\n\t}\n\tfor i := range st {\n\t\t// Add padding\n\t\tif len(st[i]) < columnPad[i] {\n\t\t\tst[i] = st[i] + strings.Repeat(\" \", columnPad[i]-len(st[i]))\n\t\t\t// Clip\n\t\t} else {\n\t\t\tst[i] = st[i][:columnPad[i]-2] + \" \"\n\t\t}\n\t}\n\treturn st\n}", "func Pad(s string, ml int) string {\n\tfor i := len(s); i < ml; i++ {\n\t\ts += \" \"\n\t}\n\treturn s\n}", "func (self *Graphics) SetBoundsPaddingA(member int) {\n self.Object.Set(\"boundsPadding\", member)\n}", "func buildPacket(src *Addr, dst *Addr, 
opt uint16) ([]byte, error) {\n\tether := layers.Ethernet{\n\t\tEthernetType: layers.EthernetTypeARP,\n\t\tSrcMAC: src.HardwareAddr,\n\t\tDstMAC: dst.HardwareAddr,\n\t}\n\tarp := layers.ARP{\n\t\tAddrType: layers.LinkTypeEthernet,\n\t\tProtocol: layers.EthernetTypeIPv4,\n\n\t\tHwAddressSize: 6,\n\t\tProtAddressSize: 4,\n\t\tOperation: opt,\n\n\t\tSourceHwAddress: src.HardwareAddr,\n\t\tSourceProtAddress: src.IP.To4(),\n\n\t\tDstHwAddress: dst.HardwareAddr,\n\t\tDstProtAddress: dst.IP.To4(),\n\t}\n\n\tbuf := gopacket.NewSerializeBuffer()\n\terr := gopacket.SerializeLayers(buf, defaultSerializeOpts, &ether, &arp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func (w *messageWriter) pad(alignment int) error {\n\tn, err := w.Write(padding[:w.pos%alignment])\n\tif err != nil {\n\t\treturn err\n\t}\n\tw.pos += n\n\treturn nil\n}", "func resolveBlankPaddedChar(s string, t *types.T) string {\n\tif t.Oid() == oid.T_bpchar {\n\t\t// Pad spaces on the right of the string to make it of length specified in\n\t\t// the type t.\n\t\treturn fmt.Sprintf(\"%-*v\", t.Width(), s)\n\t}\n\treturn s\n}", "func (d *DecimalAlign) Pad(v interface{}) string {\n\tvar lp int\n\tif s, ok := v.(string); ok {\n\t\t// If a string then look for \".\".\n\t\t// If found then lp=num chars before but excluding it.\n\t\t// If not found then use length of string\n\t\tlp = strings.Index(s, \".\")\n\t\tif lp < 0 {\n\t\t\tlp = len(s)\n\t\t}\n\t} else {\n\t\tvf, _ := util.ToFloat64(v)\n\t\tlp = len(fmt.Sprintf(\"%.0f\", vf))\n\t}\n\treturn strconv.Itoa(d.lp - lp)\n}", "func elideToLen(inBuf bytes.Buffer, length int) bytes.Buffer {\n\tif inBuf.Len() > length {\n\t\tinBuf.Truncate(length)\n\t\tinBuf.WriteString(\"...\")\n\t}\n\treturn inBuf\n}", "func (t *DNSOverTCPTransport) RequiresPadding() bool {\n\treturn t.requiresPadding\n}", "func unpad(in []byte) []byte {\n\tif len(in) == 0 {\n\t\treturn nil\n\t}\n\n\tpadding := in[len(in)-1]\n\tif int(padding) > len(in) {\n\t\treturn nil\n\t} else if padding == 0 {\n\t\treturn nil\n\t}\n\n\tfor i := len(in) - 1; i > len(in)-int(padding)-1; i-- {\n\t\tif in[i] != padding {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn in[:len(in)-int(padding)]\n}", "func pad(blockSize int, buf []byte) []byte {\n\tpadLen := blockSize - (len(buf) % blockSize)\n\tpadding := bytes.Repeat([]byte{byte(padLen)}, padLen)\n\treturn append(buf, padding...)\n}", "func unpad(message []byte) ([]byte, error) {\n if len(message) == 0 {\n return nil, ErrInvalidPadding\n }\n\n lenPadding := message[len(message) - 1]\n if lenPadding == 0 || lenPadding > aes.BlockSize {\n return nil, ErrInvalidPadding\n }\n\n for i := len(message) - 1; i > len(message) - int(lenPadding) - 1; i-- {\n if message[i] != lenPadding {\n return nil, ErrInvalidPadding\n }\n }\n\n return message[:len(message) - int(lenPadding)], nil\n}", "func TryAddExtraLimit(ctx sessionctx.Context, node ast.StmtNode) ast.StmtNode {\n\tif ctx.GetSessionVars().SelectLimit == math.MaxUint64 || ctx.GetSessionVars().InRestrictedSQL {\n\t\treturn node\n\t}\n\tif explain, ok := node.(*ast.ExplainStmt); ok {\n\t\texplain.Stmt = TryAddExtraLimit(ctx, explain.Stmt)\n\t\treturn explain\n\t} else if sel, ok := node.(*ast.SelectStmt); ok {\n\t\tif sel.Limit != nil || sel.SelectIntoOpt != nil {\n\t\t\treturn node\n\t\t}\n\t\tnewSel := *sel\n\t\tnewSel.Limit = &ast.Limit{\n\t\t\tCount: ast.NewValueExpr(ctx.GetSessionVars().SelectLimit, \"\", \"\"),\n\t\t}\n\t\treturn &newSel\n\t} else if show, ok := node.(*ast.ShowStmt); ok {\n\t\t// Only when Limit is 
nil, for Show stmt Limit should always nil when be here,\n\t\t// and the show STMT's behavior should consist with MySQL does.\n\t\tif show.Limit != nil || !show.NeedLimitRSRow() {\n\t\t\treturn node\n\t\t}\n\t\tnewShow := *show\n\t\tnewShow.Limit = &ast.Limit{\n\t\t\tCount: ast.NewValueExpr(ctx.GetSessionVars().SelectLimit, \"\", \"\"),\n\t\t}\n\t\treturn &newShow\n\t} else if setOprStmt, ok := node.(*ast.SetOprStmt); ok {\n\t\tif setOprStmt.Limit != nil {\n\t\t\treturn node\n\t\t}\n\t\tnewSetOpr := *setOprStmt\n\t\tnewSetOpr.Limit = &ast.Limit{\n\t\t\tCount: ast.NewValueExpr(ctx.GetSessionVars().SelectLimit, \"\", \"\"),\n\t\t}\n\t\treturn &newSetOpr\n\t}\n\treturn node\n}", "func padToMaxLength(source string) string {\n\treturn fmt.Sprintf(fmt.Sprintf(\"%%-%ds\", maxLenPrefix), source)\n}", "func padBytesNeeded(elementLen int) int {\n\treturn 4*(elementLen/4+1) - elementLen\n}", "func evenPad(a *string, b *string) {\n\tif len(*a) != len(*b) {\n\t\tfor len(*a) < len(*b) {\n\t\t\t*a = \"0\" + *a\n\t\t}\n\t\tfor len(*b) < len(*a) {\n\t\t\t*b = \"0\" + *b\n\t\t}\n\t}\n}", "func PruneOpt(g *types.Cmd) {\n\tg.AddOptions(\"--prune\")\n}", "func unpad(src []byte) ([]byte, error) {\n\tlength := len(src)\n\tunpadding := int(src[length-1])\n\n\tif unpadding > length {\n\t\treturn nil, errors.New(\"unpad error. This could happen when incorrect encryption key is used\")\n\t}\n\n\treturn src[:(length - unpadding)], nil\n}", "func PadMissingLenth(t []byte, l int) []byte {\n\tmissing := l - len(t)%l\n\tpad := make([]byte, missing, missing)\n\tfmt.Printf(\"will pad with missing %d\\n\", missing)\n\tpad[missing-1] = byte(missing)\n\tfmt.Printf(\"padded with %d\\n\", int(pad[missing-1]))\n\treturn append(t, pad...)\n}", "func truncated(state request.Request, ret *dns.Msg, err error) (*dns.Msg, error) {\n\t// If you query for instance ANY isc.org; you get a truncated query back which miekg/dns fails to unpack\n\t// because the RRs are not finished. The returned message can be useful or useless. Return the original\n\t// query with some header bits set that they should retry with TCP.\n\tif err != dns.ErrTruncated {\n\t\treturn ret, err\n\t}\n\n\t// We may or may not have something sensible... 
if not reassemble something to send to the client.\n\tm := ret\n\tif ret == nil {\n\t\tm = new(dns.Msg)\n\t\tm.SetReply(state.Req)\n\t\tm.Truncated = true\n\t\tm.Authoritative = true\n\t\tm.Rcode = dns.RcodeSuccess\n\t}\n\treturn m, nil\n}", "func PadBoth(str string, padStr string, padLen int) string {\n\treturn buildPadStr(str, padStr, padLen, true, true)\n}", "func FormatImpossibleQuery(buf *TrackedBuffer, node SQLNode) {\n\tswitch node := node.(type) {\n\tcase *Select:\n\t\tbuf.Myprintf(\"select %v from %v where 1 != 1\", node.SelectExprs, node.From)\n\t\tif node.GroupBy != nil {\n\t\t\tnode.GroupBy.Format(buf)\n\t\t}\n\tdefault:\n\t\tnode.Format(buf)\n\t}\n}", "func GetPadding(option string) (IPaddingStrategy, error) {\n\tswitch option {\n\tcase PKCS5, PKCS7:\n\t\treturn PKCS7Padding{}, nil\n\t}\n\n\treturn NullPadding{}, errors.New(\"error: invalid padding option\")\n}", "func leftPad(input []byte, size int) (out []byte) {\n\tn := len(input)\n\tif n > size {\n\t\tn = size\n\t}\n\tout = make([]byte, size)\n\tcopy(out[len(out)-n:], input)\n\treturn\n}", "func PKCS(data []byte, mode string) (padded_data []byte) {\r\n\tvar pad_num int\r\n\r\n\tif mode == \"add\" {\r\n\t\trem := len(data) % userlib.AESBlockSizeBytes\r\n\t\tpad_num = userlib.AESBlockSizeBytes - rem //number to pad by\r\n\t\t//pad := make([]byte, pad_num) //pad array we are appending later\r\n\t\tpadded_data = data[:]\r\n\t\tfor i := 0; i < pad_num; i++ {\r\n\t\t\t//pad = append(pad, byte(pad_num))\r\n\t\t\tpadded_data = append(padded_data, byte(pad_num))\r\n\t\t}\r\n\r\n\t\t//userlib.DebugMsg(\"%d\", padded_data)\r\n\t} else { //remove padding\r\n\t\t//last byte is amount of padding there is\r\n\t\t//ex: d = [1022] means 2 bytes of padding so return d[:2] which is [10]\r\n\r\n\t\tnum := len(data) - 1\r\n\t\tpad_num = len(data) - int(data[num]) //piazza: convert to byte > hex string > int?\r\n\t\tpadded_data = data[:pad_num]\r\n\t}\r\n\r\n\treturn padded_data\r\n}", "func (d *Decoder) padEntry(idx *Index, e *Entry, read int) error {\n\tif idx.Version == 4 {\n\t\treturn nil\n\t}\n\n\tentrySize := read + len(e.Name)\n\tpadLen := 8 - entrySize%8\n\t_, err := io.CopyN(ioutil.Discard, d.r, int64(padLen))\n\treturn err\n}", "func paddKey(key string, val int) string {\n\tif len(key) == val {\n\t\treturn key\n\t}\n\n\tpaddedKey := []byte{}\n\ti := 0\n\tfor len(paddedKey) < val {\n\t\tpaddedKey = append(paddedKey, key[i])\n\n\t\tif i < len(key)-1 {\n\t\t\ti++\n\t\t} else {\n\t\t\ti = 0\n\t\t}\n\t}\n\n\treturn string(paddedKey)\n}", "func (me TxsdLinearGradientTypeSpreadMethod) IsPad() bool { return me.String() == \"pad\" }", "func rpad(s string, padding int) string {\n\ttemplate := fmt.Sprintf(\"%%-%ds\", padding)\n\treturn fmt.Sprintf(template, s)\n}", "func buildOffset(offset int64) string {\n\tif offset == 0 {\n\t\treturn \"\"\n\t}\n\treturn fmt.Sprintf(\" OFFSET %d\", offset)\n}", "func PadEnd(s string, l int, c string) string {\n\tvar strLen int\n\tif l <= 0 {\n\t\tstrLen = 0\n\t} else {\n\t\tstrLen = len(s)\n\t}\n\n\tif strLen < l {\n\t\treturn s + createPadding(l-strLen, c)\n\t}\n\n\treturn s\n}", "func rightPad(s string, padStr string, pLen int) string {\n\treturn s + strings.Repeat(padStr, pLen)\n}", "func (c *Conn) createEmptyPacket() ([]byte, error) {\n\tvar (\n\t\tb []byte\n\t\toff int\n\t\terr error\n\t)\n\n\tif b, err = c.buff.Reset(4); err != nil {\n\t\treturn nil, err\n\t}\n\n\toff += 4 // placeholder for protocol packet header\n\n\treturn b[0:off], nil\n}", "func fixLen(fixme []byte) []byte {\n l := (len(fixme) - 1) 
// skip the Q in \"Q\\x00\\x00\\x01\\x01SELECT...\"\n binary.BigEndian.PutUint32(fixme[1:], uint32(l))\n return fixme\n}", "func getPadder(options DecryptionClientOptions, cekAlg string) Padder {\n\tpadder, ok := options.CryptoRegistry.GetPadder(cekAlg)\n\tif !ok {\n\t\tpadder, ok = options.CryptoRegistry.GetPadder(cekAlg[strings.LastIndex(cekAlg, \"/\")+1:])\n\t\tif !ok {\n\t\t\treturn NoPadder\n\t\t}\n\t}\n\treturn padder\n}", "func Pad(b []byte) []byte {\n\tfor i := len(b); (i % 4) != 0; i++ {\n\t\tb = append(b, 0)\n\t}\n\treturn b\n}", "func pad(src []byte) []byte {\n\tpadding := aes.BlockSize - len(src)%aes.BlockSize\n\tpadtext := bytes.Repeat([]byte{byte(padding)}, padding)\n\n\treturn append(src, padtext...)\n}", "func truncatedPacket(view []byte, trunc, netHdrLen int) stack.PacketBufferPtr {\n\tv := view[:len(view)-trunc]\n\tpkt := stack.NewPacketBuffer(stack.PacketBufferOptions{\n\t\tPayload: buffer.MakeWithData(v),\n\t})\n\treturn pkt\n}", "func (c StringArrayCollection) Pad(num int, value interface{}) Collection {\n\tif len(c.value) > num {\n\t\td := make([]string, len(c.value))\n\t\tcopy(d, c.value)\n\t\treturn StringArrayCollection{\n\t\t\tvalue: d,\n\t\t}\n\t}\n\tif num > 0 {\n\t\td := make([]string, num)\n\t\tfor i := 0; i < num; i++ {\n\t\t\tif i < len(c.value) {\n\t\t\t\td[i] = c.value[i]\n\t\t\t} else {\n\t\t\t\td[i] = value.(string)\n\t\t\t}\n\t\t}\n\t\treturn StringArrayCollection{\n\t\t\tvalue: d,\n\t\t}\n\t} else {\n\t\td := make([]string, -num)\n\t\tfor i := 0; i < -num; i++ {\n\t\t\tif i < -num-len(c.value) {\n\t\t\t\td[i] = value.(string)\n\t\t\t} else {\n\t\t\t\td[i] = c.value[i]\n\t\t\t}\n\t\t}\n\t\treturn StringArrayCollection{\n\t\t\tvalue: d,\n\t\t}\n\t}\n}", "func PKCS7Pad(buf []byte, blockSize int) []byte {\n\tif blockSize < 0 || blockSize > 0xff {\n\t\tpanic(\"PKCS7Pad: invalid block size\")\n\t}\n\t// Find the number (and value) of padding bytes.\n\tn := blockSize - (len(buf) % blockSize)\n\n\treturn append(dup(buf), bytes.Repeat([]byte{byte(n)}, n)...)\n}", "func NewPaddingTLV(length uint8) *PaddingTLV {\n\treturn &PaddingTLV{\n\t\tTLVType: PaddingType,\n\t\tTLVLength: length,\n\t\tPaddingData: make([]byte, length),\n\t}\n}", "func (plugin *RouteConfigurator) fillEmptyNextHop(dstIP string) string {\n\t_, isIPv6, err := addrs.ParseIPWithPrefix(dstIP)\n\tif err != nil {\n\t\tplugin.log.Errorf(\"route resync error: failed to parse IP address %s\", dstIP)\n\t\treturn \"\"\n\t}\n\tif isIPv6 {\n\t\treturn net.IPv6zero.String()\n\t}\n\treturn net.IPv4zero.String()\n}", "func EncodeStringWithPadding(data string, targetLength int) []byte {\n\tvar buf bytes.Buffer\n\n\tif len(data) < targetLength {\n\t\tfor i := 0; i < targetLength-len(data); i++ {\n\t\t\tbuf.WriteByte(0)\n\t\t}\n\t}\n\n\tbuf.Write([]byte(data))\n\treturn buf.Bytes()\n}", "func WithWidth(w int) ColumnOpt {\n\treturn func(c *Column) {\n\t\tif w < 0 {\n\t\t\tw = 0\n\t\t}\n\t\tc.width = w\n\t\tc.pad.fixed = true\n\t}\n}", "func TestNDPOptionsIterCheck(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tbuf []byte\n\t\texpectedErr error\n\t}{\n\t\t{\n\t\t\tname: \"ZeroLengthField\",\n\t\t\tbuf: []byte{0, 0, 0, 0, 0, 0, 0, 0},\n\t\t\texpectedErr: ErrNDPOptMalformedHeader,\n\t\t},\n\t\t{\n\t\t\tname: \"ValidSourceLinkLayerAddressOption\",\n\t\t\tbuf: []byte{1, 1, 1, 2, 3, 4, 5, 6},\n\t\t\texpectedErr: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"TooSmallSourceLinkLayerAddressOption\",\n\t\t\tbuf: []byte{1, 1, 1, 2, 3, 4, 5},\n\t\t\texpectedErr: io.ErrUnexpectedEOF,\n\t\t},\n\t\t{\n\t\t\tname: 
\"ValidTargetLinkLayerAddressOption\",\n\t\t\tbuf: []byte{2, 1, 1, 2, 3, 4, 5, 6},\n\t\t\texpectedErr: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"TooSmallTargetLinkLayerAddressOption\",\n\t\t\tbuf: []byte{2, 1, 1, 2, 3, 4, 5},\n\t\t\texpectedErr: io.ErrUnexpectedEOF,\n\t\t},\n\t\t{\n\t\t\tname: \"ValidPrefixInformation\",\n\t\t\tbuf: []byte{\n\t\t\t\t3, 4, 43, 64,\n\t\t\t\t1, 2, 3, 4,\n\t\t\t\t5, 6, 7, 8,\n\t\t\t\t0, 0, 0, 0,\n\t\t\t\t9, 10, 11, 12,\n\t\t\t\t13, 14, 15, 16,\n\t\t\t\t17, 18, 19, 20,\n\t\t\t\t21, 22, 23, 24,\n\t\t\t},\n\t\t\texpectedErr: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"TooSmallPrefixInformation\",\n\t\t\tbuf: []byte{\n\t\t\t\t3, 4, 43, 64,\n\t\t\t\t1, 2, 3, 4,\n\t\t\t\t5, 6, 7, 8,\n\t\t\t\t0, 0, 0, 0,\n\t\t\t\t9, 10, 11, 12,\n\t\t\t\t13, 14, 15, 16,\n\t\t\t\t17, 18, 19, 20,\n\t\t\t\t21, 22, 23,\n\t\t\t},\n\t\t\texpectedErr: io.ErrUnexpectedEOF,\n\t\t},\n\t\t{\n\t\t\tname: \"InvalidPrefixInformationLength\",\n\t\t\tbuf: []byte{\n\t\t\t\t3, 3, 43, 64,\n\t\t\t\t1, 2, 3, 4,\n\t\t\t\t5, 6, 7, 8,\n\t\t\t\t0, 0, 0, 0,\n\t\t\t\t9, 10, 11, 12,\n\t\t\t\t13, 14, 15, 16,\n\t\t\t},\n\t\t\texpectedErr: ErrNDPOptMalformedBody,\n\t\t},\n\t\t{\n\t\t\tname: \"ValidSourceAndTargetLinkLayerAddressWithPrefixInformation\",\n\t\t\tbuf: []byte{\n\t\t\t\t// Source Link-Layer Address.\n\t\t\t\t1, 1, 1, 2, 3, 4, 5, 6,\n\n\t\t\t\t// Target Link-Layer Address.\n\t\t\t\t2, 1, 7, 8, 9, 10, 11, 12,\n\n\t\t\t\t// Prefix information.\n\t\t\t\t3, 4, 43, 64,\n\t\t\t\t1, 2, 3, 4,\n\t\t\t\t5, 6, 7, 8,\n\t\t\t\t0, 0, 0, 0,\n\t\t\t\t9, 10, 11, 12,\n\t\t\t\t13, 14, 15, 16,\n\t\t\t\t17, 18, 19, 20,\n\t\t\t\t21, 22, 23, 24,\n\t\t\t},\n\t\t\texpectedErr: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"ValidSourceAndTargetLinkLayerAddressWithPrefixInformationWithUnrecognized\",\n\t\t\tbuf: []byte{\n\t\t\t\t// Source Link-Layer Address.\n\t\t\t\t1, 1, 1, 2, 3, 4, 5, 6,\n\n\t\t\t\t// Target Link-Layer Address.\n\t\t\t\t2, 1, 7, 8, 9, 10, 11, 12,\n\n\t\t\t\t// 255 is an unrecognized type. 
If 255 ends up\n\t\t\t\t// being the type for some recognized type,\n\t\t\t\t// update 255 to some other unrecognized value.\n\t\t\t\t255, 2, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 7, 8,\n\n\t\t\t\t// Prefix information.\n\t\t\t\t3, 4, 43, 64,\n\t\t\t\t1, 2, 3, 4,\n\t\t\t\t5, 6, 7, 8,\n\t\t\t\t0, 0, 0, 0,\n\t\t\t\t9, 10, 11, 12,\n\t\t\t\t13, 14, 15, 16,\n\t\t\t\t17, 18, 19, 20,\n\t\t\t\t21, 22, 23, 24,\n\t\t\t},\n\t\t\texpectedErr: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"InvalidRecursiveDNSServerCutsOffAddress\",\n\t\t\tbuf: []byte{\n\t\t\t\t25, 4, 0, 0,\n\t\t\t\t0, 0, 0, 0,\n\t\t\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,\n\t\t\t\t0, 1, 2, 3, 4, 5, 6, 7,\n\t\t\t},\n\t\t\texpectedErr: ErrNDPOptMalformedBody,\n\t\t},\n\t\t{\n\t\t\tname: \"InvalidRecursiveDNSServerInvalidLengthField\",\n\t\t\tbuf: []byte{\n\t\t\t\t25, 2, 0, 0,\n\t\t\t\t0, 0, 0, 0,\n\t\t\t\t0, 1, 2, 3, 4, 5, 6, 7, 8,\n\t\t\t},\n\t\t\texpectedErr: io.ErrUnexpectedEOF,\n\t\t},\n\t\t{\n\t\t\tname: \"RecursiveDNSServerTooSmall\",\n\t\t\tbuf: []byte{\n\t\t\t\t25, 1, 0, 0,\n\t\t\t\t0, 0, 0,\n\t\t\t},\n\t\t\texpectedErr: io.ErrUnexpectedEOF,\n\t\t},\n\t\t{\n\t\t\tname: \"RecursiveDNSServerMulticast\",\n\t\t\tbuf: []byte{\n\t\t\t\t25, 3, 0, 0,\n\t\t\t\t0, 0, 0, 0,\n\t\t\t\t255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,\n\t\t\t},\n\t\t\texpectedErr: ErrNDPOptMalformedBody,\n\t\t},\n\t\t{\n\t\t\tname: \"RecursiveDNSServerUnspecified\",\n\t\t\tbuf: []byte{\n\t\t\t\t25, 3, 0, 0,\n\t\t\t\t0, 0, 0, 0,\n\t\t\t\t0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n\t\t\t},\n\t\t\texpectedErr: ErrNDPOptMalformedBody,\n\t\t},\n\t\t{\n\t\t\tname: \"DNSSearchListLargeCompliantRFC1035\",\n\t\t\tbuf: []byte{\n\t\t\t\t31, 33, 0, 0,\n\t\t\t\t0, 0, 0, 0,\n\t\t\t\t63, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',\n\t\t\t\t'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',\n\t\t\t\t'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',\n\t\t\t\t'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',\n\t\t\t\t'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',\n\t\t\t\t'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',\n\t\t\t\t'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',\n\t\t\t\t'i', 'j', 'k',\n\t\t\t\t63, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',\n\t\t\t\t'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',\n\t\t\t\t'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',\n\t\t\t\t'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',\n\t\t\t\t'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',\n\t\t\t\t'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',\n\t\t\t\t'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',\n\t\t\t\t'i', 'j', 'k',\n\t\t\t\t63, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',\n\t\t\t\t'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',\n\t\t\t\t'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',\n\t\t\t\t'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',\n\t\t\t\t'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',\n\t\t\t\t'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',\n\t\t\t\t'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',\n\t\t\t\t'i', 'j', 'k',\n\t\t\t\t62, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',\n\t\t\t\t'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',\n\t\t\t\t'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',\n\t\t\t\t'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',\n\t\t\t\t'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',\n\t\t\t\t'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',\n\t\t\t\t'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',\n\t\t\t\t'i', 'j',\n\t\t\t\t0,\n\t\t\t},\n\t\t\texpectedErr: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"DNSSearchListNonCompliantRFC1035\",\n\t\t\tbuf: []byte{\n\t\t\t\t31, 33, 0, 0,\n\t\t\t\t0, 0, 0, 0,\n\t\t\t\t63, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',\n\t\t\t\t'i', 'j', 'k', 'l', 'm', 'n', 'o', 
'p', 'q',\n\t\t\t\t'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',\n\t\t\t\t'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',\n\t\t\t\t'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',\n\t\t\t\t'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',\n\t\t\t\t'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',\n\t\t\t\t'i', 'j', 'k',\n\t\t\t\t63, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',\n\t\t\t\t'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',\n\t\t\t\t'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',\n\t\t\t\t'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',\n\t\t\t\t'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',\n\t\t\t\t'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',\n\t\t\t\t'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',\n\t\t\t\t'i', 'j', 'k',\n\t\t\t\t63, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',\n\t\t\t\t'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',\n\t\t\t\t'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',\n\t\t\t\t'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',\n\t\t\t\t'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',\n\t\t\t\t'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',\n\t\t\t\t'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',\n\t\t\t\t'i', 'j', 'k',\n\t\t\t\t63, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',\n\t\t\t\t'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',\n\t\t\t\t'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',\n\t\t\t\t'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',\n\t\t\t\t'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',\n\t\t\t\t'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',\n\t\t\t\t'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',\n\t\t\t\t'i', 'j', 'k',\n\t\t\t\t0,\n\t\t\t\t0, 0, 0, 0, 0, 0, 0, 0,\n\t\t\t},\n\t\t\texpectedErr: ErrNDPOptMalformedBody,\n\t\t},\n\t\t{\n\t\t\tname: \"DNSSearchListValidSmall\",\n\t\t\tbuf: []byte{\n\t\t\t\t31, 2, 0, 0,\n\t\t\t\t0, 0, 0, 0,\n\t\t\t\t6, 'a', 'b', 'c', 'd', 'e', 'f',\n\t\t\t\t0,\n\t\t\t},\n\t\t\texpectedErr: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"DNSSearchListTooSmall\",\n\t\t\tbuf: []byte{\n\t\t\t\t31, 1, 0, 0,\n\t\t\t\t0, 0, 0,\n\t\t\t},\n\t\t\texpectedErr: io.ErrUnexpectedEOF,\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\topts := NDPOptions(test.buf)\n\n\t\t\tif _, err := opts.Iter(true); !errors.Is(err, test.expectedErr) {\n\t\t\t\tt.Fatalf(\"got Iter(true) = (_, %v), want = (_, %v)\", err, test.expectedErr)\n\t\t\t}\n\n\t\t\t// test.buf may be malformed but we chose not to check\n\t\t\t// the iterator so it must return true.\n\t\t\tif _, err := opts.Iter(false); err != nil {\n\t\t\t\tt.Fatalf(\"got Iter(false) = (_, %s), want = (_, nil)\", err)\n\t\t\t}\n\t\t})\n\t}\n}", "func addBase64Padding(value string) string {\r\n\tm := len(value) % 4\r\n\tif m != 0 {\r\n\t\tvalue += strings.Repeat(\"=\", 4-m)\r\n\t}\r\n\r\n\treturn value\r\n}", "func EncodeBase64Pad(value bool) EncodeBase64Attr {\n\treturn func(m optionalAttr) {\n\t\tm[\"pad\"] = value\n\t}\n}", "func pad(data []byte, blockSize int, padder Padder) []byte {\n \tdataLen := len(data)\n\tpadLen := blockSize - (dataLen % blockSize)\n\tpadding := padder(padLen)\n\treturn append(data, padding...)\n}", "func Pad(scope *Scope, input tf.Output, paddings tf.Output) (output tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"Pad\",\n\t\tInput: []tf.Input{\n\t\t\tinput, paddings,\n\t\t},\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}", "func (o TableExternalDataConfigurationGoogleSheetsOptionsOutput) SkipLeadingRows() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v TableExternalDataConfigurationGoogleSheetsOptions) *int { return v.SkipLeadingRows }).(pulumi.IntPtrOutput)\n}", "func (c *Context) 
PADDL(mx, x operand.Op) {\n\tc.addinstruction(x86.PADDL(mx, x))\n}" ]
[ "0.5929429", "0.56697994", "0.5376924", "0.5368815", "0.5289138", "0.523237", "0.5228269", "0.5175912", "0.5130749", "0.5128292", "0.51032317", "0.5100639", "0.5078234", "0.50763285", "0.5069053", "0.50648904", "0.5044292", "0.50086397", "0.49749118", "0.4940589", "0.48642007", "0.48636684", "0.48236135", "0.4786937", "0.4782603", "0.47820526", "0.47820526", "0.47598302", "0.47456676", "0.4706876", "0.4699964", "0.46916112", "0.4689924", "0.46758682", "0.46714908", "0.46395397", "0.4634715", "0.46282932", "0.46277443", "0.4623125", "0.45979458", "0.4571442", "0.45664707", "0.45408887", "0.45355636", "0.45265037", "0.45219356", "0.4521876", "0.45074618", "0.44696182", "0.44464892", "0.4437232", "0.44348547", "0.4415733", "0.4413174", "0.43932196", "0.4366323", "0.43643567", "0.4357523", "0.43557897", "0.43449864", "0.43418655", "0.43389693", "0.43297955", "0.43204835", "0.43128806", "0.42994455", "0.4288248", "0.4285072", "0.42793497", "0.42779383", "0.42758372", "0.42728564", "0.42526802", "0.42514485", "0.42464224", "0.42433295", "0.4241175", "0.42340377", "0.42209506", "0.4216119", "0.42144588", "0.42061412", "0.420198", "0.4189605", "0.41867584", "0.41815865", "0.41745117", "0.41743815", "0.4168188", "0.41641954", "0.41636974", "0.4161706", "0.41602466", "0.4150564", "0.41476795", "0.41334078", "0.41303742", "0.4127615", "0.41252413" ]
0.60436326
0
Try to pad a query that already contains an OPT record with padding. The query should be unmodified by AddEdnsPadding.
func TestAddEdnsPaddingCompressedPaddedQuery(t *testing.T) { paddedQuery := simpleQuery paddedQuery.Additionals = make([]dnsmessage.Resource, len(simpleQuery.Additionals)) copy(paddedQuery.Additionals, simpleQuery.Additionals) paddedQuery.Additionals = append(paddedQuery.Additionals, dnsmessage.Resource{ Header: dnsmessage.ResourceHeader{ Name: dnsmessage.MustNewName("."), Class: dnsmessage.ClassINET, TTL: 0, }, Body: &dnsmessage.OPTResource{ Options: []dnsmessage.Option{ { Code: OptResourcePaddingCode, Data: make([]byte, 5), }, }, }, }, ) originalOnWire := mustPack(&paddedQuery) paddedOnWire, err := AddEdnsPadding(mustPack(&paddedQuery)) if err != nil { t.Errorf("Failed to pad padded query: %v", err) } if !bytes.Equal(originalOnWire, paddedOnWire) { t.Errorf("AddEdnsPadding tampered with a query that was already padded") } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestAddEdnsPaddingCompressedOptQuery(t *testing.T) {\n\toptQuery := simpleQuery\n\toptQuery.Additionals = make([]dnsmessage.Resource, len(simpleQuery.Additionals))\n\tcopy(optQuery.Additionals, simpleQuery.Additionals)\n\n\toptQuery.Additionals = append(optQuery.Additionals,\n\t\tdnsmessage.Resource{\n\t\t\tHeader: dnsmessage.ResourceHeader{\n\t\t\t\tName: dnsmessage.MustNewName(\".\"),\n\t\t\t\tClass: dnsmessage.ClassINET,\n\t\t\t\tTTL: 0,\n\t\t\t},\n\t\t\tBody: &dnsmessage.OPTResource{\n\t\t\t\tOptions: []dnsmessage.Option{},\n\t\t\t},\n\t\t},\n\t)\n\tpaddedOnWire, err := AddEdnsPadding(mustPack(&optQuery))\n\tif err != nil {\n\t\tt.Errorf(\"Failed to pad query with OPT but no padding: %v\", err)\n\t}\n\tif len(paddedOnWire)%PaddingBlockSize != 0 {\n\t\tt.Errorf(\"AddEdnsPadding failed to correctly pad query with OPT but no padding\")\n\t}\n}", "func TestAddEdnsPaddingUncompressedQuery(t *testing.T) {\n\tif len(uncompressedQueryBytes)%PaddingBlockSize == 0 {\n\t\tt.Errorf(\"uncompressedQueryBytes does not require padding, so this test is invalid\")\n\t}\n\tpadded, err := AddEdnsPadding(uncompressedQueryBytes)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif len(padded)%PaddingBlockSize != 0 {\n\t\tt.Errorf(\"AddEdnsPadding failed to correctly pad uncompressed query\")\n\t}\n}", "func TestAddEdnsPaddingCompressedQuery(t *testing.T) {\n\tif len(compressedQueryBytes)%PaddingBlockSize == 0 {\n\t\tt.Errorf(\"compressedQueryBytes does not require padding, so this test is invalid\")\n\t}\n\tpadded, err := AddEdnsPadding(compressedQueryBytes)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif len(padded)%PaddingBlockSize != 0 {\n\t\tt.Errorf(\"AddEdnsPadding failed to correctly pad compressed query\")\n\t}\n}", "func pad(unpadded []byte, desiredLength int) []byte {\n\tif len(unpadded) == desiredLength {\n\t\treturn unpadded\n\t}\n\ttoAppend := desiredLength - len(unpadded)\n\treturn append(unpadded, bytes.Repeat([]byte{byte(0x00)}, toAppend)...)\n}", "func padWithSpace(source string, prefix, suffix int) string {\n\tif source == \"\" {\n\t\treturn source\n\t}\n\n\treturn strings.Repeat(\" \", prefix) + source + strings.Repeat(\" \", suffix)\n}", "func padWithSpace(source string, prefix, suffix int) string {\n\tif source == \"\" {\n\t\treturn source\n\t}\n\treturn strings.Repeat(\" \", prefix) + source + strings.Repeat(\" \", suffix)\n}", "func WithPaddingAllowed() ParserOption {\n\treturn func(p *Parser) {\n\t\tp.decodePaddingAllowed = true\n\t}\n}", "func PADDB(mx, x operand.Op) { ctx.PADDB(mx, x) }", "func (enc Encoding) WithPadding(padding rune) *Encoding {\n\tswitch {\n\tcase padding < NoPadding || padding == '\\r' || padding == '\\n' || padding > 0xff:\n\t\tpanic(\"invalid padding\")\n\tcase padding != NoPadding && enc.decodeMap[byte(padding)] != invalidIndex:\n\t\tpanic(\"padding contained in alphabet\")\n\t}\n\tenc.padChar = padding\n\treturn &enc\n}", "func PADDD(mx, x operand.Op) { ctx.PADDD(mx, x) }", "func (c *Context) PADDB(mx, x operand.Op) {\n\tc.addinstruction(x86.PADDB(mx, x))\n}", "func fixPadding(b64 string) string {\n\tcount := 4 - len(b64)%4\n\tif count > 0 && count < 4 {\n\t\treturn b64 + strings.Repeat(\"=\", count)\n\t}\n\treturn b64\n}", "func Pad(prefix string, reqd int) string {\n\treturn strings.Repeat(\"0\", 5-len(prefix)) + prefix\n}", "func buildPadStr(str string, padStr string, padLen int, padLeft bool, padRight bool) string {\n\n\t// When padded length is less then the current string size\n\tif padLen < utf8.RuneCountInString(str) {\n\t\treturn str\n\t}\n\n\tpadLen 
-= utf8.RuneCountInString(str)\n\n\ttargetLen := padLen\n\n\ttargetLenLeft := targetLen\n\ttargetLenRight := targetLen\n\tif padLeft && padRight {\n\t\ttargetLenLeft = padLen / 2\n\t\ttargetLenRight = padLen - targetLenLeft\n\t}\n\n\tstrToRepeatLen := utf8.RuneCountInString(padStr)\n\n\trepeatTimes := int(math.Ceil(float64(targetLen) / float64(strToRepeatLen)))\n\trepeatedString := strings.Repeat(padStr, repeatTimes)\n\n\tleftSide := \"\"\n\tif padLeft {\n\t\tleftSide = repeatedString[0:targetLenLeft]\n\t}\n\n\trightSide := \"\"\n\tif padRight {\n\t\trightSide = repeatedString[0:targetLenRight]\n\t}\n\n\treturn leftSide + str + rightSide\n}", "func (g *GroupedAVP) Padding() int {\n\treturn 0\n}", "func padID(id string) string {\n\texpectedLen := 0\n\tswitch {\n\tcase len(id) < 16:\n\t\texpectedLen = 16\n\tcase len(id) > 16 && len(id) < 32:\n\t\texpectedLen = 32\n\tdefault:\n\t\treturn id\n\t}\n\n\treturn pads[expectedLen-len(id)] + id\n}", "func UseDataPadding(p uint64) Option {\n\treturn func(o *Options) {\n\t\to.DataPadding = p\n\t}\n}", "func padding(size int) string {\n\tresult := \"\"\n\tfor i := 0; i < size; i++ {\n\t\tresult += \" \"\n\t}\n\treturn result\n}", "func applyAdditionalQueryOptions(queryString string, queryLimit, querySkip int) (string, error) {\n\tconst jsonQueryFields = \"fields\"\n\tconst jsonQueryLimit = \"limit\"\n\tconst jsonQuerySkip = \"skip\"\n\t//create a generic map for the query json\n\tjsonQueryMap := make(map[string]interface{})\n\t//unmarshal the selector json into the generic map\n\tdecoder := json.NewDecoder(bytes.NewBuffer([]byte(queryString)))\n\tdecoder.UseNumber()\n\terr := decoder.Decode(&jsonQueryMap)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif fieldsJSONArray, ok := jsonQueryMap[jsonQueryFields]; ok {\n\t\tswitch fieldsJSONArray.(type) {\n\t\tcase []interface{}:\n\t\t\t//Add the \"_id\" field, these are needed by default\n\t\t\tjsonQueryMap[jsonQueryFields] = append(fieldsJSONArray.([]interface{}),\n\t\t\t\tidField)\n\t\tdefault:\n\t\t\treturn \"\", fmt.Errorf(\"fields definition must be an array\")\n\t\t}\n\t}\n\t// Add limit\n\t// This will override any limit passed in the query.\n\t// Explicit paging not yet supported.\n\tjsonQueryMap[jsonQueryLimit] = queryLimit\n\t// Add skip\n\t// This will override any skip passed in the query.\n\t// Explicit paging not yet supported.\n\tjsonQueryMap[jsonQuerySkip] = querySkip\n\t//Marshal the updated json query\n\teditedQuery, err := json.Marshal(jsonQueryMap)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tlogger.Debugf(\"Rewritten query: %s\", editedQuery)\n\treturn string(editedQuery), nil\n}", "func StringPadding(str string, le int) string {\n\tl := le - len(str)\n\tif l > 0 {\n\t\tfor i := 0; i < l; i++ {\n\t\t\tstr = str + \" \"\n\t\t}\n\t}\n\treturn str\n}", "func UseIndexPadding(p uint64) Option {\n\treturn func(o *Options) {\n\t\to.IndexPadding = p\n\t}\n}", "func (e *EDNS0_PADDING) Option() uint16 { return EDNS0PADDING }", "func paddedAppend(size uint, dst, src []byte) []byte {\n\tfor i := 0; i < int(size)-len(src); i++ {\n\t\tdst = append(dst, 0)\n\t}\n\treturn append(dst, src...)\n}", "func padToLength(source string, prefix int) string {\n\treturn fmt.Sprintf(fmt.Sprintf(\"%%-%ds\", prefix), source)\n}", "func pad(d []byte, n int) []byte {\n\td = append(d, make([]byte, n)...)\n\treturn d\n}", "func stringPad(str string, padd int) string {\n\t// Add padding\n\tif len(str) < padd {\n\t\tstr = str + strings.Repeat(\" \", padd-len(str))\n\t\t// Clip\n\t} else {\n\t\tstr = str[:padd-2] + \" 
\"\n\t}\n\treturn str\n}", "func (c *Context) PADDD(mx, x operand.Op) {\n\tc.addinstruction(x86.PADDD(mx, x))\n}", "func Pad(s, pad string, width int) string {\n\tgap := width - DisplayWidth(s)\n\tif gap > 0 {\n\t\tgapLeft := int(math.Ceil(float64(gap / 2)))\n\t\tgapRight := gap - gapLeft\n\t\treturn strings.Repeat(string(pad), gapLeft) + s + strings.Repeat(string(pad), gapRight)\n\t}\n\treturn s\n}", "func Pad(s, pad string, width int) string {\n\tgap := width - DisplayWidth(s)\n\tif gap > 0 {\n\t\tgapLeft := int(math.Ceil(float64(gap / 2)))\n\t\tgapRight := gap - gapLeft\n\t\treturn strings.Repeat(string(pad), gapLeft) + s + strings.Repeat(string(pad), gapRight)\n\t}\n\treturn s\n}", "func pad(s string, w int, c int) string {\n\tif w <= len(s) {\n\t\treturn s\n\t}\n\treturn s + strings.Repeat(string(c), w-len(s))\n}", "func PADDL(mx, x operand.Op) { ctx.PADDL(mx, x) }", "func (p *Patch) SetPadding(value mat.AABB) {\n\tp.Padding = value\n\tp.SetRegion(p.Region)\n}", "func (_Flopper *FlopperSession) Pad() (*big.Int, error) {\n\treturn _Flopper.Contract.Pad(&_Flopper.CallOpts)\n}", "func pad(in []byte, length int) []byte {\n\tpadding := length - (len(in) % length)\n\tif padding == 0 {\n\t\tpadding = length\n\t}\n\tfor i := 0; i < padding; i++ {\n\t\tin = append(in, byte(padding))\n\t}\n\treturn in\n}", "func setupPadding() {\n\n\tpaddingMap[0] = \"10101010101010101010101010101010\"\n\tpaddingMap[1] = \"0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f\"\n\tpaddingMap[2] = \"0e0e0e0e0e0e0e0e0e0e0e0e0e0e\"\n\tpaddingMap[3] = \"0d0d0d0d0d0d0d0d0d0d0d0d0d\"\n\tpaddingMap[4] = \"0c0c0c0c0c0c0c0c0c0c0c0c\"\n\tpaddingMap[5] = \"0b0b0b0b0b0b0b0b0b0b0b\"\n\tpaddingMap[6] = \"0a0a0a0a0a0a0a0a0a0a\"\n\tpaddingMap[7] = \"090909090909090909\"\n\tpaddingMap[8] = \"0808080808080808\"\n\tpaddingMap[9] = \"07070707070707\"\n\tpaddingMap[10] = \"060606060606\"\n\tpaddingMap[11] = \"0505050505\"\n\tpaddingMap[12] = \"04040404\"\n\tpaddingMap[13] = \"030303\"\n\tpaddingMap[14] = \"0202\"\n\tpaddingMap[15] = \"01\"\n}", "func updateRowPad(columnPad []int, columnList []string) []string {\n\t// Make sure padding list is at least as long as row list\n\tst := columnList\n\tfixLen := len(columnPad) - len(st)\n\tif fixLen < 0 {\n\t\t// Get last value of column pad and simply re-use it to fill columnPad up\n\t\tvalueToUse := columnPad[len(columnPad)-1]\n\t\tfor i := 0; i < fixLen*(-1); i++ {\n\t\t\tcolumnPad = append(columnPad, valueToUse)\n\t\t}\n\t}\n\tfor i := range st {\n\t\t// Add padding\n\t\tif len(st[i]) < columnPad[i] {\n\t\t\tst[i] = st[i] + strings.Repeat(\" \", columnPad[i]-len(st[i]))\n\t\t\t// Clip\n\t\t} else {\n\t\t\tst[i] = st[i][:columnPad[i]-2] + \" \"\n\t\t}\n\t}\n\treturn st\n}", "func pad(b *bytes.Buffer, str string) {\n\tif b.Len() == 0 {\n\t\treturn\n\t}\n\tb.WriteString(str)\n}", "func (d *DecimalAlign) Pad(v interface{}) string {\n\tvar lp int\n\tif s, ok := v.(string); ok {\n\t\t// If a string then look for \".\".\n\t\t// If found then lp=num chars before but excluding it.\n\t\t// If not found then use length of string\n\t\tlp = strings.Index(s, \".\")\n\t\tif lp < 0 {\n\t\t\tlp = len(s)\n\t\t}\n\t} else {\n\t\tvf, _ := util.ToFloat64(v)\n\t\tlp = len(fmt.Sprintf(\"%.0f\", vf))\n\t}\n\treturn strconv.Itoa(d.lp - lp)\n}", "func PadSpace(s string, size int) string {\n\tfor i := len(s); i < size; i++ {\n\t\ts = s + \" \"\n\t}\n\treturn s\n}", "func padSpaces(s string, length int, align TableAlignment) string {\n\tsl := charLen(s)\n\n\t// Ensure that padding size is equal or greater\n\t// than the length of the 
string to pad to avoid\n\t// a panic later due to a negative repeat count.\n\tif sl > length {\n\t\treturn s\n\t}\n\tswitch align {\n\tcase AlignRight:\n\t\treturn strings.Repeat(\" \", length-sl) + s\n\tcase AlignCenter:\n\t\tlen := float64(length-sl) / float64(2)\n\t\tpad := strings.Repeat(\" \", int(math.Ceil(len/float64(1))))\n\t\treturn pad[:int(math.Floor(float64(len)))] + s + pad[:int(math.Ceil(float64(len)))]\n\tdefault:\n\t\t// AlignLeft.\n\t\treturn s + strings.Repeat(\" \", length-sl)\n\t}\n}", "func Pad(s string, ml int) string {\n\tfor i := len(s); i < ml; i++ {\n\t\ts += \" \"\n\t}\n\treturn s\n}", "func padToMaxLength(source string) string {\n\treturn fmt.Sprintf(fmt.Sprintf(\"%%-%ds\", maxLenPrefix), source)\n}", "func pad(src []byte) []byte {\n\tpadding := aes.BlockSize - len(src)%aes.BlockSize\n\tpadtext := bytes.Repeat([]byte{byte(padding)}, padding)\n\n\treturn append(src, padtext...)\n}", "func (_Flopper *FlopperCallerSession) Pad() (*big.Int, error) {\n\treturn _Flopper.Contract.Pad(&_Flopper.CallOpts)\n}", "func (_Flopper *FlopperCaller) Pad(opts *bind.CallOpts) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _Flopper.contract.Call(opts, out, \"pad\")\n\treturn *ret0, err\n}", "func (c *Collection) buildJustTooBigDBPrefix() []byte {\n\treturn append(c.prefix, prefixCollectionsData+1)\n}", "func addBase64Padding(value string) string {\r\n\tm := len(value) % 4\r\n\tif m != 0 {\r\n\t\tvalue += strings.Repeat(\"=\", 4-m)\r\n\t}\r\n\r\n\treturn value\r\n}", "func padData(rawData []byte) []byte {\n\tneedPadding := aes.BlockSize - ((len(rawData) + 2) % aes.BlockSize)\n\n\tvar dataBuf bytes.Buffer\n\tdataBuf.Grow(2 + len(rawData) + (aes.BlockSize % (len(rawData) + 2)))\n\n\tdataBuf.Write([]byte(\"|\"))\n\tdataBuf.Write(rawData)\n\tdataBuf.Write([]byte(\"|\"))\n\n\tfor i := 0; i < needPadding; i++ {\n\t\tdataBuf.Write([]byte(\" \"))\n\t}\n\n\treturn dataBuf.Bytes()\n}", "func PKCS7Pad(buf []byte, blockSize int) []byte {\n\tif blockSize < 0 || blockSize > 0xff {\n\t\tpanic(\"PKCS7Pad: invalid block size\")\n\t}\n\t// Find the number (and value) of padding bytes.\n\tn := blockSize - (len(buf) % blockSize)\n\n\treturn append(dup(buf), bytes.Repeat([]byte{byte(n)}, n)...)\n}", "func (d *Decoder) padEntry(idx *Index, e *Entry, read int) error {\n\tif idx.Version == 4 {\n\t\treturn nil\n\t}\n\n\tentrySize := read + len(e.Name)\n\tpadLen := 8 - entrySize%8\n\t_, err := io.CopyN(ioutil.Discard, d.r, int64(padLen))\n\treturn err\n}", "func (self *Graphics) SetBoundsPaddingA(member int) {\n self.Object.Set(\"boundsPadding\", member)\n}", "func rpad(s string, padding int) string {\n\ttemplate := fmt.Sprintf(\"%%-%ds\", padding)\n\treturn fmt.Sprintf(template, s)\n}", "func (w *messageWriter) pad(alignment int) error {\n\tn, err := w.Write(padding[:w.pos%alignment])\n\tif err != nil {\n\t\treturn err\n\t}\n\tw.pos += n\n\treturn nil\n}", "func pad(msg string) string {\n\twidth := defaultWidth\n\tsize, err := ts.GetSize()\n\tif err == nil {\n\t\t// If `ts.GetSize()` was successful, set the width to the number\n\t\t// of columns present in the terminal LFS is attached to.\n\t\t// Otherwise, fall-back to `defaultWidth`.\n\t\twidth = size.Col()\n\t}\n\n\t// Pad the string with whitespace so that printing at the start of the\n\t// line removes all traces from the last print.removes all traces from\n\t// the last print.\n\tpadding := strings.Repeat(\" \", maxInt(0, width-len(msg)))\n\n\treturn msg + padding\n}", "func canonicalPadding(b []byte) error 
{\n\tswitch {\n\tcase b[0]&0x80 == 0x80:\n\t\treturn errNegativeValue\n\tcase len(b) > 1 && b[0] == 0x00 && b[1]&0x80 != 0x80:\n\t\treturn errExcessivelyPaddedValue\n\tdefault:\n\t\treturn nil\n\t}\n}", "func getPadding(packetLen int) int {\n\tif packetLen%4 == 0 {\n\t\treturn 0\n\t}\n\treturn 4 - (packetLen % 4)\n}", "func (t DNSOverTCP) RequiresPadding() bool {\n\treturn t.requiresPadding\n}", "func Pad(scope *Scope, input tf.Output, paddings tf.Output) (output tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"Pad\",\n\t\tInput: []tf.Input{\n\t\t\tinput, paddings,\n\t\t},\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}", "func Pad(left string, right string) string {\n\tfor len(left+right) < 125 {\n\t\tleft += \" • \"\n\t}\n\n\treturn left + right\n}", "func PKCS7Pad(bytes []byte, alignAt int) []byte {\n\t//The PKCS #7 padding string consists of a sequence of bytes, each of which is equal to the total number of padding bytes added.\n\tvar padding []byte\n\t//padLen := alignAt - len(bytes)\n\tpadLen := alignAt - (len(bytes) % alignAt)\n\tswitch padLen {\n\tcase 0:\n\t\tpadding = []byte(\"\")\n\tcase 1:\n\t\tpadding = []byte(\"\\x01\")\n\tcase 2:\n\t\tpadding = []byte(\"\\x02\\x02\")\n\tcase 3:\n\t\tpadding = []byte(\"\\x03\\x03\\x03\")\n\tcase 4:\n\t\tpadding = []byte(\"\\x04\\x04\\x04\\x04\")\n\tcase 5:\n\t\tpadding = []byte(\"\\x05\\x05\\x05\\x05\\x05\")\n\tcase 6:\n\t\tpadding = []byte(\"\\x06\\x06\\x06\\x06\\x06\\x06\")\n\tcase 7:\n\t\tpadding = []byte(\"\\x07\\x07\\x07\\x07\\x07\\x07\\x07\")\n\tdefault:\n\t\tlog.Fatalf(\"PKCS7Pad unhandled: %v\", padLen)\n\t}\n\n\tvar paddedBytes []byte\n\tpaddedBytes = append(paddedBytes, bytes...)\n\tpaddedBytes = append(paddedBytes, padding...)\n\n\treturn paddedBytes\n}", "func (c *Context) PADDL(mx, x operand.Op) {\n\tc.addinstruction(x86.PADDL(mx, x))\n}", "func EncodeStringWithPadding(data string, targetLength int) []byte {\n\tvar buf bytes.Buffer\n\n\tif len(data) < targetLength {\n\t\tfor i := 0; i < targetLength-len(data); i++ {\n\t\t\tbuf.WriteByte(0)\n\t\t}\n\t}\n\n\tbuf.Write([]byte(data))\n\treturn buf.Bytes()\n}", "func PadEnd(s string, l int, c string) string {\n\tvar strLen int\n\tif l <= 0 {\n\t\tstrLen = 0\n\t} else {\n\t\tstrLen = len(s)\n\t}\n\n\tif strLen < l {\n\t\treturn s + createPadding(l-strLen, c)\n\t}\n\n\treturn s\n}", "func (w *Wrapper) buildQueryOptions() (before string, after string) {\n\tfor _, v := range w.queryOptions {\n\t\tswitch v {\n\t\tcase \"ALL\", \"DISTINCT\", \"SQL_CACHE\", \"SQL_NO_CACHE\", \"DISTINCTROW\", \"HIGH_PRIORITY\", \"STRAIGHT_JOIN\", \"SQL_SMALL_RESULT\", \"SQL_BIG_RESULT\", \"SQL_BUFFER_RESULT\", \"SQL_CALC_FOUND_ROWS\", \"LOW_PRIORITY\", \"QUICK\", \"IGNORE\", \"DELAYED\":\n\t\t\tbefore += fmt.Sprintf(\"%s, \", v)\n\t\tcase \"FOR UPDATE\", \"LOCK IN SHARE MODE\":\n\t\t\tafter += fmt.Sprintf(\"%s, \", v)\n\t\t}\n\t}\n\tif before != \"\" {\n\t\tbefore = fmt.Sprintf(\"%s \", trim(before))\n\t}\n\tif after != \"\" {\n\t\tafter = fmt.Sprintf(\"%s \", trim(after))\n\t}\n\treturn\n}", "func EncodeBase64Pad(value bool) EncodeBase64Attr {\n\treturn func(m optionalAttr) {\n\t\tm[\"pad\"] = value\n\t}\n}", "func (p Pretty) Padding(strs ...string) []string {\n\tvar ret []string\n\tvar right int\n\n\tleft := p.OutputPadding.Width\n\n\t// Loop through each string induvidually\n\tfor _, str := range strs {\n\t\tvar newStr string\n\t\t// Split by newline\n\t\tfor i, s := range strings.Split(str, p.Newline) {\n\t\t\tnewStr += p.addPaddingToLine(s, 
left, right)\n\t\t\tif i < len(strings.Split(str, p.Newline))-1 || p.UseTrailingNewLine {\n\t\t\t\tnewStr += \"\\n\"\n\t\t\t}\n\t\t}\n\t\tret = append(ret, newStr)\n\t}\n\n\treturn ret\n}", "func PKCS(data []byte, mode string) (padded_data []byte) {\r\n\tvar pad_num int\r\n\r\n\tif mode == \"add\" {\r\n\t\trem := len(data) % userlib.AESBlockSizeBytes\r\n\t\tpad_num = userlib.AESBlockSizeBytes - rem //number to pad by\r\n\t\t//pad := make([]byte, pad_num) //pad array we are appending later\r\n\t\tpadded_data = data[:]\r\n\t\tfor i := 0; i < pad_num; i++ {\r\n\t\t\t//pad = append(pad, byte(pad_num))\r\n\t\t\tpadded_data = append(padded_data, byte(pad_num))\r\n\t\t}\r\n\r\n\t\t//userlib.DebugMsg(\"%d\", padded_data)\r\n\t} else { //remove padding\r\n\t\t//last byte is amount of padding there is\r\n\t\t//ex: d = [1022] means 2 bytes of padding so return d[:2] which is [10]\r\n\r\n\t\tnum := len(data) - 1\r\n\t\tpad_num = len(data) - int(data[num]) //piazza: convert to byte > hex string > int?\r\n\t\tpadded_data = data[:pad_num]\r\n\t}\r\n\r\n\treturn padded_data\r\n}", "func paddKey(key string, val int) string {\n\tif len(key) == val {\n\t\treturn key\n\t}\n\n\tpaddedKey := []byte{}\n\ti := 0\n\tfor len(paddedKey) < val {\n\t\tpaddedKey = append(paddedKey, key[i])\n\n\t\tif i < len(key)-1 {\n\t\t\ti++\n\t\t} else {\n\t\t\ti = 0\n\t\t}\n\t}\n\n\treturn string(paddedKey)\n}", "func pad(blockSize int, buf []byte) []byte {\n\tpadLen := blockSize - (len(buf) % blockSize)\n\tpadding := bytes.Repeat([]byte{byte(padLen)}, padLen)\n\treturn append(buf, padding...)\n}", "func (m Mixer) EncodeIDPadding(password string, id uint64, paddingLen int) string {\n\treturn m.EncodeBase32Padding(password, strconv.FormatUint(id, 10), paddingLen)\n}", "func PadPKCS7(src []byte, blockSize int) []byte {\n\tmissing := blockSize - (len(src) % blockSize)\n\tnewSize := len(src) + missing\n\tdest := make([]byte, newSize, newSize)\n\t// copy data\n\tfor i := 0; i < len(src); i++ {\n\t\tdest[i] = src[i]\n\t}\n\t// fill in the rest\n\tmissingB := byte(missing)\n\tfor i := newSize - missing; i < newSize; i++ {\n\t\tdest[i] = missingB\n\t}\n\treturn dest\n}", "func evenPad(a *string, b *string) {\n\tif len(*a) != len(*b) {\n\t\tfor len(*a) < len(*b) {\n\t\t\t*a = \"0\" + *a\n\t\t}\n\t\tfor len(*b) < len(*a) {\n\t\t\t*b = \"0\" + *b\n\t\t}\n\t}\n}", "func pad(message []byte) []byte {\n lenPadding := aes.BlockSize - (len(message) % aes.BlockSize)\n for i := 0; i < lenPadding; i++ {\n message = append(message, byte(lenPadding))\n }\n return message\n}", "func pkcs7Pad(b []byte, blocksize int) ([]byte, error) {\n\tif blocksize <= 0 {\n\t\treturn nil, fmt.Errorf(\"invalid block size\")\n\t}\n\tif b == nil || len(b) == 0 {\n\t\treturn nil, fmt.Errorf(\"invalid pkcs7 data format\")\n\n\t}\n\tn := blocksize - (len(b) % blocksize)\n\tpb := make([]byte, len(b)+n)\n\tcopy(pb, b)\n\tcopy(pb[len(b):], bytes.Repeat([]byte{byte(n)}, n))\n\treturn pb, nil\n}", "func (uc *Cypher) pkcs7unpad(padded []byte, blockSize int) []byte {\n\n\tdataLen := len(padded)\n\tpaddingCount := int(padded[dataLen-1])\n\n\tif paddingCount > blockSize || paddingCount <= 0 {\n\t\treturn padded //data is not padded (or not padded correctly), return as is\n\t}\n\n\tpadding := padded[dataLen-paddingCount : dataLen-1]\n\n\tfor _, b := range padding {\n\t\tif int(b) != paddingCount {\n\t\t\treturn padded //data is not padded (or not padded correcly), return as is\n\t\t}\n\t}\n\n\treturn padded[:len(padded)-paddingCount] //return data - padding\n}", "func 
padBytesNeeded(elementLen int) int {\n\treturn 4*(elementLen/4+1) - elementLen\n}", "func rightPad(s string, padStr string, pLen int) string {\n\treturn s + strings.Repeat(padStr, pLen)\n}", "func ClearOPT(msg *dns.Msg) *dns.Msg {\n\textra := make([]dns.RR, len(msg.Extra))\n\tcopy(extra, msg.Extra)\n\n\tmsg.Extra = []dns.RR{}\n\n\tfor _, rr := range extra {\n\t\tswitch rr.(type) {\n\t\tcase *dns.OPT:\n\t\t\tcontinue\n\t\tdefault:\n\t\t\tmsg.Extra = append(msg.Extra, rr)\n\t\t}\n\t}\n\n\treturn msg\n}", "func buildOffset(offset int64) string {\n\tif offset == 0 {\n\t\treturn \"\"\n\t}\n\treturn fmt.Sprintf(\" OFFSET %d\", offset)\n}", "func PKCS7Padding(text string, length int) string {\n\tpaddingLength := length - (len(text) % length)\n\n\tbs := make([]byte, 1)\n\tbinary.PutUvarint(bs, uint64(paddingLength))\n\n\tpadding := bytes.Repeat(bs, paddingLength)\n\n\treturn text + string(padding)\n}", "func pad(data []byte, blockSize int, padder Padder) []byte {\n \tdataLen := len(data)\n\tpadLen := blockSize - (dataLen % blockSize)\n\tpadding := padder(padLen)\n\treturn append(data, padding...)\n}", "func PadPKCS7(bytesToPad []byte, blockSize int) []byte {\r\n\r\n\tif len(bytesToPad) == 0 {\r\n\t\tpanic(\"cannot pad an empty []byte\")\r\n\t}\r\n\r\n\tpaddingSize := blockSize - (len(bytesToPad) % blockSize)\r\n\r\n\t// bytes.Repeat needs []byte - we have int\r\n\t// Thus we use our own function\r\n\tpadding := ByteRepeat(byte(paddingSize), paddingSize)\r\n\r\n\t// Second param of append needs the primitive time of the first param\r\n\t// For example in this case bytesToPad is []byte so padding should be byte\r\n\t// But because it's []byte, we pass it as padding... to pass the bytes\r\n\t// one by one\r\n\t// At this point I am not exactly sure how this works other than it works!\r\n\toutputBytes := append(bytesToPad, padding...)\r\n\r\n\treturn outputBytes\r\n}", "func (b *Bound) Pad(amount float64) *Bound {\n\tb.sw.SetX(b.sw.X() - amount)\n\tb.sw.SetY(b.sw.Y() - amount)\n\n\tb.ne.SetX(b.ne.X() + amount)\n\tb.ne.SetY(b.ne.Y() + amount)\n\n\treturn b\n}", "func PadBoth(str string, padStr string, padLen int) string {\n\treturn buildPadStr(str, padStr, padLen, true, true)\n}", "func CellPadding(value string) attributes.Attribute {\n\treturn attributes.Attribute{\n\t\tName: \"CellPadding\",\n\t\tTempl: `{{define \"CellPadding\"}}cellpadding=\"` + value + `\"{{end}}`,\n\t}\n}", "func PaddingKey(key []byte) []byte {\n\t// Initially set to 0\n\t// Becuase it's hex byte array, so 128bit -> 32byte\n\tpaddingBytes := make([]byte, 32-len(key))\n\tkey = append(key, paddingBytes...)\n\n\treturn key\n}", "func (d *state) padAndPermute(dsbyte byte) {\n\tif d.buf == nil {\n\t\td.buf = d.storage[:0]\n\t}\n\t// Pad with this instance's domain-separator bits. We know that there's\n\t// at least one byte of space in d.buf because, if it were full,\n\t// permute would have been called to empty it. dsbyte also contains the\n\t// first one bit for the padding. See the comment in the state struct.\n\td.buf = append(d.buf, dsbyte)\n\tzerosStart := len(d.buf)\n\td.buf = d.storage[:d.rate]\n\tfor i := zerosStart; i < d.rate; i++ {\n\t\td.buf[i] = 0\n\t}\n\t// This adds the final one bit for the padding. 
Because of the way that\n\t// bits are numbered from the LSB upwards, the final bit is the MSB of\n\t// the last byte.\n\td.buf[d.rate-1] ^= 0x80\n\t// Apply the permutation\n\td.permute()\n\td.state = spongeSqueezing\n\td.buf = d.storage[:d.rate]\n\td.copyOut(d.buf)\n}", "func pad(b []byte) []byte {\n\tpadSize := aes.BlockSize - (len(b) % aes.BlockSize)\n\tfmt.Printf(\"aes.BlockSize: %d\\n\", aes.BlockSize) // 16\n\tfmt.Printf(\"padSize : %d\\n\", padSize)\n\tpad := bytes.Repeat([]byte{byte(padSize)}, padSize)\n\treturn append(b, pad...)\n}", "func buildPacket(src *Addr, dst *Addr, opt uint16) ([]byte, error) {\n\tether := layers.Ethernet{\n\t\tEthernetType: layers.EthernetTypeARP,\n\t\tSrcMAC: src.HardwareAddr,\n\t\tDstMAC: dst.HardwareAddr,\n\t}\n\tarp := layers.ARP{\n\t\tAddrType: layers.LinkTypeEthernet,\n\t\tProtocol: layers.EthernetTypeIPv4,\n\n\t\tHwAddressSize: 6,\n\t\tProtAddressSize: 4,\n\t\tOperation: opt,\n\n\t\tSourceHwAddress: src.HardwareAddr,\n\t\tSourceProtAddress: src.IP.To4(),\n\n\t\tDstHwAddress: dst.HardwareAddr,\n\t\tDstProtAddress: dst.IP.To4(),\n\t}\n\n\tbuf := gopacket.NewSerializeBuffer()\n\terr := gopacket.SerializeLayers(buf, defaultSerializeOpts, &ether, &arp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func getPadder(options DecryptionClientOptions, cekAlg string) Padder {\n\tpadder, ok := options.CryptoRegistry.GetPadder(cekAlg)\n\tif !ok {\n\t\tpadder, ok = options.CryptoRegistry.GetPadder(cekAlg[strings.LastIndex(cekAlg, \"/\")+1:])\n\t\tif !ok {\n\t\t\treturn NoPadder\n\t\t}\n\t}\n\treturn padder\n}", "func Pad(b []byte) []byte {\n\tfor i := len(b); (i % 4) != 0; i++ {\n\t\tb = append(b, 0)\n\t}\n\treturn b\n}", "func TestStringAdjustmentOptions(t *testing.T) {\n\tvar line = \"{8600}01CRDTUSD1234.56 *\"\n\tr := NewReader(strings.NewReader(line))\n\tr.line = line\n\n\terr := r.parseAdjustment()\n\trequire.Equal(t, err, nil)\n\n\tadj := r.currentFEDWireMessage.Adjustment\n\trequire.Equal(t, adj.String(), \"{8600}01CRDTUSD1234.56 \")\n\trequire.Equal(t, adj.Format(FormatOptions{VariableLengthFields: true}), \"{8600}01CRDTUSD1234.56*\")\n\trequire.Equal(t, adj.String(), adj.Format(FormatOptions{VariableLengthFields: false}))\n}", "func (c *AesCbcPkcs7Cipher) pad(b []byte) []byte {\n\tpadSize := aes.BlockSize - (len(b) % aes.BlockSize)\n\tpad := bytes.Repeat([]byte{byte(padSize)}, padSize)\n\treturn append(b, pad...)\n}", "func (t *DNSOverTCPTransport) RequiresPadding() bool {\n\treturn t.requiresPadding\n}", "func leftPad(s string, padStr string, pLen int) string {\n\tr := pLen - len(s)\n\tif r > 0 {\n\t\treturn strings.Repeat(padStr, pLen-len(s)) + s\n\t}\n\treturn s\n}", "func leftPad(input []byte, size int) (out []byte) {\n\tn := len(input)\n\tif n > size {\n\t\tn = size\n\t}\n\tout = make([]byte, size)\n\tcopy(out[len(out)-n:], input)\n\treturn\n}", "func (key Key) SetPadding(padding C.DWORD) error {\n\tif C.CryptSetKeyParam(key.hKey, C.KP_PADDING, C.LPBYTE(unsafe.Pointer(&padding)), 0) == 0 {\n\t\treturn getErr(\"Error setting padding for key\")\n\t}\n\treturn nil\n}", "func TryAddExtraLimit(ctx sessionctx.Context, node ast.StmtNode) ast.StmtNode {\n\tif ctx.GetSessionVars().SelectLimit == math.MaxUint64 || ctx.GetSessionVars().InRestrictedSQL {\n\t\treturn node\n\t}\n\tif explain, ok := node.(*ast.ExplainStmt); ok {\n\t\texplain.Stmt = TryAddExtraLimit(ctx, explain.Stmt)\n\t\treturn explain\n\t} else if sel, ok := node.(*ast.SelectStmt); ok {\n\t\tif sel.Limit != nil || sel.SelectIntoOpt != nil {\n\t\t\treturn 
node\n\t\t}\n\t\tnewSel := *sel\n\t\tnewSel.Limit = &ast.Limit{\n\t\t\tCount: ast.NewValueExpr(ctx.GetSessionVars().SelectLimit, \"\", \"\"),\n\t\t}\n\t\treturn &newSel\n\t} else if show, ok := node.(*ast.ShowStmt); ok {\n\t\t// Only when Limit is nil, for Show stmt Limit should always nil when be here,\n\t\t// and the show STMT's behavior should consist with MySQL does.\n\t\tif show.Limit != nil || !show.NeedLimitRSRow() {\n\t\t\treturn node\n\t\t}\n\t\tnewShow := *show\n\t\tnewShow.Limit = &ast.Limit{\n\t\t\tCount: ast.NewValueExpr(ctx.GetSessionVars().SelectLimit, \"\", \"\"),\n\t\t}\n\t\treturn &newShow\n\t} else if setOprStmt, ok := node.(*ast.SetOprStmt); ok {\n\t\tif setOprStmt.Limit != nil {\n\t\t\treturn node\n\t\t}\n\t\tnewSetOpr := *setOprStmt\n\t\tnewSetOpr.Limit = &ast.Limit{\n\t\t\tCount: ast.NewValueExpr(ctx.GetSessionVars().SelectLimit, \"\", \"\"),\n\t\t}\n\t\treturn &newSetOpr\n\t}\n\treturn node\n}", "func PKCSSPadding(ciphertext []byte, blockSize int) []byte {\n\tpadding := blockSize - len(ciphertext)%blockSize\n\tpadtext := bytes.Repeat([]byte{byte(padding)}, padding)\n\treturn append(ciphertext, padtext...)\n}", "func truncated(state request.Request, ret *dns.Msg, err error) (*dns.Msg, error) {\n\t// If you query for instance ANY isc.org; you get a truncated query back which miekg/dns fails to unpack\n\t// because the RRs are not finished. The returned message can be useful or useless. Return the original\n\t// query with some header bits set that they should retry with TCP.\n\tif err != dns.ErrTruncated {\n\t\treturn ret, err\n\t}\n\n\t// We may or may not have something sensible... if not reassemble something to send to the client.\n\tm := ret\n\tif ret == nil {\n\t\tm = new(dns.Msg)\n\t\tm.SetReply(state.Req)\n\t\tm.Truncated = true\n\t\tm.Authoritative = true\n\t\tm.Rcode = dns.RcodeSuccess\n\t}\n\treturn m, nil\n}" ]
[ "0.65444535", "0.58884513", "0.5867946", "0.57902974", "0.5498424", "0.54974675", "0.54956174", "0.5449674", "0.54282355", "0.5364954", "0.5342457", "0.5313182", "0.52898765", "0.5234249", "0.52095866", "0.5208116", "0.5151577", "0.50245225", "0.49983007", "0.49957857", "0.49859455", "0.4972616", "0.4969026", "0.4961194", "0.49494788", "0.49280936", "0.4923985", "0.48782673", "0.48782673", "0.48523396", "0.48452598", "0.48269656", "0.48242152", "0.48195878", "0.47970122", "0.4787888", "0.47307083", "0.4703175", "0.46935916", "0.46899265", "0.46866408", "0.46763298", "0.46760365", "0.46586615", "0.46574762", "0.46446407", "0.46240473", "0.4620872", "0.46166998", "0.45473298", "0.45392647", "0.45316395", "0.4530691", "0.45305017", "0.45283392", "0.4516265", "0.44948822", "0.44885793", "0.44838318", "0.44766054", "0.44750974", "0.44732922", "0.4471557", "0.4467501", "0.44646248", "0.44496697", "0.44416085", "0.44406202", "0.44295397", "0.44216365", "0.44193625", "0.44133848", "0.44022205", "0.4391414", "0.4377799", "0.4374411", "0.43693522", "0.43648344", "0.43642953", "0.43639678", "0.43372792", "0.42929688", "0.42875916", "0.42784867", "0.42731112", "0.42597753", "0.42582607", "0.4252107", "0.42447212", "0.42347002", "0.4223511", "0.42190516", "0.42180505", "0.42158666", "0.4214518", "0.42020193", "0.4199411", "0.41989154", "0.41870767", "0.4185392" ]
0.609781
1
ReadResponse reads a server response into the received o.
func (o *ProtectedGetPlaytimeGameTelemetryV1ProtectedSteamIdsSteamIDPlaytimeGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { switch response.Code() { case 200: result := NewProtectedGetPlaytimeGameTelemetryV1ProtectedSteamIdsSteamIDPlaytimeGetOK() if err := result.readResponse(response, consumer, o.formats); err != nil { return nil, err } return result, nil case 422: result := NewProtectedGetPlaytimeGameTelemetryV1ProtectedSteamIdsSteamIDPlaytimeGetUnprocessableEntity() if err := result.readResponse(response, consumer, o.formats); err != nil { return nil, err } return result, nil default: data, err := ioutil.ReadAll(response.Body()) if err != nil { return nil, err } return nil, fmt.Errorf("Requested GET /game-telemetry/v1/protected/steamIds/{steamId}/playtime returns an error %d: %s", response.Code(), string(data)) } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (d *ResourceHandler) ReadResponse(dataOut unsafe.Pointer, bytesToRead int32, bytesRead *int32, callback *Callback) int32 {\n\treturn lookupResourceHandlerProxy(d.Base()).ReadResponse(d, dataOut, bytesToRead, bytesRead, callback)\n}", "func (o *GetServerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetServerOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *InteractionBindReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewInteractionBindOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 404:\n\t\tresult := NewInteractionBindNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 500:\n\t\tresult := NewInteractionBindInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *InteractionUnbindReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewInteractionUnbindOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 404:\n\t\tresult := NewInteractionUnbindNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 500:\n\t\tresult := NewInteractionUnbindInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (r *ResponseReader) ReadResponse(req *Request) (res *Response, err error) {\n\tres = CreateEmptyResponse(req)\n\t_, err = readFirstLine(r, res)\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = readHeaders(r, res)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t_, err = readBodyContent(r, res)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn res, nil\n}", "func (c *Conn) ReadResponse(rmsg *Response) error {\n\tdata, err := c.ReadDataUnit()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcolor.Printf(\"@{c}<!-- RESPONSE -->\\n%s\\n\\n\", string(data))\n\terr = xml.Unmarshal(data, rmsg)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// color.Fprintf(os.Stderr, \"@{y}%s\\n\", spew.Sprintf(\"%+v\", msg))\n\tif len(rmsg.Results) != 0 {\n\t\tr := rmsg.Results[0]\n\t\tif r.IsError() {\n\t\t\treturn r\n\t\t}\n\t}\n\treturn nil\n}", "func (o *VerifyConnectionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewVerifyConnectionOK()\n\t\tif err := result.readResponse(response, 
consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *GetAvailableReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewGetAvailableOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *ClosePositionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewClosePositionOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 400:\n\t\tresult := NewClosePositionBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 401:\n\t\tresult := NewClosePositionUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 404:\n\t\tresult := NewClosePositionNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 405:\n\t\tresult := NewClosePositionMethodNotAllowed()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *DescribeServerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewDescribeServerOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewDescribeServerBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewDescribeServerNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 409:\n\t\tresult := NewDescribeServerConflict()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewDescribeServerInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *GetServerSessionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetServerSessionOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil 
{\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewGetServerSessionBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 401:\n\t\tresult := NewGetServerSessionUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewGetServerSessionNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 500:\n\t\tresult := NewGetServerSessionInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\tdata, err := ioutil.ReadAll(response.Body())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"Requested GET /dsmcontroller/namespaces/{namespace}/servers/{podName}/session returns an error %d: %s\", response.Code(), string(data))\n\t}\n}", "func (o *StartReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewStartOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (resp *PharosResponse) readResponse() {\n\tif !resp.hasBeenRead && resp.Response != nil && resp.Response.Body != nil {\n\t\tresp.data, resp.Error = ioutil.ReadAll(resp.Response.Body)\n\t\tresp.Response.Body.Close()\n\t\tresp.hasBeenRead = true\n\t}\n}", "func (o *HelloWorldReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewHelloWorldOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewHelloWorldBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewHelloWorldInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (reader *BasicRpcReader) ReadResponse(r io.Reader, method string, requestID int32, resp proto.Message) error {\n\trrh := &hadoop.RpcResponseHeaderProto{}\n\terr := readRPCPacket(r, rrh, resp)\n\tif err != nil {\n\t\treturn err\n\t} else if int32(rrh.GetCallId()) != requestID {\n\t\treturn errors.New(\"unexpected sequence number\")\n\t} else if rrh.GetStatus() != hadoop.RpcResponseHeaderProto_SUCCESS {\n\t\treturn &NamenodeError{\n\t\t\tmethod: method,\n\t\t\tmessage: rrh.GetErrorMsg(),\n\t\t\tcode: int(rrh.GetErrorDetail()),\n\t\t\texception: rrh.GetExceptionClassName(),\n\t\t}\n\t}\n\n\treturn nil\n}", "func (o *UpdateAntivirusServerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 204:\n\t\tresult := NewUpdateAntivirusServerNoContent()\n\t\tif err := result.readResponse(response, consumer, 
o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\tresult := NewUpdateAntivirusServerDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *HasEventsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewHasEventsOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 401:\n\t\tresult := NewHasEventsUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 403:\n\t\tresult := NewHasEventsForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *GetV2Reader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetV2OK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 500:\n\t\tresult := NewGetV2InternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *SaveReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 204:\n\t\tresult := NewSaveNoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 500:\n\t\tresult := NewSaveInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *TestWriteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewTestWriteOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 401:\n\t\tresult := NewTestWriteUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *AllConnectionsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewAllConnectionsOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewAllConnectionsBadRequest()\n\t\tif 
err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewAllConnectionsNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *SendDataToDeviceReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewSendDataToDeviceOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewSendDataToDeviceBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewSendDataToDeviceInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *HealthNoopReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewHealthNoopOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *PutOutOfRotationReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 204:\n\t\tresult := NewPutOutOfRotationNoContent()\n\t\tresult.HttpResponse = response\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\terrorResult := kbcommon.NewKillbillError(response.Code())\n\t\tif err := consumer.Consume(response.Body(), &errorResult); err != nil && err != io.EOF {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, errorResult\n\t}\n}", "func (o *GetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *ReplaceServerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewReplaceServerOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 202:\n\t\tresult := NewReplaceServerAccepted()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewReplaceServerBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewReplaceServerNotFound()\n\t\tif err := 
result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\tresult := NewReplaceServerDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *StatusReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewStatusOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 401:\n\t\tresult := NewStatusUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 403:\n\t\tresult := NewStatusForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func ReadResponse(r *bfe_bufio.Reader, req *Request) (*Response, error) {\n\ttp := textproto.NewReader(r)\n\tresp := &Response{\n\t\tRequest: req,\n\t}\n\n\t// Parse the first line of the response.\n\tline, err := tp.ReadLine()\n\tif err != nil {\n\t\tif err == io.EOF {\n\t\t\terr = io.ErrUnexpectedEOF\n\t\t}\n\t\treturn nil, err\n\t}\n\tf := strings.SplitN(line, \" \", 3)\n\tif len(f) < 2 {\n\t\treturn nil, &badStringError{\"malformed HTTP response\", line}\n\t}\n\treasonPhrase := \"\"\n\tif len(f) > 2 {\n\t\treasonPhrase = f[2]\n\t}\n\tresp.Status = f[1] + \" \" + reasonPhrase\n\tresp.StatusCode, err = strconv.Atoi(f[1])\n\tif err != nil {\n\t\treturn nil, &badStringError{\"malformed HTTP status code\", f[1]}\n\t}\n\n\tresp.Proto = f[0]\n\tvar ok bool\n\tif resp.ProtoMajor, resp.ProtoMinor, ok = ParseHTTPVersion(resp.Proto); !ok {\n\t\treturn nil, &badStringError{\"malformed HTTP version\", resp.Proto}\n\t}\n\n\t// Parse the response headers.\n\tmimeHeader, err := tp.ReadMIMEHeader()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp.Header = Header(mimeHeader)\n\n\tfixPragmaCacheControl(resp.Header)\n\n\terr = readTransfer(resp, r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp, nil\n}", "func (o *PostChatroomsChannelHashReadReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewPostChatroomsChannelHashReadOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 403:\n\t\tresult := NewPostChatroomsChannelHashReadForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *TogglePacketGeneratorsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 201:\n\t\tresult := NewTogglePacketGeneratorsCreated()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not 
match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *FrontPutBinaryReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewFrontPutBinaryOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *SystemPingReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewSystemPingOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 500:\n\t\tresult := NewSystemPingInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *SendDummyAlertReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewSendDummyAlertOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 400:\n\t\tresult := NewSendDummyAlertBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 404:\n\t\tresult := NewSendDummyAlertNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *GetViewsConnectionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetViewsConnectionOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewGetViewsConnectionBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *SyncCopyReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewSyncCopyOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewSyncCopyDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *PostPatientsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 
200:\n\t\tresult := NewPostPatientsOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 400:\n\t\tresult := NewPostPatientsBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 405:\n\t\tresult := NewPostPatientsMethodNotAllowed()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (c *Conn) readResponse(res *response_) error {\n\terr := c.readDataUnit()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = IgnoreEOF(scanResponse.Scan(c.decoder, res))\n\tif err != nil {\n\t\treturn err\n\t}\n\tif res.Result.IsError() {\n\t\treturn res.Result\n\t}\n\treturn nil\n}", "func (o *AllConnectionsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n switch response.Code() {\n \n case 200:\n result := NewAllConnectionsOK()\n if err := result.readResponse(response, consumer, o.formats); err != nil {\n return nil, err\n }\n return result, nil\n \n case 400:\n result := NewAllConnectionsBadRequest()\n if err := result.readResponse(response, consumer, o.formats); err != nil {\n return nil, err\n }\n return nil, result\n \n case 404:\n result := NewAllConnectionsNotFound()\n if err := result.readResponse(response, consumer, o.formats); err != nil {\n return nil, err\n }\n return nil, result\n \n default:\n return nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n }\n}", "func (o *GetMsgVpnReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewGetMsgVpnOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\tresult := NewGetMsgVpnDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (r *Response) Read(p []byte) (n int, err error) {\n\n\tif r.Error != nil {\n\t\treturn -1, r.Error\n\t}\n\n\treturn r.RawResponse.Body.Read(p)\n}", "func (o *PostPciLinksMoidReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 201:\n\t\tresult := NewPostPciLinksMoidCreated()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewPostPciLinksMoidDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *THSRAPIODFare2121Reader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewTHSRAPIODFare2121OK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 299:\n\t\tresult := NewTHSRAPIODFare2121Status299()\n\t\tif err 
:= result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 304:\n\t\tresult := NewTHSRAPIODFare2121NotModified()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *PostGatewayConnectNetaddressReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 204:\n\t\tresult := NewPostGatewayConnectNetaddressNoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewPostGatewayConnectNetaddressDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *DNSGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewDNSGetOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewDNSGetDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *PostAPIV2EventsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 201:\n\t\tresult := NewPostAPIV2EventsNoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewPostAPIV2EventsBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 403:\n\t\tresult := NewPostAPIV2EventsForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *GetGreetStatusReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetGreetStatusOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *CreateAntivirusServerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewCreateAntivirusServerOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\tresult := 
NewCreateAntivirusServerDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *PostCarsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewPostCarsOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 405:\n\t\tresult := NewPostCarsMethodNotAllowed()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *LogReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewLogOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewLogNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *ChatGetConnectedReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewChatGetConnectedOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 400:\n\t\tresult := NewChatGetConnectedBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 401:\n\t\tresult := NewChatGetConnectedUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 404:\n\t\tresult := NewChatGetConnectedNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *WebModifyReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewWebModifyOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 202:\n\t\tresult := NewWebModifyAccepted()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewWebModifyDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *GetHyperflexServerModelsMoidReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, 
error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetHyperflexServerModelsMoidOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewGetHyperflexServerModelsMoidNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\tresult := NewGetHyperflexServerModelsMoidDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *KillQueryReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 204:\n\t\tresult := NewKillQueryNoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewKillQueryBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewKillQueryNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 422:\n\t\tresult := NewKillQueryUnprocessableEntity()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *GetProgressionViewReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetProgressionViewOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewGetProgressionViewBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *UpdateRackTopoReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewUpdateRackTopoOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewUpdateRackTopoBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *GetByUIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetByUIDOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 
404:\n\t\tresult := NewGetByUIDNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *UtilTestReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewUtilTestOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *GetMeReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetMeOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewGetMeDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *Delete1Reader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 204:\n\t\tresult := NewDelete1NoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewDelete1NotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *PostGatewayDisconnectNetaddressReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 204:\n\t\tresult := NewPostGatewayDisconnectNetaddressNoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewPostGatewayDisconnectNetaddressDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *GetProtocolsUsingGETReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetProtocolsUsingGETOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *RevokeReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewRevokeOK()\n\t\tif err := 
result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 401:\n\t\tresult := NewRevokeUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewRevokeNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *DestroySessionUsingPOSTReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewDestroySessionUsingPOSTOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *CompleteTransactionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 204:\n\t\tresult := NewCompleteTransactionNoContent()\n\t\tresult.HttpResponse = response\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\terrorResult := kbcommon.NewKillbillError(response.Code())\n\t\tif err := consumer.Consume(response.Body(), &errorResult); err != nil && err != io.EOF {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, errorResult\n\t}\n}", "func (o *GetMapNameEventsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetMapNameEventsOK(o.writer)\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewGetMapNameEventsNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *RecoveryReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewRecoveryOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 500:\n\t\tresult := NewRecoveryInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *GetPeersReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewGetPeersOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 403:\n\t\tresult := NewGetPeersForbidden()\n\t\tif err := 
result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *InstallEventsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewInstallEventsOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *UpdateRackTopoReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewUpdateRackTopoOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewUpdateRackTopoBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewUpdateRackTopoNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewUpdateRackTopoInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *GetVoicesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetVoicesOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *SetMemoRequiredReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewSetMemoRequiredOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewSetMemoRequiredBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewSetMemoRequiredInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *PatchHyperflexServerModelsMoidReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 201:\n\t\tresult := NewPatchHyperflexServerModelsMoidCreated()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewPatchHyperflexServerModelsMoidDefault(response.Code())\n\t\tif err := 
result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *BounceReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tresult := NewBounceDefault(response.Code())\n\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\treturn nil, err\n\t}\n\tif response.Code()/100 == 2 {\n\t\treturn result, nil\n\t}\n\treturn nil, result\n}", "func (o *PostHyperflexHxdpVersionsMoidReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 201:\n\t\tresult := NewPostHyperflexHxdpVersionsMoidCreated()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewPostHyperflexHxdpVersionsMoidDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *GetDiscoverReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewGetDiscoverOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *UserQuerySessionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewUserQuerySessionOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewUserQuerySessionBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 500:\n\t\tresult := NewUserQuerySessionInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\tdata, err := ioutil.ReadAll(response.Body())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"Requested GET /sessionbrowser/namespaces/{namespace}/gamesession returns an error %d: %s\", response.Code(), string(data))\n\t}\n}", "func (o *GetObmsLibraryIdentifierReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewGetObmsLibraryIdentifierOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 404:\n\t\tresult := NewGetObmsLibraryIdentifierNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\tresult := NewGetObmsLibraryIdentifierDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *DeleteApplianceRestoresMoidReader) ReadResponse(response 
runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewDeleteApplianceRestoresMoidOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewDeleteApplianceRestoresMoidNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\tresult := NewDeleteApplianceRestoresMoidDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *UnclaimTrafficFilterLinkIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewUnclaimTrafficFilterLinkIDOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewUnclaimTrafficFilterLinkIDBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewUnclaimTrafficFilterLinkIDInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (r *overwriteConsumerReader) ReadResponse(resp runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tif r.forStatusCode == ForAllStatusCodes || resp.Code() == r.forStatusCode {\n\t\treturn r.requestReader.ReadResponse(resp, r.consumer)\n\t}\n\n\treturn r.requestReader.ReadResponse(resp, consumer)\n}", "func (o *ChangeaspecificSpeedDialReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 204:\n\t\tresult := NewChangeaspecificSpeedDialNoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *GetDebugRequestReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewGetDebugRequestOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 404:\n\t\tresult := NewGetDebugRequestNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *PostMemoryArraysMoidReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 201:\n\t\tresult := NewPostMemoryArraysMoidCreated()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, 
err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewPostMemoryArraysMoidDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (c *Client) readResponse(conn net.Conn) ([]byte, error) {\n\tif c.Timeout > 0 {\n\t\t_ = conn.SetReadDeadline(time.Now().Add(c.Timeout))\n\t}\n\n\tproto := \"udp\"\n\tif _, ok := conn.(*net.TCPConn); ok {\n\t\tproto = \"tcp\"\n\t}\n\n\tif proto == \"udp\" {\n\t\tbufSize := c.UDPSize\n\t\tif bufSize == 0 {\n\t\t\tbufSize = dns.MinMsgSize\n\t\t}\n\t\tresponse := make([]byte, bufSize)\n\t\tn, err := conn.Read(response)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn response[:n], nil\n\t}\n\n\t// If we got here, this is a TCP connection\n\t// so we should read a 2-byte prefix first\n\treturn readPrefixed(conn)\n}", "func (o *PayReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewPayOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewPayBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewPayNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 409:\n\t\tresult := NewPayConflict()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\tdata, err := ioutil.ReadAll(response.Body())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"Requested POST /platform/public/namespaces/{namespace}/payment/orders/{paymentOrderNo}/pay returns an error %d: %s\", response.Code(), string(data))\n\t}\n}", "func (o *CountReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewCountOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 400:\n\t\tresult := NewCountBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *PostNodesIdentifierObmIdentifyReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 201:\n\t\tresult := NewPostNodesIdentifierObmIdentifyCreated()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 404:\n\t\tresult := NewPostNodesIdentifierObmIdentifyNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\tresult := NewPostNodesIdentifierObmIdentifyDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, 
result\n\t}\n}", "func (o *DeleteEventsEventIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 204:\n\t\tresult := NewDeleteEventsEventIDNoContent()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 401:\n\t\tresult := NewDeleteEventsEventIDUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tcase 404:\n\t\tresult := NewDeleteEventsEventIDNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *GetInterpreterReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetInterpreterOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewGetInterpreterNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *UtilityServiceReadyReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewUtilityServiceReadyOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewUtilityServiceReadyDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *SubscriptionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewSubscriptionOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *HTTPGetPersistenceItemDataReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewHTTPGetPersistenceItemDataOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewHTTPGetPersistenceItemDataNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *FrontSessionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := 
NewFrontSessionOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *PostEquipmentIoExpandersMoidReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 201:\n\t\tresult := NewPostEquipmentIoExpandersMoidCreated()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewPostEquipmentIoExpandersMoidDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (pr *PushedRequest) ReadResponse(ctx context.Context) (*http.Response, error) {\n\tselect {\n\tcase <-ctx.Done():\n\t\tpr.Cancel()\n\t\tpr.pushedStream.bufPipe.CloseWithError(ctx.Err())\n\t\treturn nil, ctx.Err()\n\tcase <-pr.pushedStream.peerReset:\n\t\treturn nil, pr.pushedStream.resetErr\n\tcase resErr := <-pr.pushedStream.resc:\n\t\tif resErr.err != nil {\n\t\t\tfmt.Println(resErr.err.Error())\n\t\t\tpr.Cancel()\n\t\t\tpr.pushedStream.bufPipe.CloseWithError(resErr.err)\n\t\t\treturn nil, resErr.err\n\t\t}\n\t\tresErr.res.Request = pr.Promise\n\t\tresErr.res.TLS = pr.pushedStream.cc.tlsState\n\t\treturn resErr.res, resErr.err\n\t}\n}", "func (o *DeleteFirmwareUpgradesMoidReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewDeleteFirmwareUpgradesMoidOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 404:\n\t\tresult := NewDeleteFirmwareUpgradesMoidNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\tresult := NewDeleteFirmwareUpgradesMoidDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *GetZippedReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tresult := NewGetZippedDefault(response.Code())\n\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\treturn nil, err\n\t}\n\tif response.Code()/100 == 2 {\n\t\treturn result, nil\n\t}\n\treturn nil, result\n}", "func (o *GetEtherPhysicalPortsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetEtherPhysicalPortsOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewGetEtherPhysicalPortsDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (o *ZoneStreamReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) 
(interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewZoneStreamOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}", "func (o *ByNamespaceReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewByNamespaceOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 404:\n\t\tresult := NewByNamespaceNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func (o *GetRequestTrackerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetRequestTrackerOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 403:\n\t\tresult := NewGetRequestTrackerForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewGetRequestTrackerNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"response status code does not match any response statuses defined for this endpoint in the swagger spec\", response, response.Code())\n\t}\n}" ]
[ "0.7641552", "0.760927", "0.7521922", "0.750983", "0.74814624", "0.74742043", "0.74354094", "0.74259144", "0.73762655", "0.73685175", "0.7360411", "0.73561996", "0.73510104", "0.73482704", "0.734754", "0.73415536", "0.73377526", "0.7324228", "0.73168004", "0.7316417", "0.7311354", "0.73091406", "0.7291807", "0.7287296", "0.72832924", "0.72756135", "0.7275194", "0.7266383", "0.7265615", "0.7264117", "0.7255932", "0.7251189", "0.72506493", "0.7249641", "0.7242019", "0.72261906", "0.72253126", "0.7220679", "0.7216396", "0.7213257", "0.721161", "0.7210565", "0.7210295", "0.72016865", "0.7199123", "0.71989113", "0.71943253", "0.7179123", "0.7175289", "0.7175108", "0.7166381", "0.71561456", "0.7150776", "0.7150642", "0.7149044", "0.71451586", "0.7144637", "0.71424055", "0.714066", "0.7137857", "0.71376", "0.7137453", "0.7137089", "0.7136602", "0.7135016", "0.7132145", "0.71262056", "0.71257114", "0.71205187", "0.71203804", "0.71203476", "0.7115563", "0.71064895", "0.7101775", "0.7099803", "0.70997036", "0.7099398", "0.7098946", "0.70977116", "0.70975214", "0.7095279", "0.7094677", "0.7088814", "0.7087195", "0.7084482", "0.7082464", "0.7080233", "0.70786047", "0.7078161", "0.70769817", "0.7071432", "0.7069931", "0.7069408", "0.7068374", "0.70682615", "0.7062746", "0.70626354", "0.7061132", "0.70586556", "0.70551085", "0.7052274" ]
0.0
-1
NewProtectedGetPlaytimeGameTelemetryV1ProtectedSteamIdsSteamIDPlaytimeGetOK creates a ProtectedGetPlaytimeGameTelemetryV1ProtectedSteamIdsSteamIDPlaytimeGetOK with default headers values
func NewProtectedGetPlaytimeGameTelemetryV1ProtectedSteamIdsSteamIDPlaytimeGetOK() *ProtectedGetPlaytimeGameTelemetryV1ProtectedSteamIdsSteamIDPlaytimeGetOK { return &ProtectedGetPlaytimeGameTelemetryV1ProtectedSteamIdsSteamIDPlaytimeGetOK{} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewProtectedGetPlaytimeGameTelemetryV1ProtectedSteamIdsSteamIDPlaytimeGetUnprocessableEntity() *ProtectedGetPlaytimeGameTelemetryV1ProtectedSteamIdsSteamIDPlaytimeGetUnprocessableEntity {\n\treturn &ProtectedGetPlaytimeGameTelemetryV1ProtectedSteamIdsSteamIDPlaytimeGetUnprocessableEntity{}\n}", "func (o *ProtectedGetPlaytimeGameTelemetryV1ProtectedSteamIdsSteamIDPlaytimeGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewProtectedGetPlaytimeGameTelemetryV1ProtectedSteamIdsSteamIDPlaytimeGetOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 422:\n\t\tresult := NewProtectedGetPlaytimeGameTelemetryV1ProtectedSteamIdsSteamIDPlaytimeGetUnprocessableEntity()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tdefault:\n\t\tdata, err := ioutil.ReadAll(response.Body())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"Requested GET /game-telemetry/v1/protected/steamIds/{steamId}/playtime returns an error %d: %s\", response.Code(), string(data))\n\t}\n}", "func (o *GetClockParams) WithDefaults() *GetClockParams {\n\to.SetDefaults()\n\treturn o\n}", "func NewGetClockParams() *GetClockParams {\n\treturn &GetClockParams{\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}", "func (a *OpenapiApiService) OpenapiV3Get(ctx _context.Context) ApiOpenapiV3GetRequest {\n\treturn ApiOpenapiV3GetRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t}\n}", "func NewGetClockParamsWithHTTPClient(client *http.Client) *GetClockParams {\n\treturn &GetClockParams{\n\t\tHTTPClient: client,\n\t}\n}", "func NewGetVCenterUsingGETForbidden() *GetVCenterUsingGETForbidden {\n\treturn &GetVCenterUsingGETForbidden{}\n}", "func (vk VK) GiftsGet(params map[string]string) (response GiftsGetResponse, vkErr Error) {\n\trawResponse, vkErr := vk.Request(\"gifts.get\", params)\n\tif vkErr.Code != 0 {\n\t\treturn\n\t}\n\n\terr := json.Unmarshal(rawResponse, &response)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn\n}", "func NewGetPracticesDefault(code int) *GetPracticesDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &GetPracticesDefault{\n\t\t_statusCode: code,\n\t}\n}", "func Get(alg string, bits int) ([]byte, error) {\n\tcfg := newJwks(alg, bits)\n\tdata, err := cfg.generateJwksSecret()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"unable to generate key key\")\n\t}\n\n\tpatchContent := []jwksPatchJSON{{\n\t\tOp: \"add\",\n\t\tPath: \"/data\",\n\t\tValue: jwksPatchJSONValue{\n\t\t\tJwks: data,\n\t\t},\n\t}}\n\n\tpatchDataJSON, err := json.Marshal(patchContent)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"unable to marshal key\")\n\t}\n\n\treturn patchDataJSON, nil\n}", "func NewGetVCenterUsingGETUnauthorized() *GetVCenterUsingGETUnauthorized {\n\treturn &GetVCenterUsingGETUnauthorized{}\n}", "func (kvclient *MockResKVClient) Get(ctx context.Context, key string) (*kvstore.KVPair, error) {\n\tlogger.Debugw(ctx, \"Warning Warning Warning: Get of MockKVClient called\", log.Fields{\"key\": key})\n\tif key != \"\" {\n\t\tif strings.Contains(key, MeterConfig) {\n\t\t\tvar bands []*ofp.OfpMeterBandHeader\n\t\t\tbands = append(bands, &ofp.OfpMeterBandHeader{Type: ofp.OfpMeterBandType_OFPMBT_DSCP_REMARK,\n\t\t\t\tRate: 1024, Data: &ofp.OfpMeterBandHeader_DscpRemark{DscpRemark: 
&ofp.OfpMeterBandDscpRemark{PrecLevel: 2}}})\n\n\t\t\tbands = append(bands, &ofp.OfpMeterBandHeader{Type: ofp.OfpMeterBandType_OFPMBT_DSCP_REMARK,\n\t\t\t\tRate: 1024, Data: &ofp.OfpMeterBandHeader_DscpRemark{DscpRemark: &ofp.OfpMeterBandDscpRemark{PrecLevel: 3}}})\n\n\t\t\tsep := strings.Split(key, \"/\")[1]\n\t\t\tval, _ := strconv.ParseInt(strings.Split(sep, \",\")[1], 10, 32)\n\t\t\tif uint32(val) > 1 {\n\t\t\t\tmeterConfig := &ofp.OfpMeterConfig{MeterId: uint32(val), Bands: bands}\n\t\t\t\tstr, _ := json.Marshal(meterConfig)\n\n\t\t\t\treturn kvstore.NewKVPair(key, str, \"mock\", 3000, 1), nil\n\t\t\t}\n\t\t\treturn nil, errors.New(\"invalid meter\")\n\t\t}\n\t\tif strings.Contains(key, FlowIDpool) || strings.Contains(key, GemportIDPool) || strings.Contains(key, AllocIDPool) {\n\t\t\tlogger.Debug(ctx, \"Error Error Error Key:\", FlowIDpool, GemportIDPool, AllocIDPool)\n\t\t\tdata := make(map[string]interface{})\n\t\t\tdata[\"pool\"] = \"1024\"\n\t\t\tdata[\"start_idx\"] = 1\n\t\t\tdata[\"end_idx\"] = 1024\n\t\t\tstr, _ := json.Marshal(data)\n\t\t\treturn kvstore.NewKVPair(key, str, \"mock\", 3000, 1), nil\n\t\t}\n\t\tif strings.Contains(key, FlowIDInfo) || strings.Contains(key, FlowIDs) {\n\t\t\tlogger.Debug(ctx, \"Error Error Error Key:\", FlowIDs, FlowIDInfo)\n\t\t\tstr, _ := json.Marshal([]uint32{1, 2})\n\t\t\treturn kvstore.NewKVPair(key, str, \"mock\", 3000, 1), nil\n\t\t}\n\t\tif strings.Contains(key, AllocIDs) || strings.Contains(key, GemportIDs) {\n\t\t\tlogger.Debug(ctx, \"Error Error Error Key:\", AllocIDs, GemportIDs)\n\t\t\tstr, _ := json.Marshal(1)\n\t\t\treturn kvstore.NewKVPair(key, str, \"mock\", 3000, 1), nil\n\t\t}\n\t\tif strings.Contains(key, McastQueuesForIntf) {\n\t\t\tlogger.Debug(ctx, \"Error Error Error Key:\", McastQueuesForIntf)\n\t\t\tmcastQueues := make(map[uint32][]uint32)\n\t\t\tmcastQueues[10] = []uint32{4000, 0}\n\t\t\tstr, _ := json.Marshal(mcastQueues)\n\t\t\treturn kvstore.NewKVPair(key, str, \"mock\", 3000, 1), nil\n\t\t}\n\t\tif strings.Contains(key, \"flow_groups\") && !strings.Contains(key, \"1000\") {\n\t\t\tgroupInfo := GroupInfo{GroupID: 2, OutPorts: []uint32{2}}\n\t\t\tstr, _ := json.Marshal(groupInfo)\n\t\t\treturn kvstore.NewKVPair(key, str, \"mock\", 3000, 1), nil\n\t\t}\n\n\t\tmaps := make(map[string]*kvstore.KVPair)\n\t\tmaps[key] = &kvstore.KVPair{Key: key}\n\t\treturn maps[key], nil\n\t}\n\treturn nil, errors.New(\"key didn't find\")\n}", "func (o *GetClockParams) WithTimeout(timeout time.Duration) *GetClockParams {\n\to.SetTimeout(timeout)\n\treturn o\n}", "func (client *Client) GetPlayInfoWithOptions(request *GetPlayInfoRequest, runtime *util.RuntimeOptions) (_result *GetPlayInfoResponse, _err error) {\n\t_err = util.ValidateModel(request)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\tquery := map[string]interface{}{}\n\tif !tea.BoolValue(util.IsUnset(request.AdditionType)) {\n\t\tquery[\"AdditionType\"] = request.AdditionType\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.AuthTimeout)) {\n\t\tquery[\"AuthTimeout\"] = request.AuthTimeout\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.Definition)) {\n\t\tquery[\"Definition\"] = request.Definition\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.Formats)) {\n\t\tquery[\"Formats\"] = request.Formats\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.OutputType)) {\n\t\tquery[\"OutputType\"] = request.OutputType\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.PlayConfig)) {\n\t\tquery[\"PlayConfig\"] = request.PlayConfig\n\t}\n\n\tif 
!tea.BoolValue(util.IsUnset(request.ReAuthInfo)) {\n\t\tquery[\"ReAuthInfo\"] = request.ReAuthInfo\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.ResultType)) {\n\t\tquery[\"ResultType\"] = request.ResultType\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.StreamType)) {\n\t\tquery[\"StreamType\"] = request.StreamType\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.VideoId)) {\n\t\tquery[\"VideoId\"] = request.VideoId\n\t}\n\n\treq := &openapi.OpenApiRequest{\n\t\tQuery: openapiutil.Query(query),\n\t}\n\tparams := &openapi.Params{\n\t\tAction: tea.String(\"GetPlayInfo\"),\n\t\tVersion: tea.String(\"2017-03-21\"),\n\t\tProtocol: tea.String(\"HTTPS\"),\n\t\tPathname: tea.String(\"/\"),\n\t\tMethod: tea.String(\"POST\"),\n\t\tAuthType: tea.String(\"AK\"),\n\t\tStyle: tea.String(\"RPC\"),\n\t\tReqBodyType: tea.String(\"formData\"),\n\t\tBodyType: tea.String(\"json\"),\n\t}\n\t_result = &GetPlayInfoResponse{}\n\t_body, _err := client.CallApi(params, req, runtime)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\t_err = tea.Convert(_body, &_result)\n\treturn _result, _err\n}", "func NewGetTeamsOK() *GetTeamsOK {\n\n\treturn &GetTeamsOK{}\n}", "func NewGetTeamsForbidden() *GetTeamsForbidden {\n\n\treturn &GetTeamsForbidden{}\n}", "func NewGetTimesheetTimeClockOK() *GetTimesheetTimeClockOK {\n\treturn &GetTimesheetTimeClockOK{}\n}", "func (o *GetClockParams) WithHTTPClient(client *http.Client) *GetClockParams {\n\to.SetHTTPClient(client)\n\treturn o\n}", "func (o *SyncStatusUsingGETParams) WithDefaults() *SyncStatusUsingGETParams {\n\to.SetDefaults()\n\treturn o\n}", "func NewGetCustomIntegrationVersionByIDUsingGETForbidden() *GetCustomIntegrationVersionByIDUsingGETForbidden {\n\treturn &GetCustomIntegrationVersionByIDUsingGETForbidden{}\n}", "func NewGetSessionInfoOK() *GetSessionInfoOK {\n\treturn &GetSessionInfoOK{}\n}", "func NewGetTournamentOK() *GetTournamentOK {\n\n\treturn &GetTournamentOK{}\n}", "func NewGetMeDefault(code int) *GetMeDefault {\n\treturn &GetMeDefault{\n\t\t_statusCode: code,\n\t}\n}", "func (client *DeviceSettingsClient) getTimeSettingsCreateRequest(ctx context.Context, deviceName string, resourceGroupName string, managerName string, options *DeviceSettingsClientGetTimeSettingsOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorSimple/managers/{managerName}/devices/{deviceName}/timeSettings/default\"\n\turlPath = strings.ReplaceAll(urlPath, \"{deviceName}\", deviceName)\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", client.subscriptionID)\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", resourceGroupName)\n\turlPath = strings.ReplaceAll(urlPath, \"{managerName}\", managerName)\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func GetParams(opts *Options) (*api.Client, *models.Game, *models.GamePackage, *semver.Version, string, int64, string, *files.GetResult, int64, error) {\n\tpath := opts.Args.File\n\tfilesize, checksum, err := getFileData(path)\n\tif err != nil {\n\t\treturn nil, nil, nil, nil, \"\", 0, \"\", nil, 0, err\n\t}\n\n\tapiClient, user, err := Authenticate(opts.Token)\n\tif err != nil {\n\t\treturn nil, nil, nil, 
nil, \"\", 0, \"\", nil, 0, err\n\t}\n\n\tui.Success(\"Hello, %s\\n\\n\", user.Username)\n\tgame, err := GetGame(apiClient, opts.GameID)\n\tif err != nil {\n\t\treturn nil, nil, nil, nil, \"\", 0, \"\", nil, 0, err\n\t}\n\n\tgamePackage, err := GetGamePackage(apiClient, game.ID, opts.PackageID)\n\tif err != nil {\n\t\treturn nil, nil, nil, nil, \"\", 0, \"\", nil, 0, err\n\t}\n\n\treleaseSemver, err := GetGameRelease(apiClient, opts.ReleaseVersion)\n\tif err != nil {\n\t\treturn nil, nil, nil, nil, \"\", 0, \"\", nil, 0, err\n\t}\n\n\tfileStatus, err := apiClient.FileStatus(game.ID, filesize, checksum)\n\tif err != nil {\n\t\treturn nil, nil, nil, nil, \"\", 0, \"\", nil, 0, err\n\t}\n\n\tchunkSize := int64(opts.Advanced.ChunkSize) * 1024 * 1024\n\tif chunkSize <= 0 {\n\t\tchunkSize = config.ChunkSize\n\t}\n\n\treturn apiClient, game, gamePackage, releaseSemver, path, filesize, checksum, fileStatus, chunkSize, nil\n}", "func NewGetMeetupsDefault(code int) *GetMeetupsDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &GetMeetupsDefault{\n\t\t_statusCode: code,\n\t}\n}", "func NewGetParamsWithTimeout(timeout time.Duration) *GetParams {\n\tvar (\n\t\tdeviceOSDefault = string(\"Android 9\")\n\t\tsendToEmailDefault = string(\"no\")\n\t)\n\treturn &GetParams{\n\t\tDeviceOS: &deviceOSDefault,\n\t\tSendToEmail: &sendToEmailDefault,\n\n\t\ttimeout: timeout,\n\t}\n}", "func NewGetPassesDefault(code int) *GetPassesDefault {\n\treturn &GetPassesDefault{\n\t\t_statusCode: code,\n\t}\n}", "func (cli *OpsGenieTeamClient) Get(req team.GetTeamRequest) (*team.GetTeamResponse, error) {\n\treq.APIKey = cli.apiKey\n\tresp, err := cli.sendRequest(cli.buildGetRequest(teamURL, req))\n\n\tif resp == nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tvar getTeamResp team.GetTeamResponse\n\n\tif err = resp.Body.FromJsonTo(&getTeamResp); err != nil {\n\t\tmessage := \"Server response can not be parsed, \" + err.Error()\n\t\tlogging.Logger().Warn(message)\n\t\treturn nil, errors.New(message)\n\t}\n\treturn &getTeamResp, nil\n}", "func NewGetPublicIPByUUIDUsingGETForbidden() *GetPublicIPByUUIDUsingGETForbidden {\n\treturn &GetPublicIPByUUIDUsingGETForbidden{}\n}", "func (m *ItemPrimaryChannelSharedWithTeamsSharedWithChannelTeamInfoItemRequestBuilder) Get(ctx context.Context, requestConfiguration *ItemPrimaryChannelSharedWithTeamsSharedWithChannelTeamInfoItemRequestBuilderGetRequestConfiguration)(iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.SharedWithChannelTeamInfoable, error) {\n requestInfo, err := m.ToGetRequestInformation(ctx, requestConfiguration);\n if err != nil {\n return nil, err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue,\n }\n res, err := m.BaseRequestBuilder.RequestAdapter.Send(ctx, requestInfo, iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.CreateSharedWithChannelTeamInfoFromDiscriminatorValue, errorMapping)\n if err != nil {\n return nil, err\n }\n if res == nil {\n return nil, nil\n }\n return res.(iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.SharedWithChannelTeamInfoable), nil\n}", "func NewListSmsSendOverviewUsingGETRequestWithoutParam() *ListSmsSendOverviewUsingGETRequest {\n\n return &ListSmsSendOverviewUsingGETRequest{\n 
JDCloudRequest: core.JDCloudRequest{\n URL: \"/smsApps/{appId}:overview\",\n Method: \"GET\",\n Header: nil,\n Version: \"v1\",\n },\n }\n}", "func NewGetLargeScreenDataRequestWithoutParam() *GetLargeScreenDataRequest {\n\n return &GetLargeScreenDataRequest{\n JDCloudRequest: core.JDCloudRequest{\n URL: \"/regions/{regionId}/getData\",\n Method: \"GET\",\n Header: nil,\n Version: \"v1\",\n },\n }\n}", "func (o *GetPrivateOrderstateParams) WithTimeout(timeout time.Duration) *GetPrivateOrderstateParams {\n\to.SetTimeout(timeout)\n\treturn o\n}", "func NewGetSopPaymentResponseUsingGETForbidden() *GetSopPaymentResponseUsingGETForbidden {\n\treturn &GetSopPaymentResponseUsingGETForbidden{}\n}", "func (client *LiveOutputsClient) getCreateRequest(ctx context.Context, resourceGroupName string, accountName string, liveEventName string, liveOutputName string, options *LiveOutputsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaservices/{accountName}/liveEvents/{liveEventName}/liveOutputs/{liveOutputName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif accountName == \"\" {\n\t\treturn nil, errors.New(\"parameter accountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\tif liveEventName == \"\" {\n\t\treturn nil, errors.New(\"parameter liveEventName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{liveEventName}\", url.PathEscape(liveEventName))\n\tif liveOutputName == \"\" {\n\t\treturn nil, errors.New(\"parameter liveOutputName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{liveOutputName}\", url.PathEscape(liveOutputName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func NewGetClockParamsWithTimeout(timeout time.Duration) *GetClockParams {\n\treturn &GetClockParams{\n\t\ttimeout: timeout,\n\t}\n}", "func NewGetLicenseSmartlicenseTokensDefault(code int) *GetLicenseSmartlicenseTokensDefault {\n\treturn &GetLicenseSmartlicenseTokensDefault{\n\t\t_statusCode: code,\n\t}\n}", "func (sc *LoanMarketShareContract) Get(ctx contractapi.TransactionContextInterface, key string) (*LoanMarketShareObj, error) {\n\t\n existingObj, err := ctx.GetStub().GetState(key)\n if err != nil {\n return nil, err\n }\n\n if existingObj == nil {\n return nil, fmt.Errorf(\"Cannot read world state pair with key %s. 
Does not exist\", key)\n }\n\n\tloanMarketShareObj := new(LoanMarketShareObj)\n\tif err := json.Unmarshal(existingObj, loanMarketShareObj); err != nil {\n\t\treturn nil, fmt.Errorf(\"Data retrieved from world state for key %s was not of type LoanMarketShareObj\", key)\n\t}\n return loanMarketShareObj, nil\n}", "func NewDescribeThingShadowRequestWithoutParam() *DescribeThingShadowRequest {\n\n return &DescribeThingShadowRequest{\n JDCloudRequest: core.JDCloudRequest{\n URL: \"/regions/{regionId}/products/{productKey}/devices/{identifier}/shadow\",\n Method: \"GET\",\n Header: nil,\n Version: \"v2\",\n },\n }\n}", "func NewGetCustomIntegrationVersionByIDUsingGETUnauthorized() *GetCustomIntegrationVersionByIDUsingGETUnauthorized {\n\treturn &GetCustomIntegrationVersionByIDUsingGETUnauthorized{}\n}", "func get(stub shim.ChaincodeStubInterface, args []string) (string, error) {\n if len(args) != 1 {\n return \"\", fmt.Errorf(\"Incorrect arguments. Expecting a key\")\n }\n\n PatientBytes, err := stub.GetState(args[0])\n if err != nil {\n return \"\", fmt.Errorf(\"Failed to get asset: %s with error: %s\", args[0], err)\n }\n if PatientBytes == nil {\n return \"\", fmt.Errorf(\"Asset not found: %s\", args[0])\n }\n return string(PatientBytes), nil\n}", "func (c *client) Get(sType, org, name, path string) (*library.Secret, error) {\n\t// create log fields from secret metadata\n\tfields := logrus.Fields{\n\t\t\"org\": org,\n\t\t\"repo\": name,\n\t\t\"secret\": path,\n\t\t\"type\": sType,\n\t}\n\n\t// check if secret is a shared secret\n\tif strings.EqualFold(sType, constants.SecretShared) {\n\t\t// update log fields from secret metadata\n\t\tfields = logrus.Fields{\n\t\t\t\"org\": org,\n\t\t\t\"team\": name,\n\t\t\t\"secret\": path,\n\t\t\t\"type\": sType,\n\t\t}\n\t}\n\n\tc.Logger.WithFields(fields).Tracef(\"getting native %s secret %s for %s/%s\", sType, path, org, name)\n\n\t// capture the secret from the native service\n\ts, err := c.Database.GetSecret(sType, org, name, path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s, nil\n}", "func (s *TimeAndBillingEntryRecentHourCostTypesEndpoint) Get(ctx context.Context, division int, id *types.GUID) (*TimeAndBillingEntryRecentHourCostTypes, error) {\n\tb, _ := s.client.ResolvePathWithDivision(\"/api/v1/{division}/read/project/TimeAndBillingEntryRecentHourCostTypes\", division) // #nosec\n\tu, err := api.AddOdataKeyToURL(b, id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\te := &TimeAndBillingEntryRecentHourCostTypes{}\n\t_, _, requestError := s.client.NewRequestAndDo(ctx, \"GET\", u.String(), nil, e)\n\treturn e, requestError\n}", "func Get(app, provider, pArn string, duration int64) (*aws.Credentials, error) {\n\t// Get provider config\n\tp, err := config.GetOktaProvider(provider)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"reading provider config: %v\", err)\n\t}\n\n\t// Get app config\n\ta, err := config.GetOktaApp(app)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"reading config for app %s: %v\", app, err)\n\t}\n\n\t// Initialize Okta client\n\tc, err := NewClient(p.BaseURL)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"initializing Okta client: %v\", err)\n\t}\n\n\t// Get user credentials\n\tuser := p.Username\n\tif user == \"\" {\n\t\t// Get credentials from the user\n\t\tfmt.Print(\"Okta username: \")\n\t\tfmt.Scanln(&user)\n\t}\n\n\tpass, err := keyChain.Get(provider)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"getting key chain: %v\", err)\n\t}\n\n\t// Initialize spinner\n\tvar s = spinner.New()\n\n\t// Get 
session token\n\ts.Start()\n\tresp, err := c.GetSessionToken(&GetSessionTokenParams{\n\t\tUsername: user,\n\t\tPassword: string(pass),\n\t})\n\ts.Stop()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"getting session token: %v\", err)\n\t}\n\n\tvar st string\n\n\t// TODO Handle multiple MFA devices (allow user to choose)\n\tswitch resp.Status {\n\tcase StatusSuccess:\n\t\tst = resp.SessionToken\n\tcase StatusMFARequired:\n\t\tfactor := resp.Embedded.Factors[0]\n\t\tstateToken := resp.StateToken\n\n\t\tvar vfResp *VerifyFactorResponse\n\n\t\tswitch factor.FactorType {\n\t\tcase MFATypePush:\n\t\t\t// Okta Verify push notification:\n\t\t\t// https://developer.okta.com/docs/api/resources/authn/#verify-push-factor\n\t\t\t// Keep polling authentication transactions with WAITING result until the challenge\n\t\t\t// completes or expires.\n\t\t\tfmt.Println(\"Please approve request on Okta Verify app\")\n\t\t\ts.Start()\n\t\t\tvfResp, err = c.VerifyFactor(&VerifyFactorParams{\n\t\t\t\tFactorID: factor.ID,\n\t\t\t\tStateToken: stateToken,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"verifying MFA: %v\", err)\n\t\t\t}\n\n\t\t\tfor vfResp.FactorResult == VerifyFactorStatusWaiting {\n\t\t\t\tvfResp, err = c.VerifyFactor(&VerifyFactorParams{\n\t\t\t\t\tFactorID: factor.ID,\n\t\t\t\t\tStateToken: stateToken,\n\t\t\t\t})\n\t\t\t\ttime.Sleep(2 * time.Second)\n\t\t\t}\n\t\t\ts.Stop()\n\t\tcase MFATypeTOTP:\n\t\t\tfmt.Print(\"Please enter the OTP from your MFA device: \")\n\t\t\tvar otp string\n\t\t\tfmt.Scanln(&otp)\n\n\t\t\ts.Start()\n\t\t\tvfResp, err = c.VerifyFactor(&VerifyFactorParams{\n\t\t\t\tFactorID: factor.ID,\n\t\t\t\tPassCode: otp,\n\t\t\t\tStateToken: stateToken,\n\t\t\t})\n\t\t\ts.Stop()\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unsupported MFA type '%s'\", factor.FactorType)\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"verifying MFA: %v\", err)\n\t\t}\n\n\t\t// Handle failed MFA verification (verification rejected or timed out)\n\t\tif vfResp.Status != VerifyFactorStatusSuccess {\n\t\t\treturn nil, fmt.Errorf(\"MFA verification failed\")\n\t\t}\n\n\t\tst = vfResp.SessionToken\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"Invalid status %s\", resp.Status)\n\t}\n\n\t// Launch Okta app with session token\n\ts.Start()\n\tsamlAssertion, err := c.LaunchApp(&LaunchAppParams{SessionToken: st, URL: a.URL})\n\ts.Stop()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error launching app: %v\", err)\n\t}\n\n\tarn, err := saml.Get(*samlAssertion, pArn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts.Start()\n\tcreds, err := aws.AssumeSAMLRole(arn.Provider, arn.Role, *samlAssertion, duration)\n\ts.Stop()\n\n\tif err != nil {\n\t\tif err.Error() == aws.ErrDurationExceeded {\n\t\t\tlog.Println(color.YellowString(aws.DurationExceededMessage))\n\t\t\ts.Start()\n\t\t\tcreds, err = aws.AssumeSAMLRole(arn.Provider, arn.Role, *samlAssertion, 3600)\n\t\t\ts.Stop()\n\t\t}\n\t}\n\n\treturn creds, err\n}", "func getTeam(params martini.Params, w http.ResponseWriter, r *http.Request) {\n\tid := params[\"team\"]\n\tteam := models.NewTeam(id)\n\tskue.Read(view, team, nil, w, r)\n}", "func (r *NiftyCreatePrivateLanResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func ExamplePrivateCloudsClient_Get_privateCloudsGetStretched() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := 
armavs.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := clientFactory.NewPrivateCloudsClient().Get(ctx, \"group1\", \"cloud1\", nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.PrivateCloud = armavs.PrivateCloud{\n\t// \tName: to.Ptr(\"cloud1\"),\n\t// \tType: to.Ptr(\"Microsoft.AVS/privateClouds\"),\n\t// \tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.AVS/privateClouds/cloud1\"),\n\t// \tLocation: to.Ptr(\"eastus2\"),\n\t// \tTags: map[string]*string{\n\t// \t},\n\t// \tProperties: &armavs.PrivateCloudProperties{\n\t// \t\tAvailability: &armavs.AvailabilityProperties{\n\t// \t\t\tSecondaryZone: to.Ptr[int32](2),\n\t// \t\t\tStrategy: to.Ptr(armavs.AvailabilityStrategyDualZone),\n\t// \t\t\tZone: to.Ptr[int32](1),\n\t// \t\t},\n\t// \t\tIdentitySources: []*armavs.IdentitySource{\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"group1\"),\n\t// \t\t\t\tAlias: to.Ptr(\"groupAlias\"),\n\t// \t\t\t\tBaseGroupDN: to.Ptr(\"ou=baseGroup\"),\n\t// \t\t\t\tBaseUserDN: to.Ptr(\"ou=baseUser\"),\n\t// \t\t\t\tDomain: to.Ptr(\"domain1\"),\n\t// \t\t\t\tPrimaryServer: to.Ptr(\"ldaps://1.1.1.1:636/\"),\n\t// \t\t\t\tSecondaryServer: to.Ptr(\"ldaps://1.1.1.2:636/\"),\n\t// \t\t\t\tSSL: to.Ptr(armavs.SSLEnumEnabled),\n\t// \t\t}},\n\t// \t\tInternet: to.Ptr(armavs.InternetEnumDisabled),\n\t// \t\tManagementCluster: &armavs.ManagementCluster{\n\t// \t\t\tClusterID: to.Ptr[int32](1),\n\t// \t\t\tClusterSize: to.Ptr[int32](4),\n\t// \t\t\tHosts: []*string{\n\t// \t\t\t\tto.Ptr(\"fakehost18.nyc1.kubernetes.center\"),\n\t// \t\t\t\tto.Ptr(\"fakehost19.nyc1.kubernetes.center\"),\n\t// \t\t\t\tto.Ptr(\"fakehost20.nyc1.kubernetes.center\"),\n\t// \t\t\t\tto.Ptr(\"fakehost21.nyc1.kubernetes.center\")},\n\t// \t\t\t},\n\t// \t\t\tCircuit: &armavs.Circuit{\n\t// \t\t\t\tExpressRouteID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/tnt13-41a90db2-9d5e-4bd5-a77a-5ce7b58213d6-eastus2/providers/Microsoft.Network/expressroutecircuits/tnt13-41a90db2-9d5e-4bd5-a77a-5ce7b58213d6-eastus2-xconnect\"),\n\t// \t\t\t\tExpressRoutePrivatePeeringID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/tnt42-cust-p01-dmo01/providers/Microsoft.Network/expressroutecircuits/tnt42-cust-p01-dmo01-er/peerings/AzurePrivatePeering\"),\n\t// \t\t\t\tPrimarySubnet: to.Ptr(\"192.168.53.0/30\"),\n\t// \t\t\t\tSecondarySubnet: to.Ptr(\"192.168.53.4/30\"),\n\t// \t\t\t},\n\t// \t\t\tEndpoints: &armavs.Endpoints{\n\t// \t\t\t\tHcxCloudManager: to.Ptr(\"https://192.168.50.4/\"),\n\t// \t\t\t\tNsxtManager: to.Ptr(\"https://192.168.50.3/\"),\n\t// \t\t\t\tVcsa: to.Ptr(\"https://192.168.50.2/\"),\n\t// \t\t\t},\n\t// \t\t\tExternalCloudLinks: []*string{\n\t// \t\t\t\tto.Ptr(\"/subscriptions/12341234-1234-1234-1234-123412341234/resourceGroups/mygroup/providers/Microsoft.AVS/privateClouds/cloud2\")},\n\t// \t\t\t\tNetworkBlock: to.Ptr(\"192.168.48.0/22\"),\n\t// \t\t\t\tProvisioningState: to.Ptr(armavs.PrivateCloudProvisioningStateSucceeded),\n\t// \t\t\t\tSecondaryCircuit: &armavs.Circuit{\n\t// 
\t\t\t\t\tExpressRouteID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/tnt13-41a90db2-9d5e-4bd5-a77a-5ce7b58213d6-eastus2/providers/Microsoft.Network/expressroutecircuits/tnt13-41a90db2-9d5e-4bd5-a77a-5ce7b58213d6-eastus2-xconnect\"),\n\t// \t\t\t\t\tExpressRoutePrivatePeeringID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/tnt42-cust-p01-dmo01/providers/Microsoft.Network/expressroutecircuits/tnt42-cust-p01-dmo01-er2/peerings/AzurePrivatePeering\"),\n\t// \t\t\t\t\tPrimarySubnet: to.Ptr(\"192.168.53.0/30\"),\n\t// \t\t\t\t\tSecondarySubnet: to.Ptr(\"192.168.53.4/30\"),\n\t// \t\t\t\t},\n\t// \t\t\t},\n\t// \t\t\tSKU: &armavs.SKU{\n\t// \t\t\t\tName: to.Ptr(\"AV36\"),\n\t// \t\t\t},\n\t// \t\t}\n}", "func (m *MockInternalClient) FlowHashGet(ctx context.Context, in *FlowHashGetRequestMsg, opts ...grpc.CallOption) (*FlowHashGetResponseMsg, error) {\n\tvarargs := []interface{}{ctx, in}\n\tfor _, a := range opts {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"FlowHashGet\", varargs...)\n\tret0, _ := ret[0].(*FlowHashGetResponseMsg)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func Get(ctx *grumble.Context) error {\n\tclient, execCtx, cancel := newClientAndCtx(ctx, 5*time.Second)\n\tdefer cancel()\n\tval, err := client.Get(execCtx, &ldProto.Key{Key: ctx.Args.String(\"key\")})\n\tif err != nil || val.Key == \"\" {\n\t\treturn err\n\t}\n\treturn exec(ctx, handleKeyValueReturned(val))\n}", "func Get(method, url string, params map[string]string, vPtr interface{}) error {\n\taccount, token, err := LoginWithSelectedAccount()\n\tif err != nil {\n\t\treturn LogError(\"Couldn't get account details or login token\", err)\n\t}\n\turl = fmt.Sprintf(\"%s%s\", account.ServerURL, url)\n\n\treq, err := http.NewRequest(method, url, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif token != \"\" {\n\t\treq.Header.Add(\"Authorization\", fmt.Sprintf(\"Bearer %s\", token))\n\t}\n\tq := req.URL.Query()\n\tfor k, v := range params {\n\t\tq.Add(k, v)\n\t}\n\treq.URL.RawQuery = q.Encode()\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer CloseTheCloser(resp.Body)\n\n\tdata, _ := ioutil.ReadAll(resp.Body)\n\n\tif resp.StatusCode != 200 {\n\t\trespBody := map[string]interface{}{}\n\t\tif err := json.Unmarshal(data, &respBody); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_ = LogError(fmt.Sprintf(\"error while getting service got http status code %s - %s\", resp.Status, respBody[\"error\"]), nil)\n\t\treturn fmt.Errorf(\"received invalid status code (%d)\", resp.StatusCode)\n\t}\n\n\tif err := json.Unmarshal(data, vPtr); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (o *GetClockParams) WithContext(ctx context.Context) *GetClockParams {\n\to.SetContext(ctx)\n\treturn o\n}", "func (m *ManagedDeviceEnrollmentAbandonmentDetailsWithSkipWithTopWithFilterWithSkipTokenRequestBuilder) Get(ctx context.Context, requestConfiguration *ManagedDeviceEnrollmentAbandonmentDetailsWithSkipWithTopWithFilterWithSkipTokenRequestBuilderGetRequestConfiguration)(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.Reportable, error) {\n requestInfo, err := m.ToGetRequestInformation(ctx, requestConfiguration);\n if err != nil {\n return nil, err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": 
i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n }\n res, err := m.BaseRequestBuilder.RequestAdapter.Send(ctx, requestInfo, ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.CreateReportFromDiscriminatorValue, errorMapping)\n if err != nil {\n return nil, err\n }\n if res == nil {\n return nil, nil\n }\n return res.(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.Reportable), nil\n}", "func NewGetParams() *GetParams {\n\tvar (\n\t\tdeviceOSDefault = string(\"Android 9\")\n\t\tsendToEmailDefault = string(\"no\")\n\t)\n\treturn &GetParams{\n\t\tDeviceOS: &deviceOSDefault,\n\t\tSendToEmail: &sendToEmailDefault,\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}", "func (a *DefaultApiService) ProjectUsernameProjectCheckoutKeyGet(ctx context.Context, username string, project string) (Keys, *http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Get\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t \tsuccessPayload Keys\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/project/{username}/{project}/checkout-key\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"username\"+\"}\", fmt.Sprintf(\"%v\", username), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"project\"+\"}\", fmt.Sprintf(\"%v\", project), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ \"application/json; charset=utf-8\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarQueryParams.Add(\"circle-token\", key)\n\t\t}\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn successPayload, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn successPayload, localVarHttpResponse, err\n\t}\n\tdefer localVarHttpResponse.Body.Close()\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tbodyBytes, _ := ioutil.ReadAll(localVarHttpResponse.Body)\n\t\treturn successPayload, localVarHttpResponse, reportError(\"Status: %v, Body: %s\", localVarHttpResponse.Status, bodyBytes)\n\t}\n\n\tif err = json.NewDecoder(localVarHttpResponse.Body).Decode(&successPayload); err != nil {\n\t\treturn successPayload, localVarHttpResponse, err\n\t}\n\n\n\treturn successPayload, localVarHttpResponse, err\n}", "func NewGetSecurityGroupOK() *GetSecurityGroupOK {\n\treturn 
&GetSecurityGroupOK{}\n}", "func GetPSK(t *testing.T) *PresharedKey {\n\tt.Helper()\n\n\tk, _ := GeneratePSK()\n\treturn &k\n}", "func (o *MetroclusterInterconnectGetParams) WithDefaults() *MetroclusterInterconnectGetParams {\n\to.SetDefaults()\n\treturn o\n}", "func GetAccessTocken(force bool, wechatCorpID string, wechatCorpSecret string) (err error) {\n\n\t// never get the access token yet, call the wechat api\n\tif accessToken.timeStamp == 0 {\n\t\treturn getAccessToken(wechatCorpID, wechatCorpSecret)\n\t}\n\n\t// token expired, refresh\n\tif accessToken.timeStamp+int64(accessToken.ExpiresIn) < time.Now().Unix() {\n\t\treturn getAccessToken(wechatCorpID, wechatCorpSecret)\n\t}\n\n\tif force == true {\n\t\treturn getAccessToken(wechatCorpID, wechatCorpSecret)\n\t}\n\n\treturn nil\n}", "func ExamplePrivateCloudsClient_Get_privateCloudsGet() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armavs.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := clientFactory.NewPrivateCloudsClient().Get(ctx, \"group1\", \"cloud1\", nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.PrivateCloud = armavs.PrivateCloud{\n\t// \tName: to.Ptr(\"cloud1\"),\n\t// \tType: to.Ptr(\"Microsoft.AVS/privateClouds\"),\n\t// \tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.AVS/privateClouds/cloud1\"),\n\t// \tLocation: to.Ptr(\"eastus2\"),\n\t// \tTags: map[string]*string{\n\t// \t},\n\t// \tIdentity: &armavs.PrivateCloudIdentity{\n\t// \t\tType: to.Ptr(armavs.ResourceIdentityTypeSystemAssigned),\n\t// \t\tPrincipalID: to.Ptr(\"881e5573-063f-49e4-8c08-79d7df0169d8\"),\n\t// \t\tTenantID: to.Ptr(\"881e5573-063f-49e4-8c08-79d7df0169d8\"),\n\t// \t},\n\t// \tProperties: &armavs.PrivateCloudProperties{\n\t// \t\tAvailability: &armavs.AvailabilityProperties{\n\t// \t\t\tStrategy: to.Ptr(armavs.AvailabilityStrategySingleZone),\n\t// \t\t\tZone: to.Ptr[int32](1),\n\t// \t\t},\n\t// \t\tEncryption: &armavs.Encryption{\n\t// \t\t\tKeyVaultProperties: &armavs.EncryptionKeyVaultProperties{\n\t// \t\t\t\tKeyName: to.Ptr(\"keyname1\"),\n\t// \t\t\t\tKeyState: to.Ptr(armavs.EncryptionKeyStatusConnected),\n\t// \t\t\t\tKeyVaultURL: to.Ptr(\"https://keyvault1-kmip-kvault.vault.azure.net/\"),\n\t// \t\t\t\tKeyVersion: to.Ptr(\"ver1.0\"),\n\t// \t\t\t\tVersionType: to.Ptr(armavs.EncryptionVersionTypeFixed),\n\t// \t\t\t},\n\t// \t\t\tStatus: to.Ptr(armavs.EncryptionStateEnabled),\n\t// \t\t},\n\t// \t\tIdentitySources: []*armavs.IdentitySource{\n\t// \t\t\t{\n\t// \t\t\t\tName: to.Ptr(\"group1\"),\n\t// \t\t\t\tAlias: to.Ptr(\"groupAlias\"),\n\t// \t\t\t\tBaseGroupDN: to.Ptr(\"ou=baseGroup\"),\n\t// \t\t\t\tBaseUserDN: to.Ptr(\"ou=baseUser\"),\n\t// \t\t\t\tDomain: to.Ptr(\"domain1\"),\n\t// \t\t\t\tPrimaryServer: to.Ptr(\"ldaps://1.1.1.1:636/\"),\n\t// \t\t\t\tSecondaryServer: to.Ptr(\"ldaps://1.1.1.2:636/\"),\n\t// \t\t\t\tSSL: to.Ptr(armavs.SSLEnumEnabled),\n\t// \t\t}},\n\t// 
\t\tInternet: to.Ptr(armavs.InternetEnumDisabled),\n\t// \t\tManagementCluster: &armavs.ManagementCluster{\n\t// \t\t\tClusterID: to.Ptr[int32](1),\n\t// \t\t\tClusterSize: to.Ptr[int32](4),\n\t// \t\t\tHosts: []*string{\n\t// \t\t\t\tto.Ptr(\"fakehost18.nyc1.kubernetes.center\"),\n\t// \t\t\t\tto.Ptr(\"fakehost19.nyc1.kubernetes.center\"),\n\t// \t\t\t\tto.Ptr(\"fakehost20.nyc1.kubernetes.center\"),\n\t// \t\t\t\tto.Ptr(\"fakehost21.nyc1.kubernetes.center\")},\n\t// \t\t\t},\n\t// \t\t\tCircuit: &armavs.Circuit{\n\t// \t\t\t\tExpressRouteID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/tnt13-41a90db2-9d5e-4bd5-a77a-5ce7b58213d6-eastus2/providers/Microsoft.Network/expressroutecircuits/tnt13-41a90db2-9d5e-4bd5-a77a-5ce7b58213d6-eastus2-xconnect\"),\n\t// \t\t\t\tExpressRoutePrivatePeeringID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/tnt42-cust-p01-dmo01/providers/Microsoft.Network/expressroutecircuits/tnt42-cust-p01-dmo01-er/peerings/AzurePrivatePeering\"),\n\t// \t\t\t\tPrimarySubnet: to.Ptr(\"192.168.53.0/30\"),\n\t// \t\t\t\tSecondarySubnet: to.Ptr(\"192.168.53.4/30\"),\n\t// \t\t\t},\n\t// \t\t\tEndpoints: &armavs.Endpoints{\n\t// \t\t\t\tHcxCloudManager: to.Ptr(\"https://192.168.50.4/\"),\n\t// \t\t\t\tNsxtManager: to.Ptr(\"https://192.168.50.3/\"),\n\t// \t\t\t\tVcsa: to.Ptr(\"https://192.168.50.2/\"),\n\t// \t\t\t},\n\t// \t\t\tExternalCloudLinks: []*string{\n\t// \t\t\t\tto.Ptr(\"/subscriptions/12341234-1234-1234-1234-123412341234/resourceGroups/mygroup/providers/Microsoft.AVS/privateClouds/cloud2\")},\n\t// \t\t\t\tNetworkBlock: to.Ptr(\"192.168.48.0/22\"),\n\t// \t\t\t\tProvisioningState: to.Ptr(armavs.PrivateCloudProvisioningStateSucceeded),\n\t// \t\t\t},\n\t// \t\t\tSKU: &armavs.SKU{\n\t// \t\t\t\tName: to.Ptr(\"AV36\"),\n\t// \t\t\t},\n\t// \t\t}\n}", "func NewGetBestBlockHashReturn(blockhash string) []byte {\n\tout, _ := json.Marshal(BestBlockHashReturn{BlockHash: blockhash})\n\treturn out\n}", "func NewGetWalletOK() *GetWalletOK {\n\treturn &GetWalletOK{}\n}", "func NewGetPublicIPByUUIDUsingGETUnauthorized() *GetPublicIPByUUIDUsingGETUnauthorized {\n\treturn &GetPublicIPByUUIDUsingGETUnauthorized{}\n}", "func BenchmarkGetWithoutDecryption(b *testing.B) {\n\tm := gorpmapper.New()\n\n\tm.Register(m.NewTableMapping(gorpmapper.TestEncryptedData{}, \"test_encrypted_data\", true, \"id\"))\n\n\tdb, _, _, end := test.SetupPGToCancel(b, m, sdk.TypeAPI)\n\tb.Cleanup(end)\n\n\tvar d = gorpmapper.TestEncryptedData{\n\t\tData: \"data\",\n\t\tSensitiveData: \"sensitive-data\",\n\t\tAnotherSensitiveData: \"another-sensitive-data\",\n\t}\n\n\trequire.NoError(b, m.Insert(db, &d))\n\n\tfor n := 0; n < b.N; n++ {\n\t\tquery := gorpmapper.NewQuery(\"select * from test_encrypted_data where id = $1\").Args(d.ID)\n\t\tvar d2 gorpmapper.TestEncryptedData\n\t\t_, err := m.Get(context.TODO(), db, query, &d2)\n\t\trequire.NoError(b, err)\n\t}\n}", "func (a *DefaultApiService) ProjectUsernameProjectCheckoutKeyFingerprintGet(ctx context.Context, username string, project string, fingerprint string) (Key, *http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Get\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t \tsuccessPayload Key\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/project/{username}/{project}/checkout-key/{fingerprint}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"username\"+\"}\", fmt.Sprintf(\"%v\", 
username), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"project\"+\"}\", fmt.Sprintf(\"%v\", project), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"fingerprint\"+\"}\", fmt.Sprintf(\"%v\", fingerprint), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ \"application/json; charset=utf-8\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarQueryParams.Add(\"circle-token\", key)\n\t\t}\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn successPayload, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn successPayload, localVarHttpResponse, err\n\t}\n\tdefer localVarHttpResponse.Body.Close()\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tbodyBytes, _ := ioutil.ReadAll(localVarHttpResponse.Body)\n\t\treturn successPayload, localVarHttpResponse, reportError(\"Status: %v, Body: %s\", localVarHttpResponse.Status, bodyBytes)\n\t}\n\n\tif err = json.NewDecoder(localVarHttpResponse.Body).Decode(&successPayload); err != nil {\n\t\treturn successPayload, localVarHttpResponse, err\n\t}\n\n\n\treturn successPayload, localVarHttpResponse, err\n}", "func (r *DescribeSubscribedWorkteamResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func createDeviceTwinResultDealTypeGet(baseMessage BaseMessage) DeviceTwinResult {\n\tresultDealType0Twin := make(map[string]*MsgTwin)\n\tresultDealType0Twin[\"empty\"] = nil\n\tresultDealType0Twin[\"updated\"] = generateTwinActualExpected(dtcommon.TypeUpdated, \"\", \"\")\n\tdevTwinResult := DeviceTwinResult{\n\t\tBaseMessage: baseMessage,\n\t\tTwin: resultDealType0Twin,\n\t}\n\treturn devTwinResult\n}", "func NewGetV0AuthCallbackDefault(code int) *GetV0AuthCallbackDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &GetV0AuthCallbackDefault{\n\t\t_statusCode: code,\n\t}\n}", "func (r *GetPublicKeyResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func NewGetPrivateOrderstateParamsWithTimeout(timeout time.Duration) *GetPrivateOrderstateParams {\n\tvar ()\n\treturn &GetPrivateOrderstateParams{\n\n\t\ttimeout: timeout,\n\t}\n}", "func (*CMsgSteamAuthNeeded) Descriptor() ([]byte, []int) {\n\treturn file_steam_htmlmessages_proto_rawDescGZIP(), []int{116}\n}", "func (a *DefaultApiService) Oauth2TokensTokenGet(token string) (InlineResponse2004, *http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = 
strings.ToUpper(\"Get\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t \tsuccessPayload InlineResponse2004\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/oauth2_tokens/{token}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"token\"+\"}\", fmt.Sprintf(\"%v\", token), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ \"application/json\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(nil, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn successPayload, nil, err\n\t}\n\n\t localVarHttpResponse, err := a.client.callAPI(r)\n\t if err != nil || localVarHttpResponse == nil {\n\t\t return successPayload, localVarHttpResponse, err\n\t }\n\t defer localVarHttpResponse.Body.Close()\n\t if localVarHttpResponse.StatusCode >= 300 {\n\t\treturn successPayload, localVarHttpResponse, reportError(localVarHttpResponse.Status)\n\t }\n\t\n\tif err = json.NewDecoder(localVarHttpResponse.Body).Decode(&successPayload); err != nil {\n\t \treturn successPayload, localVarHttpResponse, err\n\t}\n\n\n\treturn successPayload, localVarHttpResponse, err\n}", "func NewGetTimestampResponseDefault(code int) *GetTimestampResponseDefault {\n\treturn &GetTimestampResponseDefault{\n\t\t_statusCode: code,\n\t}\n}", "func Get(client *gophercloud.ServiceClient, id string, bearer map[string]string) (r volumes.GetResult) {\n\t_, r.Err = client.Get(getURL(client, id), &r.Body, &gophercloud.RequestOpts{\n\t\tMoreHeaders: bearer,\n\t})\n\treturn\n}", "func (o *SummaryResponse) GetTimeOk() (*SummaryTimeCounterResponse, bool) {\n\tif o == nil || o.Time == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Time, true\n}", "func (va ClawbackVestingAccount) GetVestedOnly(blockTime time.Time) sdk.Coins {\n\treturn ReadSchedule(va.StartTime, va.EndTime, va.VestingPeriods, va.OriginalVesting, blockTime.Unix())\n}", "func (a API) GetHashesPerSec(cmd *None) (e error) {\n\tRPCHandlers[\"gethashespersec\"].Call <-API{a.Ch, cmd, nil}\n\treturn\n}", "func (a *Client) GetUserIncomingFriendsWithTime(params *GetUserIncomingFriendsWithTimeParams, authInfo runtime.ClientAuthInfoWriter) (*GetUserIncomingFriendsWithTimeOK, *GetUserIncomingFriendsWithTimeBadRequest, *GetUserIncomingFriendsWithTimeUnauthorized, *GetUserIncomingFriendsWithTimeForbidden, *GetUserIncomingFriendsWithTimeNotFound, *GetUserIncomingFriendsWithTimeInternalServerError, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetUserIncomingFriendsWithTimeParams()\n\t}\n\n\tif params.Context == nil {\n\t\tparams.Context = context.Background()\n\t}\n\n\tif params.RetryPolicy != nil 
{\n\t\tparams.SetHTTPClientTransport(params.RetryPolicy)\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getUserIncomingFriendsWithTime\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/friends/namespaces/{namespace}/me/incoming-time\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetUserIncomingFriendsWithTimeReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, nil, nil, nil, nil, nil, err\n\t}\n\n\tswitch v := result.(type) {\n\n\tcase *GetUserIncomingFriendsWithTimeOK:\n\t\treturn v, nil, nil, nil, nil, nil, nil\n\n\tcase *GetUserIncomingFriendsWithTimeBadRequest:\n\t\treturn nil, v, nil, nil, nil, nil, nil\n\n\tcase *GetUserIncomingFriendsWithTimeUnauthorized:\n\t\treturn nil, nil, v, nil, nil, nil, nil\n\n\tcase *GetUserIncomingFriendsWithTimeForbidden:\n\t\treturn nil, nil, nil, v, nil, nil, nil\n\n\tcase *GetUserIncomingFriendsWithTimeNotFound:\n\t\treturn nil, nil, nil, nil, v, nil, nil\n\n\tcase *GetUserIncomingFriendsWithTimeInternalServerError:\n\t\treturn nil, nil, nil, nil, nil, v, nil\n\n\tdefault:\n\t\treturn nil, nil, nil, nil, nil, nil, fmt.Errorf(\"Unexpected Type %v\", reflect.TypeOf(v))\n\t}\n}", "func GetMatchmakingTicket(settings *playfab.Settings, postData *GetMatchmakingTicketRequestModel, entityToken string) (*GetMatchmakingTicketResultModel, error) {\n if entityToken == \"\" {\n return nil, playfab.NewCustomError(\"entityToken should not be an empty string\", playfab.ErrorGeneric)\n }\n b, errMarshal := json.Marshal(postData)\n if errMarshal != nil {\n return nil, playfab.NewCustomError(errMarshal.Error(), playfab.ErrorMarshal)\n }\n\n sourceMap, err := playfab.Request(settings, b, \"/Match/GetMatchmakingTicket\", \"X-EntityToken\", entityToken)\n if err != nil {\n return nil, err\n }\n \n result := &GetMatchmakingTicketResultModel{}\n\n config := mapstructure.DecoderConfig{\n DecodeHook: playfab.StringToDateTimeHook,\n Result: result,\n }\n \n decoder, errDecoding := mapstructure.NewDecoder(&config)\n if errDecoding != nil {\n return nil, playfab.NewCustomError(errDecoding.Error(), playfab.ErrorDecoding)\n }\n \n errDecoding = decoder.Decode(sourceMap)\n if errDecoding != nil {\n return nil, playfab.NewCustomError(errDecoding.Error(), playfab.ErrorDecoding)\n }\n\n return result, nil\n}", "func (_obj *DataService) GetUserInfo(wx_id string, userInfo *UserInfo, _opt ...map[string]string) (ret int32, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = _os.Write_string(wx_id, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = (*userInfo).WriteBlock(_os, 2)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\ttarsCtx := context.Background()\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"getUserInfo\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = _is.Read_int32(&ret, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = (*userInfo).ReadBlock(_is, 2, true)\n\tif err != nil {\n\t\treturn ret, 
err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func NewGetTokenDefault(code int) *GetTokenDefault {\n\treturn &GetTokenDefault{\n\t\t_statusCode: code,\n\t}\n}", "func getPartyShaders(c *gin.Context) {\n discordID := c.Query(\"id\")\n\n result := findUser(discordID)\n\n switch returnCode := validate(discordID); returnCode {\n case 200:\n ///////// Start Success Condition\n\n // Get the membership IDs of the party members\n client := &http.Client{}\n reqURL := \"https://www.bungie.net/platform/Destiny2/3/Profile/\" +\n result.ActiveMembership +\n \"/?components=1000\"\n req, _ := http.NewRequest(\"GET\", reqURL, nil)\n req.Header.Add(\"X-API-Key\", os.Getenv(\"API_KEY\"))\n\n resp, _ := client.Do(req)\n\n if resp.StatusCode == http.StatusOK {\n } else {\n c.String(resp.StatusCode, \"Error getting profile information\")\n return\n }\n\n var jsonResponse interface{}\n err := json.NewDecoder(resp.Body).Decode(&jsonResponse)\n if err != nil {\n fmt.Println(err)\n }\n resp.Body.Close()\n\n partyMIDs := make([]string, 0)\n\n profileMap, ok := jsonResponse.(map[string]interface{})[\"Response\"].(map[string]interface{})[\"profileTransitoryData\"].(map[string]interface{})\n if ok {\n data, ok := profileMap[\"data\"].(map[string]interface{})\n if !ok {\n partyMIDs = append(partyMIDs, result.ActiveMembership)\n } else {\n members, ok := data[\"partyMembers\"].([]interface{})\n if !ok {\n partyMIDs = append(partyMIDs, result.ActiveMembership)\n } else {\n for _, u := range members {\n fmt.Println(\"we're in here\")\n valuesMap := u.(map[string]interface{})\n partyMIDs = append(partyMIDs, valuesMap[\"membershipId\"].(string))\n }\n }\n }\n } else {\n partyMIDs = append(partyMIDs, result.ActiveMembership)\n fmt.Println(\"No active fireteam, using own shaders\")\n }\n \n fmt.Println(partyMIDs)\n\n // Now we need to get the active character id for every membership ID\n apiQueries := SafeSlice{s: make([]string, 0)}\n\n for _, u := range partyMIDs {\n cid := getActiveCharacter(u)\n apiQueries.mux.Lock()\n apiQueries.s = append(apiQueries.s, u + \"/Character/\" + cid)\n fmt.Println(u + \" \" + cid)\n apiQueries.mux.Unlock()\n }\n \n fmt.Println(apiQueries.s)\n\n // Now we have every character ID in the party, we need to get shader information for every character\n shaderHashes := SafeMap{m: make(map[string]int)}\n numCharacters := len(apiQueries.s)\n \n var wg sync.WaitGroup\n fmt.Println(\"Getting common shaders for group of \" + strconv.Itoa(numCharacters))\n for _, query := range apiQueries.s {\n wg.Add(1)\n go func(q string, wait *sync.WaitGroup) {\n defer wait.Done()\n\n fmt.Println(\"Grabbing shaders for \" + q)\n shaderURL := \"https://www.bungie.net/platform/Destiny2/3/Profile/\" +\n q +\n \"/Collectibles/\" +\n \"2063273188/\" +\n \"?components=800\"\n shaderReq, _ := http.NewRequest(\"GET\", shaderURL, nil)\n shaderReq.Header.Add(\"X-API-Key\", os.Getenv(\"API_KEY\"))\n shaderResp, _ := client.Do(shaderReq)\n\n if shaderResp.StatusCode == http.StatusOK {\n var shaderJSON interface{}\n err := 
json.NewDecoder(shaderResp.Body).Decode(&shaderJSON)\n shaderResp.Body.Close()\n\n hashData := shaderJSON.(map[string]interface{})[\"Response\"].(map[string]interface{})[\"collectibles\"].(map[string]interface{})[\"data\"].(map[string]interface{})[\"collectibles\"].(map[string]interface{})\n\n for hash, value := range hashData {\n // We will track the counts of shader hashes present using a hash\n // If the hash value int is equal to the number of characters in the party, then everyone has the shader\n state := value.(map[string]interface{})[\"state\"].(float64)\n if state != 0 {\n continue\n }\n shaderHashes.mux.Lock()\n count, ok := shaderHashes.m[hash]\n shaderHashes.mux.Unlock()\n if ok {\n shaderHashes.mux.Lock()\n shaderHashes.m[hash] = count + 1\n shaderHashes.mux.Unlock()\n } else {\n shaderHashes.mux.Lock()\n shaderHashes.m[hash] = 1\n shaderHashes.mux.Unlock()\n }\n\n if ( err != nil ) {\n fmt.Println(err)\n }\n }\n } else {\n c.String(shaderResp.StatusCode, \"Error getting shader information\")\n return\n }\n }(query, &wg)\n }\n wg.Wait()\n\n commonHashes := make([]string, 0)\n for hash, count := range shaderHashes.m {\n if count == numCharacters {\n commonHashes = append(commonHashes, hash)\n }\n }\n\n shaders := make([]Shader, 0)\n for _, hash := range commonHashes {\n shaders = append(shaders, matchCollectibleHash(hash))\n }\n\n c.JSON(200, shaders)\n ///////// End Success Condition\n case 300:\n c.String(300, \"Please select a membership ID to continue request\")\n case 401:\n c.String(401, \"User must register\")\n default:\n c.String(500, \"Unexpected error\")\n }\n}", "func NewGetAllProjectsUsingGETUnauthorized() *GetAllProjectsUsingGETUnauthorized {\n\treturn &GetAllProjectsUsingGETUnauthorized{}\n}", "func NewGetOK() *GetOK {\n\treturn &GetOK{}\n}", "func NewGetAPIPublicV1TeamForbidden() *GetAPIPublicV1TeamForbidden {\n\treturn &GetAPIPublicV1TeamForbidden{}\n}", "func readSnapshots( asset string, tm time.Time, userId, sessionId, privateKey string, client chan models.Opponent) {\n\n snapData, err := mixin.NetworkSnapshots(asset, tm, true, 100, userId, sessionId, privateKey)\n if err != nil {\n log.Println(err)\n }\n var snapInfo map[string]interface{}\n err = json.Unmarshal([]byte(snapData), &snapInfo)\n if err != nil {\n log.Fatal(err)\n }\n var ctm string\n var op models.Opponent\n for _, v := range (snapInfo[\"data\"].([]interface{})) {\n if v.(map[string]interface{})[\"opponent_id\"] != nil {\n log.Println(\"OMG,i find it ----------------------------------------\")\n op.OpponentID = v.(map[string]interface{})[\"opponent_id\"].(string)\n op.Amount = v.(map[string]interface{})[\"amount\"].(string)\n }\n // log.Println(v)\n // log.Println(val)\n ctm = v.(map[string]interface{})[\"created_at\"].(string)\n }\n op.TimeStamp = ctm\n log.Println(ctm)\n client <- op\n}", "func NewSteamAPI(key string) *SteamAPI {\n\treturn &SteamAPI{key: key}\n}", "func NewOnlineMeetingInfo()(*OnlineMeetingInfo) {\n m := &OnlineMeetingInfo{\n }\n m.backingStore = ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n m.SetAdditionalData(make(map[string]any))\n return m\n}", "func (o *GetParams) WithTimeout(timeout time.Duration) *GetParams {\n\to.SetTimeout(timeout)\n\treturn o\n}", "func NewGetSopPaymentResponseUsingGETUnauthorized() *GetSopPaymentResponseUsingGETUnauthorized {\n\treturn &GetSopPaymentResponseUsingGETUnauthorized{}\n}", "func (o *GetPageDataUsingGETParams) WithTimeout(timeout time.Duration) *GetPageDataUsingGETParams 
{\n\to.SetTimeout(timeout)\n\treturn o\n}", "func (client *KpiClient) getCreateRequest(ctx context.Context, resourceGroupName string, hubName string, kpiName string, options *KpiClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CustomerInsights/hubs/{hubName}/kpi/{kpiName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif hubName == \"\" {\n\t\treturn nil, errors.New(\"parameter hubName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{hubName}\", url.PathEscape(hubName))\n\tif kpiName == \"\" {\n\t\treturn nil, errors.New(\"parameter kpiName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{kpiName}\", url.PathEscape(kpiName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-04-26\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func NewGetByIDUsingGETForbidden() *GetByIDUsingGETForbidden {\n\treturn &GetByIDUsingGETForbidden{}\n}", "func (a *LaborApiService) GetShift(ctx context.Context, id string) (GetShiftResponse, *http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Get\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue GetShiftResponse\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/v2/labor/shifts/{id}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"id\"+\"}\", fmt.Sprintf(\"%v\", id), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif 
localVarHttpResponse.StatusCode < 300 {\n\t\t// If we succeed, return the data, otherwise pass on to decode error.\n\t\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"));\n\t\tif err == nil { \n\t\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t\t}\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 200 {\n\t\t\tvar v GetShiftResponse\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"));\n\t\t\t\tif err != nil {\n\t\t\t\t\tnewErr.error = err.Error()\n\t\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t\t}\n\t\t\t\tnewErr.model = v\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}", "func (s *FrontendServer) Get(ctx context.Context, req *pb.GetRequest) (*pb.GetResponse, error) {\n\tvar err error\n\tvar returnErr error = nil\n\tvar returnRes *pb.GetResponse\n\tvar res *clientv3.GetResponse\n\tvar val string\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancel()\n\tkey := req.GetKey()\n\tswitch req.GetType() {\n\tcase utils.LocalData:\n\t\tres, err = s.localSt.Get(ctx, key) // get the key itself, no hashes used\n\t\t// log.Println(\"All local keys in this edge group\")\n\t\t// log.Println(s.localSt.Get(ctx, \"\", clientv3.WithPrefix()))\n\tcase utils.GlobalData:\n\t\tisHashed := req.GetIsHashed()\n\t\tif s.gateway == nil {\n\t\t\treturn returnRes, fmt.Errorf(\"RangeGet request failed: gateway node not initialized at the edge\")\n\t\t}\n\t\tstate, err := s.gateway.GetStateRPC()\n\t\tif err != nil {\n\t\t\treturn returnRes, fmt.Errorf(\"failed to get state of gateway node\")\n\t\t}\n\t\tif state < dht.Ready { // could happen if gateway node was created but didnt join dht\n\t\t\treturn returnRes, fmt.Errorf(\"edge node %s not connected to dht yet or gateway node not ready\", s.print())\n\t\t}\n\t\t// log.Println(\"All global keys in this edge group\")\n\t\t// log.Println(s.globalSt.Get(ctx, \"\", clientv3.WithPrefix()))\n\t\tif !isHashed {\n\t\t\tkey = s.gateway.Conf.IDFunc(key)\n\t\t}\n\t\tans, er := s.gateway.CanStoreRPC(key)\n\t\tif er != nil {\n\t\t\tlog.Fatalf(\"Get request failed: communication with gateway node failed\")\n\t\t}\n\t\tif ans {\n\t\t\tres, err = s.globalSt.Get(ctx, key)\n\t\t} else {\n\t\t\tval, err = s.gateway.GetKVRPC(key)\n\t\t}\n\t}\n\t// cancel()\n\treturnErr = checkError(err)\n\tif (res != nil) && (returnErr == nil) {\n\t\t// TODO: what if Kvs returns more than one kv-pair, is that possible?\n\t\tif len(res.Kvs) > 0 {\n\t\t\tkv := res.Kvs[0]\n\t\t\tval = string(kv.Value)\n\t\t\t// log.Printf(\"Key: %s, Value: %s\\n\", kv.Key, kv.Value)\n\t\t\treturnRes = &pb.GetResponse{Value: val, Size: int32(len(val))}\n\t\t} else {\n\t\t\treturnErr = status.Errorf(codes.NotFound, \"Key Not Found: %s\", req.GetKey())\n\t\t}\n\t} else {\n\t\tif returnErr == nil {\n\t\t\t// we already have the value from a remote group\n\t\t\treturnRes = &pb.GetResponse{Value: val, Size: int32(len(val))}\n\t\t}\n\t}\n\treturn returnRes, returnErr\n}", "func (me *PROTECTIONJOBS_IMPL) GetProtectionJobs (\r\n includeLastRunAndStats *bool,\r\n policyIds []string,\r\n isActive *bool,\r\n isDeleted *bool,\r\n onlyReturnBasicSummary 
*bool,\r\n environments []models.EnvironmentsEnum,\r\n tenantIds []string,\r\n allUnderHierarchy *bool,\r\n ids []int64,\r\n names []string) ([]*models.ProtectionJob, error) {\r\n //the endpoint path uri\r\n _pathUrl := \"/public/protectionJobs\"\r\n\r\n //variable to hold errors\r\n var err error = nil\r\n //the base uri for api requests\r\n _queryBuilder := configuration.GetBaseURI(configuration.DEFAULT_HOST,me.config);\r\n\r\n //prepare query string for API call\r\n _queryBuilder = _queryBuilder + _pathUrl\r\n\r\n //process optional query parameters\r\n _queryBuilder, err = apihelper.AppendUrlWithQueryParameters(_queryBuilder, map[string]interface{} {\r\n \"includeLastRunAndStats\" : includeLastRunAndStats,\r\n \"policyIds\" : policyIds,\r\n \"isActive\" : isActive,\r\n \"isDeleted\" : isDeleted,\r\n \"onlyReturnBasicSummary\" : onlyReturnBasicSummary,\r\n \"environments\" : models.EnvironmentsEnumArrayToValue(environments),\r\n \"tenantIds\" : tenantIds,\r\n \"allUnderHierarchy\" : allUnderHierarchy,\r\n \"ids\" : ids,\r\n \"names\" : names,\r\n })\r\n if err != nil {\r\n //error in query param handling\r\n return nil, err\r\n }\r\n\r\n //validate and preprocess url\r\n _queryBuilder, err = apihelper.CleanUrl(_queryBuilder)\r\n if err != nil {\r\n //error in url validation or cleaning\r\n return nil, err\r\n }\r\n if me.config.AccessToken() == nil {\r\n return nil, errors.New(\"Access Token not set. Please authorize the client using client.Authorize()\");\r\n }\r\n //prepare headers for the outgoing request\r\n headers := map[string]interface{} {\r\n \"user-agent\" : \"cohesity-Go-sdk-6.2.0\",\r\n \"accept\" : \"application/json\",\r\n \"Authorization\" : fmt.Sprintf(\"%s %s\",*me.config.AccessToken().TokenType, *me.config.AccessToken().AccessToken),\r\n }\r\n\r\n //prepare API request\r\n _request := unirest.Get(_queryBuilder, headers)\r\n //and invoke the API call request to fetch the response\r\n _response, err := unirest.AsString(_request,me.config.SkipSSL());\r\n if err != nil {\r\n //error in API invocation\r\n return nil, err\r\n }\r\n\r\n //error handling using HTTP status codes\r\n if (_response.Code == 0) {\r\n err = apihelper.NewAPIError(\"Error\", _response.Code, _response.RawBody)\r\n } else if (_response.Code < 200) || (_response.Code > 206) { //[200,206] = HTTP OK\r\n err = apihelper.NewAPIError(\"HTTP Response Not OK\", _response.Code, _response.RawBody)\r\n }\r\n if(err != nil) {\r\n //error detected in status code validation\r\n return nil, err\r\n }\r\n\r\n //returning the response\r\n var retVal []*models.ProtectionJob\r\n err = json.Unmarshal(_response.RawBody, &retVal)\r\n\r\n if err != nil {\r\n //error in parsing\r\n return nil, err\r\n }\r\n return retVal, nil\r\n\r\n}", "func NewGetTallyOK() *GetTallyOK {\n\treturn &GetTallyOK{}\n}", "func (r *CreateLayerResponse) SDKResponseMetdata() *aws.Response {\n\treturn r.response\n}", "func ExampleHeatMapClient_Get_heatMapGetWithTopLeftBotRight() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armtrafficmanager.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := clientFactory.NewHeatMapClient().Get(ctx, \"azuresdkfornetautoresttrafficmanager1323\", \"azuresdkfornetautoresttrafficmanager3880\", &armtrafficmanager.HeatMapClientGetOptions{TopLeft: 
[]float64{\n\t\t10,\n\t\t50.001},\n\t\tBotRight: []float64{\n\t\t\t-50.001,\n\t\t\t80},\n\t})\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.HeatMapModel = armtrafficmanager.HeatMapModel{\n\t// \tName: to.Ptr(\"default\"),\n\t// \tType: to.Ptr(\"Microsoft.Network/trafficManagerProfiles/heatMaps/latencyVolumeByLocation\"),\n\t// \tID: to.Ptr(\"/subscriptions/{subscription-id}/resourceGroups/azuresdkfornetautoresttrafficmanager1323/providers/Microsoft.Network/trafficManagerProfiles/azuresdkfornetautoresttrafficmanager3880/heatMaps/latencyVolumeByLocation\"),\n\t// \tProperties: &armtrafficmanager.HeatMapProperties{\n\t// \t\tEndTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2017-08-22T12:50:00Z\"); return t}()),\n\t// \t\tEndpoints: []*armtrafficmanager.HeatMapEndpoint{\n\t// \t\t\t{\n\t// \t\t\t\tEndpointID: to.Ptr[int32](1),\n\t// \t\t\t\tResourceID: to.Ptr(\"/subscriptions/{subscription-id}/resourceGroups/azuresdkfornetautoresttrafficmanager1323/providers/Microsoft.Network/trafficManagerProfiles/azuresdkfornetautoresttrafficmanager3880/externalEndpoints/azuresdkfornetautoresttrafficmanager3880\"),\n\t// \t\t\t},\n\t// \t\t\t{\n\t// \t\t\t\tEndpointID: to.Ptr[int32](2),\n\t// \t\t\t\tResourceID: to.Ptr(\"/subscriptions/562d4115-c01e-4m67-9bbd-c11c2d58ad73/resourceGroups/azuresdkfornetautoresttrafficmanager1300/providers/Microsoft.Network/trafficManagerProfiles/azuresdkfornetautoresttrafficmanager3885/externalEndpoints/azuresdkfornetautoresttrafficmanager3881\"),\n\t// \t\t}},\n\t// \t\tStartTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2017-08-15T12:00:00Z\"); return t}()),\n\t// \t\tTrafficFlows: []*armtrafficmanager.TrafficFlow{\n\t// \t\t\t{\n\t// \t\t\t\tLatitude: to.Ptr[float64](9.99),\n\t// \t\t\t\tLongitude: to.Ptr[float64](75.01),\n\t// \t\t\t\tQueryExperiences: []*armtrafficmanager.QueryExperience{\n\t// \t\t\t\t\t{\n\t// \t\t\t\t\t\tEndpointID: to.Ptr[int32](1),\n\t// \t\t\t\t\t\tLatency: to.Ptr[float64](99.222),\n\t// \t\t\t\t\t\tQueryCount: to.Ptr[int32](1000000),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\t{\n\t// \t\t\t\t\t\tEndpointID: to.Ptr[int32](2),\n\t// \t\t\t\t\t\tLatency: to.Ptr[float64](1.222),\n\t// \t\t\t\t\t\tQueryCount: to.Ptr[int32](1),\n\t// \t\t\t\t}},\n\t// \t\t\t\tSourceIP: to.Ptr(\"1.1.1.1\"),\n\t// \t\t\t},\n\t// \t\t\t{\n\t// \t\t\t\tLatitude: to.Ptr[float64](-49.99),\n\t// \t\t\t\tLongitude: to.Ptr[float64](51),\n\t// \t\t\t\tQueryExperiences: []*armtrafficmanager.QueryExperience{\n\t// \t\t\t\t\t{\n\t// \t\t\t\t\t\tEndpointID: to.Ptr[int32](1),\n\t// \t\t\t\t\t\tLatency: to.Ptr[float64](96.222),\n\t// \t\t\t\t\t\tQueryCount: to.Ptr[int32](100),\n\t// \t\t\t\t\t},\n\t// \t\t\t\t\t{\n\t// \t\t\t\t\t\tEndpointID: to.Ptr[int32](2),\n\t// \t\t\t\t\t\tLatency: to.Ptr[float64](4.222),\n\t// \t\t\t\t\t\tQueryCount: to.Ptr[int32](500),\n\t// \t\t\t\t}},\n\t// \t\t\t\tSourceIP: to.Ptr(\"2.255.1.1\"),\n\t// \t\t}},\n\t// \t},\n\t// }\n}", "func get(url string, qparms rest.QParms) ([]byte, error) {\n\theaders := rest.Headers{\"Authorization\": \"Bearer \" + token}\n\tfor k, v := range defaultHeaders {\n\t\theaders[k] = v\n\t}\n\tclient := rest.NewClient(headers, 
qparms)\n\n\tbody, err := client.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn body, nil\n}" ]
[ "0.52899104", "0.447495", "0.4440635", "0.44148892", "0.4270405", "0.42306632", "0.42255187", "0.42208046", "0.41822582", "0.41728333", "0.41548944", "0.4154055", "0.4148511", "0.41375974", "0.41100857", "0.41024846", "0.40931123", "0.4068021", "0.40467957", "0.4037121", "0.40335768", "0.40267405", "0.4015609", "0.40059656", "0.4002533", "0.39967093", "0.39835176", "0.3976981", "0.3963352", "0.39491954", "0.39387867", "0.39353967", "0.39263752", "0.39217684", "0.39152035", "0.390621", "0.38971394", "0.38968664", "0.38963562", "0.38923877", "0.38892323", "0.38787007", "0.38724378", "0.38712716", "0.38707605", "0.38686973", "0.38568473", "0.38477165", "0.38476464", "0.38475272", "0.3845701", "0.38432112", "0.38423124", "0.38401586", "0.3836561", "0.3828503", "0.38197973", "0.38196483", "0.38163346", "0.38154233", "0.38144892", "0.38131186", "0.380592", "0.38050386", "0.38021243", "0.38019788", "0.38003767", "0.3796064", "0.3787119", "0.37861487", "0.37734342", "0.37703046", "0.37620813", "0.37606868", "0.37592828", "0.37570456", "0.37559086", "0.374966", "0.37456986", "0.374245", "0.37383133", "0.37364027", "0.3734519", "0.37316602", "0.37282395", "0.37275276", "0.37230438", "0.371063", "0.37091574", "0.37084138", "0.3707311", "0.37027672", "0.3694036", "0.3692459", "0.3692078", "0.36918616", "0.3690306", "0.36892518", "0.36884683", "0.3687746" ]
0.7406249
0
NewProtectedGetPlaytimeGameTelemetryV1ProtectedSteamIdsSteamIDPlaytimeGetUnprocessableEntity creates a ProtectedGetPlaytimeGameTelemetryV1ProtectedSteamIdsSteamIDPlaytimeGetUnprocessableEntity with default headers values
func NewProtectedGetPlaytimeGameTelemetryV1ProtectedSteamIdsSteamIDPlaytimeGetUnprocessableEntity() *ProtectedGetPlaytimeGameTelemetryV1ProtectedSteamIdsSteamIDPlaytimeGetUnprocessableEntity { return &ProtectedGetPlaytimeGameTelemetryV1ProtectedSteamIdsSteamIDPlaytimeGetUnprocessableEntity{} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewGetTaskDetailsUnprocessableEntity() *GetTaskDetailsUnprocessableEntity {\n\n\treturn &GetTaskDetailsUnprocessableEntity{}\n}", "func NewGetAPIPublicV1TeamUnprocessableEntity() *GetAPIPublicV1TeamUnprocessableEntity {\n\treturn &GetAPIPublicV1TeamUnprocessableEntity{}\n}", "func NewWeaviateKeyCreateUnprocessableEntity() *WeaviateKeyCreateUnprocessableEntity {\n\treturn &WeaviateKeyCreateUnprocessableEntity{}\n}", "func NewGetFlagWorkflowsUnprocessableEntity() *GetFlagWorkflowsUnprocessableEntity {\n\treturn &GetFlagWorkflowsUnprocessableEntity{}\n}", "func NewGetPaymentRequestEDIUnprocessableEntity() *GetPaymentRequestEDIUnprocessableEntity {\n\n\treturn &GetPaymentRequestEDIUnprocessableEntity{}\n}", "func NewGetDeltaUnprocessableEntity() *GetDeltaUnprocessableEntity {\n\treturn &GetDeltaUnprocessableEntity{}\n}", "func NewGetDocumentUnprocessableEntity() *GetDocumentUnprocessableEntity {\n\n\treturn &GetDocumentUnprocessableEntity{}\n}", "func NewGetNodeUnprocessableEntity() *GetNodeUnprocessableEntity {\n\treturn &GetNodeUnprocessableEntity{}\n}", "func NewModifyCryptokeyUnprocessableEntity() *ModifyCryptokeyUnprocessableEntity {\n\treturn &ModifyCryptokeyUnprocessableEntity{}\n}", "func NewCreateanewRtcSessionUnprocessableEntity() *CreateanewRtcSessionUnprocessableEntity {\n\treturn &CreateanewRtcSessionUnprocessableEntity{}\n}", "func NewProtectedGetPlaytimeGameTelemetryV1ProtectedSteamIdsSteamIDPlaytimeGetOK() *ProtectedGetPlaytimeGameTelemetryV1ProtectedSteamIdsSteamIDPlaytimeGetOK {\n\treturn &ProtectedGetPlaytimeGameTelemetryV1ProtectedSteamIdsSteamIDPlaytimeGetOK{}\n}", "func NewWeaviateThingsPatchUnprocessableEntity() *WeaviateThingsPatchUnprocessableEntity {\n\treturn &WeaviateThingsPatchUnprocessableEntity{}\n}", "func (o *GetTaskDetailsUnprocessableEntity) WithPayload(payload *models.ValidationError) *GetTaskDetailsUnprocessableEntity {\n\to.Payload = payload\n\treturn o\n}", "func NewSendMailUnprocessableEntity() *SendMailUnprocessableEntity {\n\treturn &SendMailUnprocessableEntity{}\n}", "func (r *Response) UnprocessableEntity(v interface{}) {\n\tr.writeResponse(http.StatusUnprocessableEntity, v)\n}", "func NewWeaviateActionsPatchUnprocessableEntity() *WeaviateActionsPatchUnprocessableEntity {\n\n\treturn &WeaviateActionsPatchUnprocessableEntity{}\n}", "func NewCreateLookmlModelUnprocessableEntity() *CreateLookmlModelUnprocessableEntity {\n\treturn &CreateLookmlModelUnprocessableEntity{}\n}", "func (r *Responder) UnprocessableEntity() { r.write(http.StatusUnprocessableEntity) }", "func RenderUnprocessableEntity(w http.ResponseWriter, message ...interface{}) {\n\tRender(w, UnprocessableEntity(message...))\n}", "func NewCreateClaimUnprocessableEntity() *CreateClaimUnprocessableEntity {\n\treturn &CreateClaimUnprocessableEntity{}\n}", "func (o *GetDocumentUnprocessableEntity) WithPayload(payload *ghcmessages.ValidationError) *GetDocumentUnprocessableEntity {\n\to.Payload = payload\n\treturn o\n}", "func NewRecalculatePaymentRequestUnprocessableEntity() *RecalculatePaymentRequestUnprocessableEntity {\n\n\treturn &RecalculatePaymentRequestUnprocessableEntity{}\n}", "func NewGetLargeScreenDataRequestWithoutParam() *GetLargeScreenDataRequest {\n\n return &GetLargeScreenDataRequest{\n JDCloudRequest: core.JDCloudRequest{\n URL: \"/regions/{regionId}/getData\",\n Method: \"GET\",\n Header: nil,\n Version: \"v1\",\n },\n }\n}", "func (r Response) UnprocessableEntity(code string, payload Payload, header ...ResponseHeader) {\n\tr.Response(code, http.UnprocessableEntity, 
payload, header...)\n}", "func NewDeleteAPIPublicV1TeamTeamUnprocessableEntity() *DeleteAPIPublicV1TeamTeamUnprocessableEntity {\n\treturn &DeleteAPIPublicV1TeamTeamUnprocessableEntity{}\n}", "func UnprocessableEntity(message ...interface{}) Err {\n\treturn Boomify(http.StatusUnprocessableEntity, message...)\n}", "func NewDeleteShipmentUnprocessableEntity() *DeleteShipmentUnprocessableEntity {\n\n\treturn &DeleteShipmentUnprocessableEntity{}\n}", "func (o *GetPaymentRequestEDIUnprocessableEntity) WithPayload(payload *supportmessages.ValidationError) *GetPaymentRequestEDIUnprocessableEntity {\n\to.Payload = payload\n\treturn o\n}", "func NewDeleteDebugRequestUnprocessableEntity() *DeleteDebugRequestUnprocessableEntity {\n\treturn &DeleteDebugRequestUnprocessableEntity{}\n}", "func NewGetPrimeEntitlementsForbidden() *GetPrimeEntitlementsForbidden {\n\treturn &GetPrimeEntitlementsForbidden{}\n}", "func NewCreateChannelUnprocessableEntity() *CreateChannelUnprocessableEntity {\n\treturn &CreateChannelUnprocessableEntity{}\n}", "func NewPcloudPvminstancesPutUnprocessableEntity() *PcloudPvminstancesPutUnprocessableEntity {\n\treturn &PcloudPvminstancesPutUnprocessableEntity{}\n}", "func NewCreateLocationUnprocessableEntity() *CreateLocationUnprocessableEntity {\n\treturn &CreateLocationUnprocessableEntity{}\n}", "func NewUpdateStaticIDPUnprocessableEntity() *UpdateStaticIDPUnprocessableEntity {\n\treturn &UpdateStaticIDPUnprocessableEntity{}\n}", "func NewUpdateMTOPostCounselingInformationUnprocessableEntity() *UpdateMTOPostCounselingInformationUnprocessableEntity {\n\treturn &UpdateMTOPostCounselingInformationUnprocessableEntity{}\n}", "func NewCreateClientUnprocessableEntity() *CreateClientUnprocessableEntity {\n\treturn &CreateClientUnprocessableEntity{}\n}", "func NewUpdateUserUnprocessableEntity() *UpdateUserUnprocessableEntity {\n\n\treturn &UpdateUserUnprocessableEntity{}\n}", "func NewSetRoleGroupsUnprocessableEntity() *SetRoleGroupsUnprocessableEntity {\n\treturn &SetRoleGroupsUnprocessableEntity{}\n}", "func NewCreateWidgetUnprocessableEntity() *CreateWidgetUnprocessableEntity {\n\treturn &CreateWidgetUnprocessableEntity{}\n}", "func NewLeaderboardGetUnauthorized() *LeaderboardGetUnauthorized {\n\treturn &LeaderboardGetUnauthorized{}\n}", "func NewCheckTransactionCreditLimitUnprocessableEntity() *CheckTransactionCreditLimitUnprocessableEntity {\n\treturn &CheckTransactionCreditLimitUnprocessableEntity{}\n}", "func NewCreateThemeUnprocessableEntity() *CreateThemeUnprocessableEntity {\n\treturn &CreateThemeUnprocessableEntity{}\n}", "func NewSearchLogQueryUnprocessableEntity() *SearchLogQueryUnprocessableEntity {\n\treturn &SearchLogQueryUnprocessableEntity{}\n}", "func NewCreateProductUnprocessableEntity() *CreateProductUnprocessableEntity {\n\treturn &CreateProductUnprocessableEntity{}\n}", "func NewPatchNoticesNoticeIDUnprocessableEntity() *PatchNoticesNoticeIDUnprocessableEntity {\n\treturn &PatchNoticesNoticeIDUnprocessableEntity{}\n}", "func (client MultipleResponsesClient) Get202None204NoneDefaultNone400InvalidPreparer() (*http.Request, error) {\n preparer := autorest.CreatePreparer(\n autorest.AsGet(),\n autorest.WithBaseURL(client.BaseURI),\n autorest.WithPath(\"/http/payloads/202/none/204/none/default/none/response/400/invalid\"))\n return preparer.Prepare(&http.Request{})\n}", "func BenchmarkGetWithoutDecryption(b *testing.B) {\n\tm := gorpmapper.New()\n\n\tm.Register(m.NewTableMapping(gorpmapper.TestEncryptedData{}, \"test_encrypted_data\", true, \"id\"))\n\n\tdb, _, _, end := 
test.SetupPGToCancel(b, m, sdk.TypeAPI)\n\tb.Cleanup(end)\n\n\tvar d = gorpmapper.TestEncryptedData{\n\t\tData: \"data\",\n\t\tSensitiveData: \"sensitive-data\",\n\t\tAnotherSensitiveData: \"another-sensitive-data\",\n\t}\n\n\trequire.NoError(b, m.Insert(db, &d))\n\n\tfor n := 0; n < b.N; n++ {\n\t\tquery := gorpmapper.NewQuery(\"select * from test_encrypted_data where id = $1\").Args(d.ID)\n\t\tvar d2 gorpmapper.TestEncryptedData\n\t\t_, err := m.Get(context.TODO(), db, query, &d2)\n\t\trequire.NoError(b, err)\n\t}\n}", "func GetLeaderboardAroundEntity(settings *playfab.Settings, postData *GetLeaderboardAroundEntityRequestModel, entityToken string) (*GetEntityLeaderboardResponseModel, error) {\n if entityToken == \"\" {\n return nil, playfab.NewCustomError(\"entityToken should not be an empty string\", playfab.ErrorGeneric)\n }\n b, errMarshal := json.Marshal(postData)\n if errMarshal != nil {\n return nil, playfab.NewCustomError(errMarshal.Error(), playfab.ErrorMarshal)\n }\n\n sourceMap, err := playfab.Request(settings, b, \"/Leaderboard/GetLeaderboardAroundEntity\", \"X-EntityToken\", entityToken)\n if err != nil {\n return nil, err\n }\n \n result := &GetEntityLeaderboardResponseModel{}\n\n config := mapstructure.DecoderConfig{\n DecodeHook: playfab.StringToDateTimeHook,\n Result: result,\n }\n \n decoder, errDecoding := mapstructure.NewDecoder(&config)\n if errDecoding != nil {\n return nil, playfab.NewCustomError(errDecoding.Error(), playfab.ErrorDecoding)\n }\n \n errDecoding = decoder.Decode(sourceMap)\n if errDecoding != nil {\n return nil, playfab.NewCustomError(errDecoding.Error(), playfab.ErrorDecoding)\n }\n\n return result, nil\n}", "func NewGetTeamsForbidden() *GetTeamsForbidden {\n\n\treturn &GetTeamsForbidden{}\n}", "func NewCreatePackageUnprocessableEntity() *CreatePackageUnprocessableEntity {\n\treturn &CreatePackageUnprocessableEntity{}\n}", "func NewPatchParkingOperatorsCentreIDUnprocessableEntity() *PatchParkingOperatorsCentreIDUnprocessableEntity {\n\treturn &PatchParkingOperatorsCentreIDUnprocessableEntity{}\n}", "func NewDescribeThingShadowRequestWithoutParam() *DescribeThingShadowRequest {\n\n return &DescribeThingShadowRequest{\n JDCloudRequest: core.JDCloudRequest{\n URL: \"/regions/{regionId}/products/{productKey}/devices/{identifier}/shadow\",\n Method: \"GET\",\n Header: nil,\n Version: \"v2\",\n },\n }\n}", "func NewGetForbidden(body *GetForbiddenResponseBody) *goa.ServiceError {\n\tv := &goa.ServiceError{\n\t\tName: *body.Name,\n\t\tID: *body.ID,\n\t\tMessage: *body.Message,\n\t\tTemporary: *body.Temporary,\n\t\tTimeout: *body.Timeout,\n\t\tFault: *body.Fault,\n\t}\n\n\treturn v\n}", "func NewCreateMTOServiceItemUnprocessableEntity() *CreateMTOServiceItemUnprocessableEntity {\n\treturn &CreateMTOServiceItemUnprocessableEntity{}\n}", "func NewGraphqlPostUnprocessableEntity() *GraphqlPostUnprocessableEntity {\n\n\treturn &GraphqlPostUnprocessableEntity{}\n}", "func NewCreateMergeQueryUnprocessableEntity() *CreateMergeQueryUnprocessableEntity {\n\treturn &CreateMergeQueryUnprocessableEntity{}\n}", "func NewGetCustomIntegrationVersionByIDUsingGETForbidden() *GetCustomIntegrationVersionByIDUsingGETForbidden {\n\treturn &GetCustomIntegrationVersionByIDUsingGETForbidden{}\n}", "func NewQueryForbiddenInfoListRequestWithoutParam() *QueryForbiddenInfoListRequest {\n\n return &QueryForbiddenInfoListRequest{\n JDCloudRequest: core.JDCloudRequest{\n URL: \"/forbiddenInfo:query\",\n Method: \"POST\",\n Header: nil,\n Version: \"v1\",\n },\n }\n}", "func 
NewPostProductModelProposalUnprocessableEntity() *PostProductModelProposalUnprocessableEntity {\n\treturn &PostProductModelProposalUnprocessableEntity{}\n}", "func NewPublicQueryUserStatItems2UnprocessableEntity() *PublicQueryUserStatItems2UnprocessableEntity {\n\treturn &PublicQueryUserStatItems2UnprocessableEntity{}\n}", "func NewUpdateHomepageUnprocessableEntity() *UpdateHomepageUnprocessableEntity {\n\treturn &UpdateHomepageUnprocessableEntity{}\n}", "func NewCreateEvaluationReportUnprocessableEntity() *CreateEvaluationReportUnprocessableEntity {\n\n\treturn &CreateEvaluationReportUnprocessableEntity{}\n}", "func NewPostPunchInUnprocessableEntity() *PostPunchInUnprocessableEntity {\n\treturn &PostPunchInUnprocessableEntity{}\n}", "func (client MultipleResponsesClient) Get202None204NoneDefaultNone400InvalidSender(req *http.Request) (*http.Response, error) {\n return autorest.SendWithSender(client, req)\n}", "func (client MultipleResponsesClient) Get202None204NoneDefaultNone202InvalidPreparer() (*http.Request, error) {\n preparer := autorest.CreatePreparer(\n autorest.AsGet(),\n autorest.WithBaseURL(client.BaseURI),\n autorest.WithPath(\"/http/payloads/202/none/204/none/default/none/response/202/invalid\"))\n return preparer.Prepare(&http.Request{})\n}", "func NewDeleteComponentUnprocessableEntity() *DeleteComponentUnprocessableEntity {\n\treturn &DeleteComponentUnprocessableEntity{}\n}", "func NewGetPublicIPByUUIDUsingGETForbidden() *GetPublicIPByUUIDUsingGETForbidden {\n\treturn &GetPublicIPByUUIDUsingGETForbidden{}\n}", "func (f *FakePrivilegedProjectProvider) GetUnsecured(projectInternalName string, options *provider.ProjectGetOptions) (*kubermaticapiv1.Project, error) {\n\tif NoExistingFakeProjectID == projectInternalName {\n\t\treturn nil, createError(http.StatusNotFound, \"\")\n\t}\n\tif ForbiddenFakeProjectID == projectInternalName {\n\t\treturn nil, createError(http.StatusForbidden, \"\")\n\t}\n\n\treturn nil, nil\n}", "func NewObjectsPatchUnprocessableEntity() *ObjectsPatchUnprocessableEntity {\n\n\treturn &ObjectsPatchUnprocessableEntity{}\n}", "func NewPatchAssetCodeUnprocessableEntity() *PatchAssetCodeUnprocessableEntity {\n\treturn &PatchAssetCodeUnprocessableEntity{}\n}", "func NewObjectsClassPutUnprocessableEntity() *ObjectsClassPutUnprocessableEntity {\n\n\treturn &ObjectsClassPutUnprocessableEntity{}\n}", "func NewGetPrimeEntitlementsUnauthorized() *GetPrimeEntitlementsUnauthorized {\n\treturn &GetPrimeEntitlementsUnauthorized{}\n}", "func NewBatchGetWaitEstimatesForbidden(body *BatchGetWaitEstimatesForbiddenResponseBody) *goa.ServiceError {\n\tv := &goa.ServiceError{\n\t\tName: *body.Name,\n\t\tID: *body.ID,\n\t\tMessage: *body.Message,\n\t\tTemporary: *body.Temporary,\n\t\tTimeout: *body.Timeout,\n\t\tFault: *body.Fault,\n\t}\n\n\treturn v\n}", "func NewCreateSsoEmbedURLUnprocessableEntity() *CreateSsoEmbedURLUnprocessableEntity {\n\treturn &CreateSsoEmbedURLUnprocessableEntity{}\n}", "func NewPutTeamsTeamIDMembersUsernameUnprocessableEntity() *PutTeamsTeamIDMembersUsernameUnprocessableEntity {\n\treturn &PutTeamsTeamIDMembersUsernameUnprocessableEntity{}\n}", "func (r *ImpossibleTravelRiskEventRequest) Get(ctx context.Context) (resObj *ImpossibleTravelRiskEvent, err error) {\n\tvar query string\n\tif r.query != nil {\n\t\tquery = \"?\" + r.query.Encode()\n\t}\n\terr = r.JSONRequest(ctx, \"GET\", query, nil, &resObj)\n\treturn\n}", "func NewGetAPIPublicV1TeamForbidden() *GetAPIPublicV1TeamForbidden {\n\treturn &GetAPIPublicV1TeamForbidden{}\n}", "func 
NewInformationProtection()(*InformationProtection) {\n m := &InformationProtection{\n Entity: *NewEntity(),\n }\n return m\n}", "func NewGetBicsForbidden() *GetBicsForbidden {\n\treturn &GetBicsForbidden{}\n}", "func (x *fastReflection_LightClientAttackEvidence) New() protoreflect.Message {\n\treturn new(fastReflection_LightClientAttackEvidence)\n}", "func NewGetStockReceiptItemForbidden() *GetStockReceiptItemForbidden {\n\treturn &GetStockReceiptItemForbidden{}\n}", "func NewGetKeysForbidden() *GetKeysForbidden {\n\treturn &GetKeysForbidden{}\n}", "func NewGetVulnerabilitiesForbidden() *GetVulnerabilitiesForbidden {\n\treturn &GetVulnerabilitiesForbidden{}\n}", "func (o *GraphqlPostUnprocessableEntity) WithPayload(payload *models.ErrorResponse) *GraphqlPostUnprocessableEntity {\n\to.Payload = payload\n\treturn o\n}", "func NewPutPeoplePersonIDKidsUnprocessableEntity() *PutPeoplePersonIDKidsUnprocessableEntity {\n\treturn &PutPeoplePersonIDKidsUnprocessableEntity{}\n}", "func (a *Client) GetProtectedEntityInfo(params *GetProtectedEntityInfoParams, opts ...ClientOption) (*GetProtectedEntityInfoOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetProtectedEntityInfoParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"getProtectedEntityInfo\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/astrolabe/{service}/{protectedEntityID}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetProtectedEntityInfoReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*GetProtectedEntityInfoOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for getProtectedEntityInfo: API contract not enforced by server. 
Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func NewPatchEventsEventIDUnprocessableEntity() *PatchEventsEventIDUnprocessableEntity {\n\treturn &PatchEventsEventIDUnprocessableEntity{}\n}", "func NewGetLatestIntelRuleFileForbidden() *GetLatestIntelRuleFileForbidden {\n\treturn &GetLatestIntelRuleFileForbidden{}\n}", "func NewEntity(template *x509.Certificate, privateKey crypto.PrivateKey) (*Entity, error) {\n\tpriv, err := rsa.GenerateKey(randReader, rsaBitSize)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to generate key: %v\", err)\n\t}\n\tif privateKey != nil {\n\t\tpriv = privateKey.(*rsa.PrivateKey)\n\t}\n\tif template.SubjectKeyId, err = keyID(priv.Public()); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to generate Subject Key ID: %v\", err)\n\t}\n\n\tif template.SerialNumber, err = rand.Int(randReader, bigInt); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to randomize a big int: %v\", err)\n\t}\n\n\treturn &Entity{Template: template, PrivateKey: priv, PublicKey: priv.Public(), Certificate: nil}, nil\n}", "func fakeEntity(pubkey *packet.PublicKey, privkey *packet.PrivateKey) *openpgp.Entity {\n\tcfg := packet.Config{\n\t\tDefaultCipher: packet.CipherAES128,\n\t\tDefaultHash: crypto.SHA384,\n\t\tDefaultCompressionAlgo: packet.CompressionNone,\n\t\tRSABits: 2048,\n\t}\n\n\tentity := openpgp.Entity{\n\t\tPrimaryKey: pubkey,\n\t\tPrivateKey: privkey,\n\t\tIdentities: make(map[string]*openpgp.Identity),\n\t}\n\n\t// Create fake identity\n\t// We need an identity, not because it is used or important in any way but because Sign(...) will use the\n\t// same hash function (hardcoded) or fall back to RIPEMD...\n\ttrueVal := true\n\thid, _ := s2k.HashToHashId(crypto.SHA256)\n\tentity.Identities[\"\"] = &openpgp.Identity{\n\t\tName: \"\",\n\t\tUserId: packet.NewUserId(\"\", \"\", \"\"),\n\t\tSelfSignature: &packet.Signature{\n\t\t\tCreationTime: time.Now(),\n\t\t\tSigType: packet.SigTypePositiveCert,\n\t\t\tPubKeyAlgo: packet.PubKeyAlgoRSA,\n\t\t\tHash: cfg.Hash(),\n\t\t\tIsPrimaryId: &trueVal,\n\t\t\tFlagSign: true,\n\t\t\tFlagsValid: true,\n\t\t\tFlagCertify: true,\n\t\t\tIssuerKeyId: &pubkey.KeyId,\n\t\t\tPreferredHash: []uint8{hid},\n\t\t},\n\t}\n\n\treturn &entity\n}", "func NewObjectsGetForbidden() *ObjectsGetForbidden {\n\treturn &ObjectsGetForbidden{}\n}", "func NewGetHostGroupsForbidden() *GetHostGroupsForbidden {\n\treturn &GetHostGroupsForbidden{}\n}", "func NewDescribeSlowLogRequestWithoutParam() *DescribeSlowLogRequest {\n\n return &DescribeSlowLogRequest{\n JDCloudRequest: core.JDCloudRequest{\n URL: \"/regions/{regionId}/instance/{instanceGid}/slowLog\",\n Method: \"GET\",\n Header: nil,\n Version: \"v2\",\n },\n }\n}", "func GetExclusionGroupTraffic(settings *playfab.Settings, postData *GetExclusionGroupTrafficRequestModel, entityToken string) (*GetExclusionGroupTrafficResultModel, error) {\r\n if entityToken == \"\" {\n return nil, playfab.NewCustomError(\"entityToken should not be an empty string\", playfab.ErrorGeneric)\n }\r\n b, errMarshal := json.Marshal(postData)\r\n if errMarshal != nil {\r\n return nil, playfab.NewCustomError(errMarshal.Error(), playfab.ErrorMarshal)\r\n }\r\n\r\n sourceMap, err := playfab.Request(settings, b, \"/Experimentation/GetExclusionGroupTraffic\", \"X-EntityToken\", entityToken)\r\n if err != nil {\r\n return nil, err\r\n }\r\n \r\n result := &GetExclusionGroupTrafficResultModel{}\r\n\r\n config := mapstructure.DecoderConfig{\r\n DecodeHook: playfab.StringToDateTimeHook,\r\n Result: result,\r\n 
}\r\n \r\n decoder, errDecoding := mapstructure.NewDecoder(&config)\r\n if errDecoding != nil {\r\n return nil, playfab.NewCustomError(errDecoding.Error(), playfab.ErrorDecoding)\r\n }\r\n \r\n errDecoding = decoder.Decode(sourceMap)\r\n if errDecoding != nil {\r\n return nil, playfab.NewCustomError(errDecoding.Error(), playfab.ErrorDecoding)\r\n }\r\n\r\n return result, nil\r\n}", "func NewObjectsCreateUnprocessableEntity() *ObjectsCreateUnprocessableEntity {\n\treturn &ObjectsCreateUnprocessableEntity{}\n}", "func NewGetTiersForbidden() *GetTiersForbidden {\n\treturn &GetTiersForbidden{}\n}", "func NewGetPaymentRequestEDIForbidden() *GetPaymentRequestEDIForbidden {\n\n\treturn &GetPaymentRequestEDIForbidden{}\n}", "func NewPcloudPlacementgroupsPostUnprocessableEntity() *PcloudPlacementgroupsPostUnprocessableEntity {\n\treturn &PcloudPlacementgroupsPostUnprocessableEntity{}\n}", "func NewGetByIDUsingGETForbidden() *GetByIDUsingGETForbidden {\n\treturn &GetByIDUsingGETForbidden{}\n}", "func (x *fastReflection_MsgWithdrawValidatorCommissionResponse) GetUnknown() protoreflect.RawFields {\n\treturn x.unknownFields\n}" ]
[ "0.5650617", "0.5636281", "0.54643387", "0.5256545", "0.5237649", "0.5215986", "0.5137212", "0.5104093", "0.5101223", "0.4990614", "0.49280345", "0.48145685", "0.48031926", "0.47842333", "0.47759265", "0.4742333", "0.4735349", "0.46556872", "0.46432883", "0.46251956", "0.46010885", "0.45765197", "0.4573171", "0.45671433", "0.4551833", "0.45438296", "0.45315838", "0.45244145", "0.45160195", "0.44924897", "0.44864422", "0.44750097", "0.4405481", "0.43973786", "0.43947887", "0.43845528", "0.4368276", "0.4364844", "0.43419713", "0.43337", "0.43109614", "0.42955077", "0.42943418", "0.4282988", "0.42694992", "0.42614007", "0.42599735", "0.42501056", "0.4248255", "0.42381456", "0.42380393", "0.42374688", "0.42360082", "0.4216785", "0.42084515", "0.4207991", "0.42055407", "0.41844404", "0.4182576", "0.41722402", "0.41474038", "0.4147333", "0.4145554", "0.4128967", "0.4127886", "0.41259897", "0.41243187", "0.41236714", "0.41186303", "0.4114765", "0.41089895", "0.41086826", "0.40821946", "0.4074077", "0.40610552", "0.40330175", "0.4028709", "0.40155575", "0.40143746", "0.4008534", "0.40047765", "0.40036485", "0.39963457", "0.3996144", "0.39889818", "0.39884216", "0.39875978", "0.39830026", "0.3979779", "0.39762068", "0.39745197", "0.39731818", "0.39624122", "0.39530632", "0.39516965", "0.39497498", "0.394801", "0.39474902", "0.3944791", "0.39431486" ]
0.7384049
0
NewPaddingTLV creates a new padding TLV
func NewPaddingTLV(length uint8) *PaddingTLV { return &PaddingTLV{ TLVType: PaddingType, TLVLength: length, PaddingData: make([]byte, length), } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (p *PaddingTLV) Length() uint8 {\n\treturn p.TLVLength\n}", "func pad(unpadded []byte, desiredLength int) []byte {\n\tif len(unpadded) == desiredLength {\n\t\treturn unpadded\n\t}\n\ttoAppend := desiredLength - len(unpadded)\n\treturn append(unpadded, bytes.Repeat([]byte{byte(0x00)}, toAppend)...)\n}", "func (p *PaddingTLV) Serialize(buf *bytes.Buffer) {\n\tbuf.WriteByte(p.TLVType)\n\tbuf.WriteByte(p.TLVLength)\n\tbuf.Write(p.PaddingData)\n}", "func getPadding(packetLen int) int {\n\tif packetLen%4 == 0 {\n\t\treturn 0\n\t}\n\treturn 4 - (packetLen % 4)\n}", "func setupPadding() {\n\n\tpaddingMap[0] = \"10101010101010101010101010101010\"\n\tpaddingMap[1] = \"0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f\"\n\tpaddingMap[2] = \"0e0e0e0e0e0e0e0e0e0e0e0e0e0e\"\n\tpaddingMap[3] = \"0d0d0d0d0d0d0d0d0d0d0d0d0d\"\n\tpaddingMap[4] = \"0c0c0c0c0c0c0c0c0c0c0c0c\"\n\tpaddingMap[5] = \"0b0b0b0b0b0b0b0b0b0b0b\"\n\tpaddingMap[6] = \"0a0a0a0a0a0a0a0a0a0a\"\n\tpaddingMap[7] = \"090909090909090909\"\n\tpaddingMap[8] = \"0808080808080808\"\n\tpaddingMap[9] = \"07070707070707\"\n\tpaddingMap[10] = \"060606060606\"\n\tpaddingMap[11] = \"0505050505\"\n\tpaddingMap[12] = \"04040404\"\n\tpaddingMap[13] = \"030303\"\n\tpaddingMap[14] = \"0202\"\n\tpaddingMap[15] = \"01\"\n}", "func (p *PaddingTLV) Type() uint8 {\n\treturn p.TLVType\n}", "func (enc Encoding) WithPadding(padding rune) *Encoding {\n\tswitch {\n\tcase padding < NoPadding || padding == '\\r' || padding == '\\n' || padding > 0xff:\n\t\tpanic(\"invalid padding\")\n\tcase padding != NoPadding && enc.decodeMap[byte(padding)] != invalidIndex:\n\t\tpanic(\"padding contained in alphabet\")\n\t}\n\tenc.padChar = padding\n\treturn &enc\n}", "func (g *GroupedAVP) Padding() int {\n\treturn 0\n}", "func (p *PaddingTLV) Value() interface{} {\n\treturn p\n}", "func pad(in []byte, length int) []byte {\n\tpadding := length - (len(in) % length)\n\tif padding == 0 {\n\t\tpadding = length\n\t}\n\tfor i := 0; i < padding; i++ {\n\t\tin = append(in, byte(padding))\n\t}\n\treturn in\n}", "func padding(message []byte, identifier string) []byte {\n\t// create padding for the strings email, firstname, lastname - RFC6234 multiple of 512\n\n\t// calculate length\n\tmessageSize := binary.Size(message) * 8\n\tlog.Printf(\"%s size: %dBit\\n\", identifier, messageSize)\n\t// ( L + 1 + K ) mod 512 = 448 -> calculate k\n\tmessageL := (messageSize % 512) + 1\n\n\tmessageK := messageL\n\tif messageL > 448 {\n\t\tmessageK = 448 + (512 - messageL)\n\t} else {\n\t\tmessageK = 448 - messageL\n\t}\n\n\t// create buffer to add bytewise\n\tmessageBuffer := bytes.NewBuffer(make([]byte, 0, 512))\n\tbinary.Write(messageBuffer, binary.BigEndian, message)\n\n\t// add 1 - add k - Work with bytes 8bit - add: 1000 0000 | k-7 * 0 - all Strings: string % 8 = 0\n\tbinary.Write(messageBuffer, binary.BigEndian, uint8(0x80))\n\n\t// itearate through the String length K and fill the buffer with 0s\n\tmessageK -= 7\n\n\t// error Handling - if the padding failed\n\tif messageK < 0 || messageK%8 != 0 {\n\t\tlog.Fatalf(\"%s Length of Bits is to long: %d\", identifier, messageK)\n\t}\n\n\t// iteration\n\tfor i := 0; i < messageK/8; i++ {\n\t\tbinary.Write(messageBuffer, binary.BigEndian, uint8(0x00))\n\t}\n\n\t// 64-bit/8Byte block that is L in binary -> L original length\n\tbinary.Write(messageBuffer, binary.BigEndian, uint64(messageSize))\n\n\tlog.Printf(\"Padding for %s: %x(%dBytes|%dBits)\\n\", identifier, messageBuffer.Bytes(), binary.Size(messageBuffer.Bytes()), binary.Size(messageBuffer.Bytes())*8)\n\treturn 
messageBuffer.Bytes()\n}", "func (p *Patch) SetPadding(value mat.AABB) {\n\tp.Padding = value\n\tp.SetRegion(p.Region)\n}", "func padToLength(source string, prefix int) string {\n\treturn fmt.Sprintf(fmt.Sprintf(\"%%-%ds\", prefix), source)\n}", "func pad(message []byte) []byte {\n lenPadding := aes.BlockSize - (len(message) % aes.BlockSize)\n for i := 0; i < lenPadding; i++ {\n message = append(message, byte(lenPadding))\n }\n return message\n}", "func padding(size int) string {\n\tresult := \"\"\n\tfor i := 0; i < size; i++ {\n\t\tresult += \" \"\n\t}\n\treturn result\n}", "func NewPacket(data []byte, len uint32) *Packet {\n\treturn &Packet{\n\t\tTime: time.Now(),\n\t\tCaplen: len,\n\t\tLen: len,\n\t\tData: data,\n\t}\n}", "func PKCSSPadding(ciphertext []byte, blockSize int) []byte {\n\tpadding := blockSize - len(ciphertext)%blockSize\n\tpadtext := bytes.Repeat([]byte{byte(padding)}, padding)\n\treturn append(ciphertext, padtext...)\n}", "func NewMDMkt(val string) MDMktField {\n\treturn MDMktField{quickfix.FIXString(val)}\n}", "func EncodeBytesWithPadding(data []byte, targetLength int) []byte {\n\tvar buf bytes.Buffer\n\n\tfor i := 0; i < targetLength-len(data); i++ {\n\t\tbuf.WriteByte(0)\n\t}\n\n\tbuf.Write(data)\n\treturn buf.Bytes()\n}", "func PKCS(data []byte, mode string) (padded_data []byte) {\r\n\tvar pad_num int\r\n\r\n\tif mode == \"add\" {\r\n\t\trem := len(data) % userlib.AESBlockSizeBytes\r\n\t\tpad_num = userlib.AESBlockSizeBytes - rem //number to pad by\r\n\t\t//pad := make([]byte, pad_num) //pad array we are appending later\r\n\t\tpadded_data = data[:]\r\n\t\tfor i := 0; i < pad_num; i++ {\r\n\t\t\t//pad = append(pad, byte(pad_num))\r\n\t\t\tpadded_data = append(padded_data, byte(pad_num))\r\n\t\t}\r\n\r\n\t\t//userlib.DebugMsg(\"%d\", padded_data)\r\n\t} else { //remove padding\r\n\t\t//last byte is amount of padding there is\r\n\t\t//ex: d = [1022] means 2 bytes of padding so return d[:2] which is [10]\r\n\r\n\t\tnum := len(data) - 1\r\n\t\tpad_num = len(data) - int(data[num]) //piazza: convert to byte > hex string > int?\r\n\t\tpadded_data = data[:pad_num]\r\n\t}\r\n\r\n\treturn padded_data\r\n}", "func pad(src []byte) []byte {\n\tpadding := aes.BlockSize - len(src)%aes.BlockSize\n\tpadtext := bytes.Repeat([]byte{byte(padding)}, padding)\n\n\treturn append(src, padtext...)\n}", "func EncodeStringWithPadding(data string, targetLength int) []byte {\n\tvar buf bytes.Buffer\n\n\tif len(data) < targetLength {\n\t\tfor i := 0; i < targetLength-len(data); i++ {\n\t\t\tbuf.WriteByte(0)\n\t\t}\n\t}\n\n\tbuf.Write([]byte(data))\n\treturn buf.Bytes()\n}", "func (w *messageWriter) pad(alignment int) error {\n\tn, err := w.Write(padding[:w.pos%alignment])\n\tif err != nil {\n\t\treturn err\n\t}\n\tw.pos += n\n\treturn nil\n}", "func generateInjectionPad(i2 []byte, padLength int) []byte {\n\tblockSize := len(i2)\n\n\tif padLength < 1 {\n\t\tpanic(\"generateInjectionPad: invalid padLength\")\n\t}\n\n\trandomPrefix, _ := cryptopals.GenerateRandomBytes(blockSize - padLength)\n\n\t// Generate slice of padded values XORed with the intermediate block for C1'\n\tpad := bytes.Repeat([]byte{byte(padLength + 1)}, padLength)\n\tcryptopals.FixedXOR(pad, i2[len(i2)-padLength:])\n\n\treturn append(randomPrefix, pad...)\n}", "func PKCS7Padding(text string, length int) string {\n\tpaddingLength := length - (len(text) % length)\n\n\tbs := make([]byte, 1)\n\tbinary.PutUvarint(bs, uint64(paddingLength))\n\n\tpadding := bytes.Repeat(bs, paddingLength)\n\n\treturn text + string(padding)\n}", "func 
pad(data []byte, blockSize int, padder Padder) []byte {\n \tdataLen := len(data)\n\tpadLen := blockSize - (dataLen % blockSize)\n\tpadding := padder(padLen)\n\treturn append(data, padding...)\n}", "func Pad(prefix string, reqd int) string {\n\treturn strings.Repeat(\"0\", 5-len(prefix)) + prefix\n}", "func NewPacket(code, identifier uint8, data []byte, additionalCapacity ...uint) Packet {\n\tl := len(data) + EapHeaderLen\n\tpacketCap := l\n\tif len(additionalCapacity) > 0 && l < int(EapMaxLen) {\n\t\tac := additionalCapacity[0]\n\t\tpacketCap = l + int(ac)\n\t\tif packetCap > int(EapMaxLen) {\n\t\t\tpacketCap = int(EapMaxLen)\n\t\t}\n\t}\n\tp := make([]byte, EapHeaderLen, packetCap)\n\tif l > EapHeaderLen {\n\t\tp = append(p, data...)\n\t}\n\tp[EapMsgCode], p[EapMsgIdentifier], p[EapMsgLenLow], p[EapMsgLenHigh] = code, identifier, uint8(l), uint8(l>>8)\n\treturn p\n}", "func pad(d []byte, n int) []byte {\n\td = append(d, make([]byte, n)...)\n\treturn d\n}", "func (p *Service) Pad(bytesValue []byte, blockSize int) ([]byte, error) {\n\tif len(bytesValue) == 0 {\n\t\treturn nil, errZeroLengthValue\n\t}\n\n\tif blockSize <= 0 {\n\t\treturn nil, errLesserThanOneBlockSize\n\t}\n\n\tpadSize := blockSize - (len(bytesValue) % blockSize)\n\tif padSize == 0 {\n\t\tpadSize = blockSize\n\t}\n\n\tpad := bytes.Repeat(\n\t\t[]byte{\n\t\t\tbyte(padSize),\n\t\t},\n\t\tpadSize,\n\t)\n\n\treturn append(bytesValue, pad...), nil\n}", "func (s IpNetwork_PowerboxTag) NewEncryption() (IpNetwork_PowerboxTag_Encryption, error) {\n\tss, err := NewIpNetwork_PowerboxTag_Encryption(s.Struct.Segment())\n\tif err != nil {\n\t\treturn IpNetwork_PowerboxTag_Encryption{}, err\n\t}\n\terr = s.Struct.SetPtr(0, ss.Struct.ToPtr())\n\treturn ss, err\n}", "func padding(size int, alignment int) int {\n\tunalignedPart := size % alignment\n\treturn (alignment - unalignedPart) % alignment\n}", "func pad(blockSize int, buf []byte) []byte {\n\tpadLen := blockSize - (len(buf) % blockSize)\n\tpadding := bytes.Repeat([]byte{byte(padLen)}, padLen)\n\treturn append(buf, padding...)\n}", "func New(typ Type, ver kcapver.Version, l, size uint32) Section {\n\tvar s Section\n\ts[0] = uint8(typ)\n\ts[1] = uint8(ver)\n\tcopy(s[2:6], bytes.WriteUint32(l))\n\tcopy(s[6:], bytes.WriteUint32(size))\n\treturn s\n}", "func (e *EDNS0_PADDING) Option() uint16 { return EDNS0PADDING }", "func GetPaddingPayload(payloadSize int) ([]byte, error) {\n\tpayload := make([]byte, payloadSize)\n\tfor index := range payload {\n\t\tpayload[index] = 0xff\n\t}\n\treturn payload, nil\n}", "func blockPadding(offset int64) (n int64) {\n\treturn -offset & (blockSize - 1)\n}", "func pad(b []byte) []byte {\n\tpadSize := aes.BlockSize - (len(b) % aes.BlockSize)\n\tfmt.Printf(\"aes.BlockSize: %d\\n\", aes.BlockSize) // 16\n\tfmt.Printf(\"padSize : %d\\n\", padSize)\n\tpad := bytes.Repeat([]byte{byte(padSize)}, padSize)\n\treturn append(b, pad...)\n}", "func New(length MnemonicLength, dict Dictionary) (string, error) {\n\tvar entLen EntropyLength\n\n\tswitch length {\n\tcase Words12:\n\t\tentLen = Entropy128\n\tcase Words15:\n\t\tentLen = Entropy160\n\tcase Words18:\n\t\tentLen = Entropy192\n\tcase Words21:\n\t\tentLen = Entropy224\n\tcase Words24:\n\t\tentLen = Entropy256\n\tdefault:\n\t\treturn \"\", errors.New(\"Invalid mnemonic length\")\n\t}\n\n\tent, err := Entropy(entLen)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn NewFromEntropy(ent, dict)\n}", "func pkcs7Padding(ciphertext []byte, blockSize int) []byte {\n\t// The bytes need to padding.\n\tpadding := 
blockSize - len(ciphertext)%blockSize\n\tpadtext := bytes.Repeat([]byte{byte(padding)}, padding)\n\treturn append(ciphertext, padtext...)\n}", "func StringPadding(str string, le int) string {\n\tl := le - len(str)\n\tif l > 0 {\n\t\tfor i := 0; i < l; i++ {\n\t\t\tstr = str + \" \"\n\t\t}\n\t}\n\treturn str\n}", "func (pkcs NullPadding) Apply(unpadded []byte, blockSize int) []byte {\n\treturn []byte{}\n}", "func NewPacket(data []byte, code byte, length int) (p *Packet) {\n\tp = &Packet{\n\t\tData: data,\n\t\tCode: code,\n\t\tLength: length,\n\t}\n\treturn p\n}", "func PadMissingLenth(t []byte, l int) []byte {\n\tmissing := l - len(t)%l\n\tpad := make([]byte, missing, missing)\n\tfmt.Printf(\"will pad with missing %d\\n\", missing)\n\tpad[missing-1] = byte(missing)\n\tfmt.Printf(\"padded with %d\\n\", int(pad[missing-1]))\n\treturn append(t, pad...)\n}", "func pad(msg string) string {\n\twidth := defaultWidth\n\tsize, err := ts.GetSize()\n\tif err == nil {\n\t\t// If `ts.GetSize()` was successful, set the width to the number\n\t\t// of columns present in the terminal LFS is attached to.\n\t\t// Otherwise, fall-back to `defaultWidth`.\n\t\twidth = size.Col()\n\t}\n\n\t// Pad the string with whitespace so that printing at the start of the\n\t// line removes all traces from the last print.removes all traces from\n\t// the last print.\n\tpadding := strings.Repeat(\" \", maxInt(0, width-len(msg)))\n\n\treturn msg + padding\n}", "func NewPacket(opcode byte, payload []byte) *Packet {\n\treturn &Packet{opcode, payload, false, 0, 0, 0}\n}", "func NewEncodedAllocTextLen(val int) EncodedAllocTextLenField {\n\treturn EncodedAllocTextLenField{quickfix.FIXInt(val)}\n}", "func newGCMWithTagSize(cipher goCipher.Block, tagSize int) (aeadIf, error) {\n\treturn newGCMWithNonceAndTagSize(cipher, gcmStandardNonceSize, tagSize)\n}", "func CellPadding(value string) attributes.Attribute {\n\treturn attributes.Attribute{\n\t\tName: \"CellPadding\",\n\t\tTempl: `{{define \"CellPadding\"}}cellpadding=\"` + value + `\"{{end}}`,\n\t}\n}", "func NewAVPacket() (av *AVPacket) {\n\tav = new(AVPacket)\n\tav.MessageType = TYPEAV\n\tav.Flag = FLAGAV\n\tav.AutoCrypt = false\n\tav.Key = AESKEYPREFIX\n\n\treturn\n}", "func New(key []byte, alg func([]byte) (cipher.Block, error)) (cipher.AEAD, error) {\n\tmac, err := alg(key[:(len(key) / 2)])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tenc, err := alg(key[(len(key) / 2):])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &siv{\n\t\tenc: enc,\n\t\tmac: mac,\n\t}, nil\n}", "func padID(id string) string {\n\texpectedLen := 0\n\tswitch {\n\tcase len(id) < 16:\n\t\texpectedLen = 16\n\tcase len(id) > 16 && len(id) < 32:\n\t\texpectedLen = 32\n\tdefault:\n\t\treturn id\n\t}\n\n\treturn pads[expectedLen-len(id)] + id\n}", "func padBytesNeeded(elementLen int) int {\n\treturn 4*(elementLen/4+1) - elementLen\n}", "func padData(rawData []byte) []byte {\n\tneedPadding := aes.BlockSize - ((len(rawData) + 2) % aes.BlockSize)\n\n\tvar dataBuf bytes.Buffer\n\tdataBuf.Grow(2 + len(rawData) + (aes.BlockSize % (len(rawData) + 2)))\n\n\tdataBuf.Write([]byte(\"|\"))\n\tdataBuf.Write(rawData)\n\tdataBuf.Write([]byte(\"|\"))\n\n\tfor i := 0; i < needPadding; i++ {\n\t\tdataBuf.Write([]byte(\" \"))\n\t}\n\n\treturn dataBuf.Bytes()\n}", "func lattigo_encodeNTTAtLvlNew(paramHandle Handle2, encoderHandle Handle2, realValues *C.constDouble, logLen uint64, level uint64, scale float64) Handle2 {\n\tvar params *ckks.Parameters\n\tparams = getStoredParameters(paramHandle)\n\n\tvar encoder 
*ckks.Encoder\n\tencoder = getStoredEncoder(encoderHandle)\n\n\tcomplexValues := CDoubleVecToGoComplex(realValues, uint64(math.Pow(2, float64(logLen))))\n\tvar plaintext *ckks.Plaintext\n\tplaintext = ckks.NewPlaintext(*params, int(level), scale)\n\t(*encoder).EncodeNTT(plaintext, complexValues, int(logLen))\n\treturn marshal.CrossLangObjMap.Add(unsafe.Pointer(plaintext))\n}", "func PaddingKey(key []byte) []byte {\n\t// Initially set to 0\n\t// Becuase it's hex byte array, so 128bit -> 32byte\n\tpaddingBytes := make([]byte, 32-len(key))\n\tkey = append(key, paddingBytes...)\n\n\treturn key\n}", "func NewCapacity(expectedCapacity int) *Tree23 {\n\n\tvar t Tree23\n\tt.initializeTree(expectedCapacity)\n\treturn &t\n}", "func newNode(allc *Allocator, valAllc *Allocator, height uint8, key []byte, val []byte) (*node, uint32) {\n\ttruncatedSize := (DefaultMaxHeight - int(height)) * LayerSize\n\tkeyOffset := allc.putBytes(key)\n\tnodeOffset := allc.makeNode(uint32(truncatedSize))\n\tvalOffset := valAllc.putBytes(val)\n\tnode := allc.getNode(nodeOffset)\n\tnode.height = height\n\tnode.keyOffset = keyOffset\n\t// TODO key length should not exceed uint16 size, assert.\n\tnode.keySize = uint16(len(key))\n\tnode.encodeValue(valOffset, uint32(len(val)))\n\treturn node, nodeOffset\n}", "func New(mac string) (*MagicPacket, error) {\n\tvar packet MagicPacket\n\tvar macAddr WolMac\n\n\thwAddr, err := net.ParseMAC(mac)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// We only support 6 byte MAC addresses since it is much harder to use the\n\t// binary.Write(...) interface when the size of the MagicPacket is dynamic.\n\treg := regexp.MustCompile(`^([0-9a-fA-F]{2}[:-]){5}([0-9a-fA-F]{2})$`)\n\tif !reg.MatchString(mac) {\n\t\treturn nil, fmt.Errorf(\"%s is not a IEEE 802 MAC-48 address\", mac)\n\t}\n\n\t// Copy bytes from the returned HardwareAddr -> a fixed size WolMac.\n\tfor idx := range macAddr {\n\t\tmacAddr[idx] = hwAddr[idx]\n\t}\n\n\t// Setup the header which is 6 repetitions of 0xFF.\n\tfor idx := range packet.header {\n\t\tpacket.header[idx] = 0xFF\n\t}\n\n\t// Setup the payload which is 16 repetitions of the MAC addr.\n\tfor idx := range packet.payload {\n\t\tpacket.payload[idx] = macAddr\n\t}\n\n\treturn &packet, nil\n}", "func PKCS7Padding(ciphertext []byte, blockSize int) []byte {\n\tpadding := blockSize - len(ciphertext)%blockSize\n\tpadtext := bytes.Repeat([]byte{byte(padding)}, padding)\n\treturn append(ciphertext, padtext...)\n}", "func PKCS7Padding(ciphertext []byte, blockSize int) []byte {\n\tpadding := blockSize - len(ciphertext)%blockSize\n\tpadtext := bytes.Repeat([]byte{byte(padding)}, padding)\n\treturn append(ciphertext, padtext...)\n}", "func (p *Packet) AvNewPacket(s int) int {\n\treturn int(C.av_new_packet((*C.struct_AVPacket)(p), C.int(s)))\n}", "func unpad(message []byte) ([]byte, error) {\n if len(message) == 0 {\n return nil, ErrInvalidPadding\n }\n\n lenPadding := message[len(message) - 1]\n if lenPadding == 0 || lenPadding > aes.BlockSize {\n return nil, ErrInvalidPadding\n }\n\n for i := len(message) - 1; i > len(message) - int(lenPadding) - 1; i-- {\n if message[i] != lenPadding {\n return nil, ErrInvalidPadding\n }\n }\n\n return message[:len(message) - int(lenPadding)], nil\n}", "func padding(level int, node *Node) string {\n\tlinks := make([]string, level+1)\n\n\tfor node.Root != nil {\n\t\tif isLast(node) {\n\t\t\tlinks[level] = strings.Repeat(\" \", IndentSize+1)\n\t\t} else {\n\t\t\tlinks[level] = fmt.Sprintf(\"%s%s\", EdgeTypeLink, strings.Repeat(\" \", 
IndentSize))\n\t\t}\n\t\tlevel--\n\t\tnode = node.Root\n\t}\n\n\treturn strings.Join(links, \"\")\n}", "func pkcs5Pad(src []byte, blockSize int) []byte {\n\tpadding := blockSize - len(src)%blockSize\n\tpadtext := bytes.Repeat([]byte{byte(padding)}, padding)\n\treturn append(src, padtext...)\n}", "func fixPadding(b64 string) string {\n\tcount := 4 - len(b64)%4\n\tif count > 0 && count < 4 {\n\t\treturn b64 + strings.Repeat(\"=\", count)\n\t}\n\treturn b64\n}", "func PKCS7Padding(plainUnpaddedData []byte, blockSize int) []byte {\n\tpaddingSize := blockSize - len(plainUnpaddedData)%blockSize\n\tpadData := bytes.Repeat([]byte{byte(paddingSize)}, paddingSize)\n\treturn append(plainUnpaddedData, padData...)\n}", "func PKCS7Padding(plainUnpaddedData []byte, blockSize int) []byte {\n\tpaddingSize := blockSize - len(plainUnpaddedData)%blockSize\n\tpadData := bytes.Repeat([]byte{byte(paddingSize)}, paddingSize)\n\treturn append(plainUnpaddedData, padData...)\n}", "func FindPaddingLength() byte {\n\tfor i := byte(0); i <= 255; i++ {\n\t\tencryptedGuess := cryptoutil.NewCipher(EncryptedCipherText)\n\t\tencryptedGuess[3*BlockSize-1] = encryptedGuess[3*BlockSize-1] ^ 1 ^ i\n\t\tstatusCode := SendRequest(encryptedGuess)\n\t\tif statusCode == InvalidMessageErrorCode {\n\t\t\treturn i\n\t\t}\n\t}\n\n\treturn 0\n}", "func newGCMWithNonceSize(cipher goCipher.Block, size int) (aeadIf, error) {\n\treturn newGCMWithNonceAndTagSize(cipher, size, gcmTagSize)\n}", "func PaddingBlocks(plaintext []byte) []byte {\n\tif len(plaintext) == 0 {\n\t\tplaintext = make([]byte, aes.BlockSize)\n\t}\n\tif len(plaintext)%aes.BlockSize != 0 {\n\t\t// initially 0\n\t\tpaddingBytes := make([]byte, aes.BlockSize-len(plaintext)%aes.BlockSize)\n\t\tcopy(paddingBytes, \"\")\n\t\t// Append slice.\n\t\tplaintext = append(plaintext, paddingBytes...)\n\t}\n\n\treturn plaintext\n}", "func (pkcs PKCS7Padding) Apply(unpadded []byte, blockSize int) []byte {\n\tpadding := (blockSize - len(unpadded)%blockSize)\n\tpadtext := bytes.Repeat([]byte{byte(padding)}, padding)\n\treturn append(unpadded, padtext...)\n}", "func PadPKCS7(src []byte, blockSize int) []byte {\n\tmissing := blockSize - (len(src) % blockSize)\n\tnewSize := len(src) + missing\n\tdest := make([]byte, newSize, newSize)\n\t// copy data\n\tfor i := 0; i < len(src); i++ {\n\t\tdest[i] = src[i]\n\t}\n\t// fill in the rest\n\tmissingB := byte(missing)\n\tfor i := newSize - missing; i < newSize; i++ {\n\t\tdest[i] = missingB\n\t}\n\treturn dest\n}", "func PKCS7Pad(buf []byte, blockSize int) []byte {\n\tif blockSize < 0 || blockSize > 0xff {\n\t\tpanic(\"PKCS7Pad: invalid block size\")\n\t}\n\t// Find the number (and value) of padding bytes.\n\tn := blockSize - (len(buf) % blockSize)\n\n\treturn append(dup(buf), bytes.Repeat([]byte{byte(n)}, n)...)\n}", "func New(length int) *BitArray {\n\tlenpad := nwords(length) * _BytesPW\n\treturn &BitArray{\n\t\tlenpad: lenpad,\n\t\tlength: length,\n\t\tbytes: make([]byte, lenpad, lenpad),\n\t}\n}", "func NewCipher(key []byte) (cipher.Block, error) {\n\tl := len(key)\n\tif l%4 != 0 {\n\t\treturn nil, KeySizeError\n\t}\n\tc := rtea{}\n\tc.key = make([]uint32, l/4)\n\tfor i := range c.key {\n\t\tc.key[i] = enc.Uint32(key)\n\t\tkey = key[4:]\n\t}\n\n\treturn c, nil\n}", "func paddedAppend(size uint, dst, src []byte) []byte {\n\tfor i := 0; i < int(size)-len(src); i++ {\n\t\tdst = append(dst, 0)\n\t}\n\treturn append(dst, src...)\n}", "func (k *Keybase) NewKV(team string) KV {\n\treturn KV{\n\t\tkeybase: k,\n\t\tTeam: team,\n\t}\n}", "func 
NewPacketBuilder(prefix string, precision int64) PacketBuilder {\n\t// Preparing prefix\n\tif len(prefix) > 0 && prefix[len(prefix)-1] != '.' {\n\t\tprefix += \".\"\n\t}\n\n\treturn &packetBuilder{\n\t\tBuffer: bytes.NewBuffer(nil),\n\t\tprefix: []byte(prefix),\n\t\tprecision: precision,\n\t\tparamsAllowed: false,\n\t\twasParams: false,\n\t}\n}", "func GetPadding(option string) (IPaddingStrategy, error) {\n\tswitch option {\n\tcase PKCS5, PKCS7:\n\t\treturn PKCS7Padding{}, nil\n\t}\n\n\treturn NullPadding{}, errors.New(\"error: invalid padding option\")\n}", "func paddKey(key string, val int) string {\n\tif len(key) == val {\n\t\treturn key\n\t}\n\n\tpaddedKey := []byte{}\n\ti := 0\n\tfor len(paddedKey) < val {\n\t\tpaddedKey = append(paddedKey, key[i])\n\n\t\tif i < len(key)-1 {\n\t\t\ti++\n\t\t} else {\n\t\t\ti = 0\n\t\t}\n\t}\n\n\treturn string(paddedKey)\n}", "func PadRigtht(s string, padStr string, totalLen int) string {\n\tvar padCountInt int\n\tpadCountInt = 1 + ((totalLen - len(padStr)) / len(padStr))\n\tvar retStr = s + strings.Repeat(padStr, padCountInt)\n\treturn retStr[:totalLen]\n}", "func ParagraphPadding(paragraph string, padding int) string {\n\tparts := strings.Split(paragraph, \"\\n\")\n\tps := fmt.Sprintf(\"%\"+strconv.Itoa(padding)+\"s\", \" \")\n\n\tfor i := range parts {\n\t\tparts[i] = ps + parts[i]\n\t}\n\n\treturn strings.Join(parts, \"\\n\")\n}", "func (p *Packet) AvPacketNewSideData(t AvPacketSideDataType, s int) *uint8 {\n\treturn (*uint8)(C.av_packet_new_side_data((*C.struct_AVPacket)(p), (C.enum_AVPacketSideDataType)(t), C.int(s)))\n}", "func (d *DecimalAlign) Pad(v interface{}) string {\n\tvar lp int\n\tif s, ok := v.(string); ok {\n\t\t// If a string then look for \".\".\n\t\t// If found then lp=num chars before but excluding it.\n\t\t// If not found then use length of string\n\t\tlp = strings.Index(s, \".\")\n\t\tif lp < 0 {\n\t\t\tlp = len(s)\n\t\t}\n\t} else {\n\t\tvf, _ := util.ToFloat64(v)\n\t\tlp = len(fmt.Sprintf(\"%.0f\", vf))\n\t}\n\treturn strconv.Itoa(d.lp - lp)\n}", "func (a *AdditionalGUTI) SetLen(len uint16) {}", "func newUInt8(value uint8) RootType {\n return &UInt8 { value }\n}", "func (f EncodedAllocTextLenField) Tag() quickfix.Tag { return tag.EncodedAllocTextLen }", "func padWithSpace(source string, prefix, suffix int) string {\n\tif source == \"\" {\n\t\treturn source\n\t}\n\n\treturn strings.Repeat(\" \", prefix) + source + strings.Repeat(\" \", suffix)\n}", "func (pkt *Packet) MarshalTlv() (typ uint32, value []byte, e error) {\n\tpayload, e := pkt.encodeL3()\n\tif e != nil {\n\t\treturn 0, nil, e\n\t}\n\treturn tlv.EncodeTlv(an.TtLpPacket, tlv.MakeElement(an.TtLpFragment, payload))\n}", "func pad(s string, w int, c int) string {\n\tif w <= len(s) {\n\t\treturn s\n\t}\n\treturn s + strings.Repeat(string(c), w-len(s))\n}", "func padWithSpace(source string, prefix, suffix int) string {\n\tif source == \"\" {\n\t\treturn source\n\t}\n\treturn strings.Repeat(\" \", prefix) + source + strings.Repeat(\" \", suffix)\n}", "func newParagraph(initText string, border bool, location int, wid int, ht int) *widgets.Paragraph {\n\tp := widgets.NewParagraph()\n\tp.Text = initText\n\tp.Border = border\n\tp.SetRect(0, location, wid, location+ht)\n\tp.TextStyle.Fg = ui.ColorWhite\n\treturn p\n}", "func StaticPAD(t *testing.T, ad AssocData) *PAD {\n\tpad, err := NewPAD(ad, staticSigningKey, staticVRFKey, 10)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tstr := NewSTR(pad.signKey, pad.ad, staticTree(t), 0, []byte{})\n\tpad.latestSTR = str\n\tpad.snapshots[0] 
= pad.latestSTR\n\treturn pad\n}", "func PKCS7Pad(bytes []byte, alignAt int) []byte {\n\t//The PKCS #7 padding string consists of a sequence of bytes, each of which is equal to the total number of padding bytes added.\n\tvar padding []byte\n\t//padLen := alignAt - len(bytes)\n\tpadLen := alignAt - (len(bytes) % alignAt)\n\tswitch padLen {\n\tcase 0:\n\t\tpadding = []byte(\"\")\n\tcase 1:\n\t\tpadding = []byte(\"\\x01\")\n\tcase 2:\n\t\tpadding = []byte(\"\\x02\\x02\")\n\tcase 3:\n\t\tpadding = []byte(\"\\x03\\x03\\x03\")\n\tcase 4:\n\t\tpadding = []byte(\"\\x04\\x04\\x04\\x04\")\n\tcase 5:\n\t\tpadding = []byte(\"\\x05\\x05\\x05\\x05\\x05\")\n\tcase 6:\n\t\tpadding = []byte(\"\\x06\\x06\\x06\\x06\\x06\\x06\")\n\tcase 7:\n\t\tpadding = []byte(\"\\x07\\x07\\x07\\x07\\x07\\x07\\x07\")\n\tdefault:\n\t\tlog.Fatalf(\"PKCS7Pad unhandled: %v\", padLen)\n\t}\n\n\tvar paddedBytes []byte\n\tpaddedBytes = append(paddedBytes, bytes...)\n\tpaddedBytes = append(paddedBytes, padding...)\n\n\treturn paddedBytes\n}", "func (a PrefixAnnouncement) MarshalTlv() (typ uint32, value []byte, e error) {\n\tvalidityType, validityValue, _ := a.ValidityPeriod.MarshalTlv()\n\treturn tlv.EncodeTlv(an.TtContent, tlv.MakeElementNNI(an.MgmtExpirationPeriod, a.ExpirationPeriod), tlv.MakeElement(validityType, validityValue))\n}", "func paddedLength(x int) int {\n\treturn (x + 0xf) & -0x10\n}", "func (ctx *Ciphertext) CopyNew() BfvElement {\n\n\tctxCopy := new(Ciphertext)\n\n\tctxCopy.value = make([]*ring.Poly, ctx.Degree()+1)\n\tfor i := range ctx.value {\n\t\tctxCopy.value[i] = ctx.value[i].CopyNew()\n\t}\n\tctxCopy.bfvcontext = ctx.bfvcontext\n\tctxCopy.isNTT = ctx.isNTT\n\n\treturn ctxCopy\n}", "func NewPacketWithParamsBuilder(prefix string, precision int64, allowed map[string]bool) PacketBuilder {\n\t// Preparing prefix\n\tif len(prefix) > 0 && prefix[len(prefix)-1] != '.' {\n\t\tprefix += \".\"\n\t}\n\n\treturn &packetBuilder{\n\t\tBuffer: bytes.NewBuffer(nil),\n\t\tprefix: []byte(prefix),\n\t\tprecision: precision,\n\t\tparamsAllowed: true,\n\t\tparamsMap: allowed,\n\t\twasParams: false,\n\t}\n}", "func addBase64Padding(value string) string {\r\n\tm := len(value) % 4\r\n\tif m != 0 {\r\n\t\tvalue += strings.Repeat(\"=\", 4-m)\r\n\t}\r\n\r\n\treturn value\r\n}" ]
[ "0.5740221", "0.56109005", "0.5459555", "0.5429105", "0.5415254", "0.526596", "0.52202696", "0.5168666", "0.50730824", "0.49269283", "0.49154794", "0.49023673", "0.48499796", "0.48332152", "0.48277208", "0.48276848", "0.47980988", "0.47889626", "0.4785528", "0.47746792", "0.47660834", "0.47569233", "0.47372085", "0.47322044", "0.47315913", "0.4714595", "0.46604815", "0.46517277", "0.46456826", "0.46370992", "0.46225822", "0.4611554", "0.46043146", "0.46022332", "0.45933604", "0.45925584", "0.45864773", "0.45835057", "0.45812842", "0.45337078", "0.4530104", "0.4487057", "0.44835407", "0.44745365", "0.44741526", "0.44728157", "0.44687247", "0.4468432", "0.44587722", "0.44510987", "0.44452825", "0.444356", "0.44217145", "0.43992713", "0.43954343", "0.43709177", "0.43685776", "0.43647432", "0.43638912", "0.43566692", "0.43566692", "0.43542787", "0.4340523", "0.43390906", "0.43357712", "0.4325286", "0.43243235", "0.43243235", "0.43214324", "0.43154058", "0.43132848", "0.43067202", "0.43051746", "0.4303492", "0.42968255", "0.42738673", "0.42648274", "0.42609757", "0.42603812", "0.42588234", "0.42499387", "0.42487365", "0.42465657", "0.42433038", "0.42289189", "0.42280567", "0.4215983", "0.42131802", "0.42055517", "0.42043337", "0.4197372", "0.41943455", "0.4187746", "0.4187728", "0.41797134", "0.41771948", "0.4150901", "0.41483936", "0.41411614", "0.41382885" ]
0.7591314
0
Type gets the type of the TLV
func (p *PaddingTLV) Type() uint8 { return p.TLVType }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (t TLVHead) Type() TLVType {\n\treturn t.TLVType\n}", "func (b *Block) Type() uint32 {\n\treturn b.tlvType\n}", "func (bs endecBytes) Type() byte {\n\treturn bs[0] >> 4\n}", "func (b *Block) Type() string {\n\ttypeNameObj := b.typeName.content.(*identifier)\n\treturn string(typeNameObj.token.Bytes)\n}", "func (data *Instance) Type() Value {\n\treturn data.TypeTag\n}", "func (e *Element) Type() string {\n\treturn e.attrs.Get(\"type\")\n}", "func (a *AudioSampleEntryBox) Type() string {\n\treturn a.name\n}", "func Type(data []byte) (string, error) {\n\tmsg := struct {\n\t\tType string `json:\"msgtype\"`\n\t}{}\n\tif err := json.Unmarshal(data, &msg); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn msg.Type, nil\n}", "func (c *Chunk) Type() string {\n\tvar bf bytes.Buffer\n\tbf.Write(c.typ)\n\treturn bf.String()\n}", "func Type() *dataType {\n\treturn &dataType{str: field.StringType()}\n}", "func (o *Wireless) GetType() string {\n\tif o == nil || o.Type == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Type\n}", "func (o *Ga4ghChemotherapy) GetType() string {\n\tif o == nil || o.Type == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Type\n}", "func (m *MdiaBox) Type() string {\n\treturn \"mdia\"\n}", "func (sun Suncoin) Type() string {\n\treturn Type\n}", "func (o *KubernetesEthernetMatcher) GetType() string {\n\tif o == nil || o.Type == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Type\n}", "func (m *Device) GetType() (val string, set bool) {\n\tif m.Type == nil {\n\t\treturn\n\t}\n\n\treturn *m.Type, true\n}", "func (this *NowStr) Type() value.Type { return value.STRING }", "func (element *Element) Type(value string) *Element {\n\treturn element.Attr(\"type\", value)\n}", "func (d UserData) Type() string {\n\tval := d.ModelData.Get(models.NewFieldName(\"Type\", \"type\"))\n\tif !d.Has(models.NewFieldName(\"Type\", \"type\")) {\n\t\treturn *new(string)\n\t}\n\treturn val.(string)\n}", "func (this *ClockStr) Type() value.Type { return value.STRING }", "func (i ID) Type() string {\n\tstuff := strings.Split(i.String(), \"/\")\n\treturn stuff[0]\n}", "func (t Type) Type() string {\n\treturn t.typeName\n}", "func (this *Element) Type() value.Type { return value.JSON }", "func (d *Decoder) Type() (Type, error) {\n\n\t// start with 1 byte and append to it until we get a clean varint\n\tvar (\n\t\ttag uint64\n\t\ttagBytes []byte\n\t)\n\nreadTagByte:\n\tfor {\n\t\tvar singleByte = make([]byte, 1)\n\t\t_, err := io.ReadFull(d.input, singleByte)\n\t\tif err != nil {\n\t\t\treturn typeUninited, err\n\t\t}\n\t\ttagBytes = append(tagBytes, singleByte[0])\n\n\t\tvar byteCount int\n\t\ttag, byteCount = varint.ConsumeVarint(tagBytes)\n\t\tswitch {\n\t\tcase byteCount == varint.ErrCodeTruncated:\n\t\t\tcontinue readTagByte\n\t\tcase byteCount > 0:\n\t\t\tfmt.Fprintln(dbg, \"\\tvarint byteCount:\", byteCount)\n\t\t\tbreak readTagByte // we got a varint!\n\t\tdefault:\n\t\t\treturn typeUninited, fmt.Errorf(\"bipf: broken varint tag field\")\n\t\t}\n\t}\n\n\tfmt.Fprintf(dbg, \"\\tdecoded %x to tag: %d\\n\", tagBytes, tag)\n\n\t// apply mask to get type\n\td.currentType = Type(tag & tagMask)\n\tif d.currentType >= TypeReserved {\n\t\treturn 0, fmt.Errorf(\"bipf: invalid type: %s\", d.currentType)\n\t}\n\n\t// shift right to get length\n\td.currentLen = uint64(tag >> tagSize)\n\n\t// drop some debugging info\n\tfmt.Fprintln(dbg, \"\\tvalue type:\", d.currentType)\n\tfmt.Fprintln(dbg, \"\\tvalue length:\", 
d.currentLen)\n\tfmt.Fprintln(dbg)\n\tdbg.Sync()\n\n\treturn d.currentType, nil\n}", "func (obj *SObject) Type() string {\n\tattributes := obj.AttributesField()\n\tif attributes == nil {\n\t\treturn \"\"\n\t}\n\treturn attributes.Type\n}", "func (myOperatingSystemType *OperatingSystemType) Type() (param string) {\n\treturn myOperatingSystemType.Typevar\n}", "func (t tag) GetType() TagType {\n return t.Tagtype\n}", "func (a *AlienVault) Type() string {\n\treturn a.SourceType\n}", "func TypeOf(data []byte) Type {\n\tt := data[0]\n\n\t// FIXME: add additional validation\n\n\tswitch {\n\tdefault:\n\t\treturn invalid\n\tcase t == 'i':\n\t\treturn integer\n\tcase t >= '0' && t <= '9':\n\t\treturn str\n\tcase t == 'l':\n\t\treturn list\n\tcase t == 'd':\n\t\treturn dictionary\n\t}\n}", "func (e *Huobi) GetType() string {\n\treturn e.option.Type\n}", "func (o GuestOsFeatureResponseOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GuestOsFeatureResponse) string { return v.Type }).(pulumi.StringOutput)\n}", "func (this *Value) Type() int {\n\treturn this.parsedType\n}", "func (t TypeField) getType() string {\n\treturn t.IsType\n}", "func (n Name) Type() string {\n\t_, t := n.GetLookupAndType()\n\treturn t\n}", "func (def TypeDefinition) Type() Type {\n\treturn def.theType\n}", "func (t *TOMLParser) Type() string {\n\treturn \"TOML\"\n}", "func (b baseValue) Type() string {\n\treturn string(b.flagType)\n}", "func (t *Token) Type() string {\n\tswitch {\n\tcase strings.EqualFold(t.TokenType, \"bearer\"):\n\t\treturn \"Bearer\"\n\tcase strings.EqualFold(t.TokenType, \"mac\"):\n\t\treturn \"MAC\"\n\tcase strings.EqualFold(t.TokenType, \"basic\"):\n\t\treturn \"Basic\"\n\tcase t.TokenType != \"\":\n\t\treturn t.TokenType\n\tdefault:\n\t\treturn \"Bearer\"\n\t}\n}", "func (n *piName) Type() Type {\n\treturn n.t\n}", "func (wlt Wallet) GetType() string {\n\treturn wlt.Meta[\"type\"]\n}", "func (g BasicPacket) Type() uint8 {\n\treturn g.header.Type\n}", "func (o *SecretValue) GetType() string {\n\tif o == nil || o.Type == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Type\n}", "func (player *Athelete) GetType() string {\n\treturn player.LMType\n}", "func (this *ObjectLength) Type() value.Type { return value.NUMBER }", "func (this *ObjectLength) Type() value.Type { return value.NUMBER }", "func (o *PrivilegedData) GetType() Secrettypes {\n\tif o == nil {\n\t\tvar ret Secrettypes\n\t\treturn ret\n\t}\n\n\treturn o.Type\n}", "func (vl WildCardValue) GetType() int {\n\tpanic(\"Invalid particle type: WildCard\")\n}", "func (msg *RegisterTMRequest) Type() uint16 {\n\treturn TypeRegClt\n}", "func (this *Self) Type() value.Type { return value.JSON }", "func (word ControlWord) Type() ControlType {\n\treturn ControlType((uint32(word) >> 17) & 0x7)\n}", "func (m *EnumLiteral) Type() Type {\n\treturn m.wtype\n}", "func (s *StringEnum) Type() string { return \"string\" }", "func (r Resource) Type() string {\n\treturn r.typ\n}", "func (o MfaPingidOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *MfaPingid) pulumi.StringOutput { return v.Type }).(pulumi.StringOutput)\n}", "func (o FieldResponseOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FieldResponse) string { return v.Type }).(pulumi.StringOutput)\n}", "func (a *Alphabet) Type() string {\n\treturn a.t\n}", "func (a *Alphabet) Type() string {\n\treturn a.t\n}", "func (value *Value) Type() Type {\n\treturn value.valueType\n}", "func (o *SingleSelectFieldField) GetType() string {\n\tif o == nil {\n\t\tvar ret 
string\n\t\treturn ret\n\t}\n\n\treturn o.Type\n}", "func (t *StringDataType) Type() interface{} {\n\treturn \"\"\n}", "func (f Flags) Type() MessageType { return MessageType(f & 0xe0) }", "func (o *UiNodeInputAttributes) GetType() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Type\n}", "func (ft *FieldType) GetType() byte {\n\tif ft.array {\n\t\treturn mysql.TypeJSON\n\t}\n\treturn ft.tp\n}", "func (inst *InstFMul) Type() types.Type {\n\t// Cache type if not present.\n\tif inst.Typ == nil {\n\t\tinst.Typ = inst.X.Type()\n\t}\n\treturn inst.Typ\n}", "func (m *LengthtimeMutation) Type() string {\n\treturn m.typ\n}", "func GetType() string {\n\treturn vboxmanage.GetVMProperty(boxName, \"boxType\")\n}", "func (s *Swift) Type() Type {\n\tif hasBranchCode(s.value) {\n\t\treturn Type11\n\t}\n\treturn Type8\n}", "func (o FieldOutput) Type() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v Field) *string { return v.Type }).(pulumi.StringPtrOutput)\n}", "func (vl BytesValue) GetType() int {\n\treturn ParticleType.BLOB\n}", "func (vl BytesValue) GetType() int {\n\treturn ParticleType.BLOB\n}", "func (o CustomClrSerializationOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v CustomClrSerialization) string { return v.Type }).(pulumi.StringOutput)\n}", "func (v *VInteger) Type() string {\n\treturn \"integer\"\n}", "func (r Resource) Type() string {\n\treturn r.terraformType\n}", "func (n *SoupNode) Type() string {\n\treturn \"SoupNode\"\n}", "func (ftp *FTP) Type(t TypeCode) error {\n\t_, err := ftp.cmd(StatusOK, \"TYPE %s\", t)\n\treturn err\n}", "func (o *EquipmentBaseSensor) GetType() string {\n\tif o == nil || o.Type == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Type\n}", "func (script *Script) Type(name string) (Type, bool) {\n\tgslangType, ok := script.types[name]\n\n\treturn gslangType, ok\n}", "func (vl HLLValue) GetType() int {\n\treturn ParticleType.HLL\n}", "func (fd FileDescriptor) Type() byte {\n\treturn fd.TypeAndNameLength >> 4\n}", "func (f *FlagChoice) Type() string {\n\treturn choiceList(f.choices...)\n}", "func (attr *Attribute) Type() Type {\n\treturn attr.typ\n}", "func (Av1C) GetType() gomp4.BoxType {\n\treturn BoxTypeAv1C()\n}", "func (l *CacheMode) Type() string {\n\treturn \"string\"\n}", "func (a ValueNode) GetType() string {\n\treturn \"ValueNode\"\n}", "func (l settableString) Type() string { return \"<string>\" }", "func (o *DeviceParameterValue) GetType() string {\n\tif o == nil || o.Type == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Type\n}", "func (n *node) Type() string {\n\tif n.nodeType == \"\" {\n\t\treturn \"node\"\n\t}\n\treturn n.nodeType\n}", "func (s *Device) GetType() DeviceType {\n\tif s == nil {\n\t\treturn \"\"\n\t}\n\treturn s.Type\n}", "func (t *TreeEntry) GetType() string {\n\tif t == nil || t.Type == nil {\n\t\treturn \"\"\n\t}\n\treturn *t.Type\n}", "func (o *ShowSystem) GetType() string {\n\tif o == nil || IsNil(o.Type) {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Type\n}", "func (f *receiverFactory) Type() string {\n\treturn typeStr\n}", "func (o CustomClrSerializationResponseOutput) Type() pulumi.StringOutput {\n\treturn o.ApplyT(func(v CustomClrSerializationResponse) string { return v.Type }).(pulumi.StringOutput)\n}", "func (ns NodeSolver) GetType() string {\n\treturn ns.MType\n}", "func (pr *PasswordRecord) GetType() string {\n\treturn pr.Type\n}", "func (b *Binary) Type() Type {\n\treturn b.file.Type()\n}", "func (_this *Report) Type() string {\n\tvar 
ret string\n\tvalue := _this.Value_JS.Get(\"type\")\n\tret = (value).String()\n\treturn ret\n}", "func (t *VLDbl) TypeName() string {\n\treturn \"VLDbl\"\n}", "func (s *Struct) Type() Type {\n\treturn s.ty\n}", "func (vl StringValue) GetType() int {\n\treturn ParticleType.STRING\n}", "func (vl StringValue) GetType() int {\n\treturn ParticleType.STRING\n}" ]
[ "0.7792124", "0.7596322", "0.67068475", "0.66240877", "0.65851456", "0.65079874", "0.6467992", "0.6432543", "0.6431359", "0.6421504", "0.6419574", "0.6413628", "0.64031136", "0.6390112", "0.6384129", "0.636459", "0.6359154", "0.635645", "0.6353648", "0.634136", "0.6336353", "0.6330426", "0.6328633", "0.6327224", "0.6326617", "0.6319665", "0.6318002", "0.6313233", "0.63084155", "0.63082945", "0.6307831", "0.6288007", "0.6277575", "0.62701315", "0.6232474", "0.6230613", "0.6228388", "0.6218027", "0.6210668", "0.61996245", "0.6190296", "0.6183362", "0.61767024", "0.6156925", "0.6156925", "0.61444134", "0.61370856", "0.61341566", "0.6133814", "0.61264", "0.61250806", "0.612458", "0.6114415", "0.6114347", "0.6112405", "0.61104417", "0.61104417", "0.6109444", "0.61058056", "0.6101233", "0.609513", "0.60910124", "0.60765994", "0.60759676", "0.6057914", "0.60557383", "0.6050956", "0.60401917", "0.6034634", "0.6034634", "0.6033679", "0.60334826", "0.60288244", "0.6026174", "0.60244304", "0.60240054", "0.6021495", "0.60205525", "0.6018822", "0.6018734", "0.6015182", "0.6009802", "0.60083044", "0.5999583", "0.599596", "0.599508", "0.59919673", "0.59913987", "0.59896743", "0.5986365", "0.5985054", "0.5984181", "0.5979027", "0.5976231", "0.5974701", "0.59740335", "0.59732145", "0.5967805", "0.59667945", "0.59667945" ]
0.6860522
2
Length gets the length of the TLV
func (p *PaddingTLV) Length() uint8 { return p.TLVLength }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (a *AdditionalGUTI) GetLen() (len uint16) {}", "func (h *ExtendedHeader) GetLength(_ protocol.VersionNumber) protocol.ByteCount {\n\tlength := 1 /* type byte */ + 4 /* version */ + 1 /* dest conn ID len */ + protocol.ByteCount(h.DestConnectionID.Len()) + 1 /* src conn ID len */ + protocol.ByteCount(h.SrcConnectionID.Len()) + protocol.ByteCount(h.PacketNumberLen) + 2 /* length */\n\tif h.Type == protocol.PacketTypeInitial {\n\t\tlength += quicvarint.Len(uint64(len(h.Token))) + protocol.ByteCount(len(h.Token))\n\t}\n\treturn length\n}", "func (a *RequestedNSSAI) GetLen() (len uint8) {}", "func Length(pkt *packet.Packet) uint8 {\n\treturn uint8(pkt[4])\n}", "func (bs endecBytes) Length() uint32 {\n\treturn uint32(len(bs))\n}", "func (adaType *AdaSuperType) Length() uint32 {\n\treturn adaType.length\n}", "func (vp *baseVectorParty) GetLength() int {\n\treturn vp.length\n}", "func (com *hornComponent) Length() int {\n\treturn 222\n}", "func (c *Certificate) Length() (length int) {\n\tlength = c.leng.Int()\n\treturn\n}", "func (rcv *FieldNode) Length() int32 {\n\treturn rcv._tab.GetInt32(rcv._tab.Pos + flatbuffers.UOffsetT(0))\n}", "func (packet *RadiusPacket) GetLength() uint16 {\n\n\treturn packet.length\n\n}", "func (t *dataType) Length(n int) *dataType {\n\tt.str.Length(n)\n\treturn t\n}", "func (t *BaseType) GetLength(v *Value) int32 {\n\treturn -1\n}", "func (p *Packet) GetLength() int {\n\treturn p.Length\n}", "func (sd *PrivateDescriptor) length() int {\n\tlength := 32 // identifier\n\tlength += len(sd.PrivateBytes) * 8 // private_bytes\n\treturn length / 8\n}", "func (a *AvroEncoder) Length() int {\n\treturn 5 + len(a.Content)\n}", "func (hn *HeadNode) Length() int {\n\treturn hn.para[0]\n}", "func (publishBuilderEntry *PublishBuilderEntry) Length() int {\n\tpublishBuilderEntry.ensureEncoded()\n\treturn len(publishBuilderEntry.encoded)\n}", "func (v Value) Length() int {\n\tpanic(message)\n}", "func (d NSData) Length() uint64 {\n\treturn uint64(d.gen_NSData.Length())\n}", "func (p IPPacket) Length() (int, error) {\n\tswitch p.Version() {\n\tcase 4:\n\t\t{\n\t\t\tif len(p) < 4 {\n\t\t\t\treturn -1, ErrTooShort\n\t\t\t}\n\t\t\treturn int(p[2])<<4 + int(p[3]), nil\n\t\t}\n\tcase 6:\n\t\t{\n\t\t\tif len(p) < 6 {\n\t\t\t\treturn -1, ErrTooShort\n\t\t\t}\n\t\t\treturn int(p[4])<<4 + int(p[5]) + IPv6PacketHeadLen, nil\n\t\t}\n\tdefault:\n\t\t{\n\t\t\treturn -1, ErrIPPacketBadVersion\n\t\t}\n\t}\n\treturn -1, nil\n}", "func (a *ReplayedS1UESecurityCapabilities) GetLen() (len uint8) {\n\treturn a.Len\n}", "func (vstr VarString) Length() int {\n\tlength := VarInt(len(vstr))\n\treturn length.Length() + len(vstr)\n}", "func (a *MobileIdentity) GetLen() (len uint16) {\n\treturn a.Len\n}", "func (e *ExtensionField) Len() uint8 {\n\treturn uint8(len(e.Value))\n}", "func (node *GoValueNode) Length() (int, error) {\n\tif node.IsArray() || node.IsMap() || node.IsString() {\n\n\t\treturn node.thisValue.Len(), nil\n\t}\n\n\treturn 0, fmt.Errorf(\"this node identified as \\\"%s\\\" is not referencing an array, slice, map or string\", node.IdentifiedAs())\n}", "func (info *Metadata) Length() int {\n\treturn info.length\n}", "func (a *SMPDUDNRequestContainer) GetLen() (len uint8) {\n\treturn a.Len\n}", "func (s byLength) Len() int {\n return len(s)\n}", "func (s byLength) Len() int{\n\treturn len(s)\n}", "func (a *EmergencyNumberList) GetLen() (len uint8) {}", "func (h *PublicHeader) GetLength(pers protocol.Perspective) (protocol.ByteCount, error) {\n\tif h.VersionFlag && h.ResetFlag {\n\t\treturn 0, 
errResetAndVersionFlagSet\n\t}\n\n\tif h.VersionFlag && pers == protocol.PerspectiveServer {\n\t\treturn 0, errGetLengthNotForVersionNegotiation\n\t}\n\n\tlength := protocol.ByteCount(1) // 1 byte for public flags\n\n\tif h.hasPacketNumber(pers) {\n\t\tif h.PacketNumberLen != protocol.PacketNumberLen1 && h.PacketNumberLen != protocol.PacketNumberLen2 && h.PacketNumberLen != protocol.PacketNumberLen4 && h.PacketNumberLen != protocol.PacketNumberLen6 {\n\t\t\treturn 0, errPacketNumberLenNotSet\n\t\t}\n\t\tlength += protocol.ByteCount(h.PacketNumberLen)\n\t}\n\n\tif !h.TruncateConnectionID {\n\t\tlength += 8 // 8 bytes for the connection ID\n\t}\n\n\t// Version Number in packets sent by the client\n\tif h.VersionFlag {\n\t\tlength += 4\n\t}\n\n\tlength += protocol.ByteCount(len(h.DiversificationNonce))\n\n\t// If Multipath flag is set, the PathID is present\n\tif h.MultipathFlag {\n\t\tlength += 1\n\t}\n\n\treturn length, nil\n}", "func (f *PingFrame) Length(_ protocol.VersionNumber) protocol.ByteCount {\n\treturn 1\n}", "func (ac *AudioChannel) length() int {\n\tlength := 8 // component_tag\n\tlength += 24 // iso_code\n\tlength += 3 // bit_stream_mode\n\tlength += 4 // num_channels\n\tlength++ // full_srvc_audio\n\treturn length / 8\n}", "func len(v Type) int32 {}", "func (tbs *TSBlobStore) Length(dataSourceID string) (int, error) {\n\tpath := \"/ts/blob/\" + dataSourceID + \"/length\"\n\n\tresp, getErr := tbs.csc.read(path, tbs.contentType)\n\tif getErr != nil {\n\t\treturn 0, getErr\n\t}\n\n\ttype legnthResult struct {\n\t\tLength int `json:\"length\"`\n\t}\n\n\tvar val legnthResult\n\terr := json.Unmarshal(resp, &val)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn val.Length, nil\n}", "func (s *Scorers) GetLength() int {\n\treturn s.length\n}", "func (f RawDataLengthField) Tag() quickfix.Tag { return tag.RawDataLength }", "func (s byLength) Len() int {\n\treturn len(s)\n}", "func (s byLength) Len() int {\n\treturn len(s)\n}", "func (s *String) Len() int {\n\treturn 4 + len(s.Value)\n}", "func (s ByLength) Len() int {\n\treturn len(s)\n}", "func (s ByLength) Len() int {\n\treturn len(s)\n}", "func (o *FakeObject) Length() int { return reflect.ValueOf(o.Value).Len() }", "func (c *ConfigurationProtocolOption) Len() int {\r\n\treturn 3 + len(c.Contents)\r\n}", "func (f *Frame) length() int {\n\t// If payload is less than the required minimum length, we zero-pad up to\n\t// the required minimum length\n\tpl := len(f.Payload)\n\tif pl < minPayload {\n\t\tpl = minPayload\n\t}\n\n\t// Add additional length if VLAN tags are needed.\n\tvar vlanLen int\n\tswitch {\n\tcase f.ServiceVLAN != nil && f.VLAN != nil:\n\t\tvlanLen = 8\n\tcase f.VLAN != nil:\n\t\tvlanLen = 4\n\t}\n\n\t// 6 bytes: destination hardware address\n\t// 6 bytes: source hardware address\n\t// N bytes: VLAN tags (if present)\n\t// 2 bytes: EtherType\n\t// N bytes: payload length (may be padded)\n\treturn 6 + 6 + vlanLen + 2 + pl\n}", "func (sd *AudioDescriptor) length() int {\n\tlength := 32 // identifier\n\tlength += 4 // audio_count\n\tlength += 4 // reserved\n\tfor i := range sd.AudioChannels {\n\t\tlength += sd.AudioChannels[i].length() * 8\n\t}\n\treturn length / 8\n}", "func (subr *SRRecordResponse) Len() (l uint16) {\n\tencoded, _ := subr.Encode()\n\tl = uint16(len(encoded))\n\treturn l\n}", "func (r *Decoder) Len() int { x := r.Uint64(); v := int(x); assert(uint64(v) == x); return v }", "func (c Code) Length() int {\n\treturn len(c.String())\n}", "func length(v interface{}) (int, error) {\n\tswitch val := v.(type) 
{\n\tcase string:\n\t\treturn len(val), nil\n\tcase []interface{}:\n\t\treturn len(val), nil\n\tcase map[string]interface{}:\n\t\treturn len(val), nil\n\tdefault:\n\t\treturn -1, errors.New(\"invalid type for length\")\n\t}\n}", "func (f *fakeOpa) Len() int {\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\treturn len(f.data)\n}", "func (hm HashMap) Len(ctx context.Context) (int64, error) {\n\treq := newRequest(\"*2\\r\\n$4\\r\\nHLEN\\r\\n$\")\n\treq.addString(hm.name)\n\treturn hm.c.cmdInt(ctx, req)\n}", "func (e Empty) Length() int {\n\treturn len(e.underlying())\n}", "func (f *Frame) length() int {\n\t// Payload length must end on a word (4 byte) boundary.\n\t// If payload is not a multiple of 4 bytes, pad it up\n\t// to the next word boundary.\n\tpl := len(f.Payload)\n\tif r := pl % 4; r != 0 {\n\t\tpl += 4 - r\n\t}\n\n\t// 4 bytes: SOF\n\t// 24 bytes: header\n\t// N bytes: payload\n\t// 4 bytes: CRC\n\t// 4 bytes: EOF\n\treturn 4 + 24 + pl + 4 + 4\n}", "func (a *ChangeRequest) Length() int {\n\treturn 4\n}", "func (radius *RADIUS) Len() (int, error) {\n\tn := radiusMinimumRecordSizeInBytes\n\tfor _, v := range radius.Attributes {\n\t\talen, err := attributeValueLength(v.Value)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tn += int(alen) + 2 // Added Type and Length\n\t}\n\treturn n, nil\n}", "func (m TicketAdmission) Len() int64 {\n\treturn AssetTypeLen\n}", "func (v valuer) Len() int {\n\treturn len(v.data)\n}", "func (f BodyLengthField) Tag() quickfix.Tag { return tag.BodyLength }", "func (p Partition) Length() uint32 {\n\treturn p.RawLength & 0x00ffffff\n}", "func (myQueryData *QueryData) Length() (param int) {\n\treturn myQueryData.Lengthvar\n}", "func (bt *Tree) Length() int {\n\treturn bt.length\n}", "func Length(seq Seq) int {\n\treturn len(ToSlice(seq))\n}", "func (n Number) Length() int {\n\treturn 0\n}", "func (b *Builder) Length() *Builder {\n\tb.p.RegisterTransformation(impl.Length())\n\treturn b\n}", "func (r *TestRequest) Length() uint64 {\n\treturn 0\n}", "func (a *BackoffTimerValue) GetLen() (len uint8) {}", "func (p Packet) Len() int {\n\treturn (int(p[EapMsgLenHigh]) << 8) + int(p[EapMsgLenLow])\n}", "func (pdu *pdu) plength() int { return pdu.woff - sizeProtoHdr }", "func (list linkedList) getLength() int {\n\treturn list.length\n}", "func (f *Fields) Len() int", "func (String) Length(c *compiler.Compiler, this compiler.Expression) (expression compiler.Expression) {\n\texpression = c.NewExpression()\n\texpression.Type = Integer{}\n\texpression.Go.WriteString(`ctx.CountString(`)\n\texpression.Go.WriteB(this.Go)\n\texpression.Go.WriteString(`)`)\n\treturn expression\n}", "func (params *headerParams) Length() int {\n\treturn len(params.params)\n}", "func (a AttributeValue) size() int {\n\treturn OCTETSTRING(a).size()\n}", "func (s Section) Len() uint32 { return bytes.ReadUint32(s[2:6]) }", "func (op *OptDomainSearch) Length() int {\n\tvar length int\n\tfor _, label := range op.DomainSearch {\n\t\tlength += len(label) + 2 // add the first and the last length bytes\n\t}\n\treturn length\n}", "func (l lit) Len() int {\n\treturn 1\n}", "func (rcv *Buffer) Length() int64 {\n\treturn rcv._tab.GetInt64(rcv._tab.Pos + flatbuffers.UOffsetT(8))\n}", "func (l LDAPOID) size() int {\n\treturn OCTETSTRING(l).size()\n}", "func sectionLength(psi []byte) uint16 {\n\treturn uint16(psi[1]&3)<<8 | uint16(psi[2])\n}", "func (u *UnknownPathAttribute) WireLength() uint16 {\n\tlength := uint16(len(u.Value))\n\tif length > 255 {\n\t\tlength++ // Extended length\n\t}\n\treturn length + 3\n}", 
"func (d *Document) Length() uint32 {\n\treturn d.length\n}", "func (s String) Length() int {\n\treturn len(s.underlying())\n}", "func (b *Balance) Len() int { return len(b.Bytes()) }", "func (cp *ConnectPacket) Len() int {\r\n\tml := cp.len()\r\n\treturn headerLen(ml) + ml\r\n}", "func (list *LinkedList[T]) Length() uint {\n\tlist.key.RLock()\n\tdefer list.key.RUnlock()\n\n\treturn list.length\n}", "func (s *SliceOfUint16) Len() int {\n\treturn len(s.items)\n}", "func (body Body) Len() int { return int(body.size) }", "func (p *PCOPayload) Len() int {\r\n\tl := 1\r\n\tfor _, opt := range p.ConfigurationProtocolOptions {\r\n\t\tl += opt.Len()\r\n\t}\r\n\r\n\treturn l\r\n}", "func (l *ListHT) Len() uint64 {\n\treturn l.lt.len\n}", "func (e *metaMultiArgElement) length() int {\n\tif e.size >= 0 {\n\t\treturn e.size\n\t}\n\treturn 1\n}", "func Length(i interface{}) (l int, ok bool) {\n\tv, k := preprocess(i)\n\tswitch k {\n\tcase reflect.Map, reflect.Array, reflect.Slice, reflect.String:\n\t\treturn v.Len(), true\n\t}\n\treturn 0, false\n}", "func (m ShareCommon) Len() int64 {\n\treturn AssetTypeLen\n}", "func (b IPv4Header) Len() int {\n\treturn int(b[0]&0x0f) << 2\n}", "func (ai *actionItem) Length() uint64 {\n\treturn uint64(ai.hai.Extent.Length)\n}", "func (d *Decoder) getStringLength(tag byte) (int32, error) {\n\tvar (\n\t\terr error\n\t\tbuf [2]byte\n\t\tlength int32\n\t)\n\n\tswitch {\n\tcase tag >= BC_STRING_DIRECT && tag <= STRING_DIRECT_MAX:\n\t\treturn int32(tag - 0x00), nil\n\n\tcase tag >= 0x30 && tag <= 0x33:\n\t\t_, err = io.ReadFull(d.reader, buf[:1])\n\t\tif err != nil {\n\t\t\treturn -1, perrors.WithStack(err)\n\t\t}\n\n\t\tlength = int32(tag-0x30)<<8 + int32(buf[0])\n\t\treturn length, nil\n\n\tcase tag == BC_STRING_CHUNK || tag == BC_STRING:\n\t\t_, err = io.ReadFull(d.reader, buf[:2])\n\t\tif err != nil {\n\t\t\treturn -1, perrors.WithStack(err)\n\t\t}\n\t\tlength = int32(buf[0])<<8 + int32(buf[1])\n\t\treturn length, nil\n\n\tdefault:\n\t\treturn -1, perrors.WithStack(err)\n\t}\n}", "func (h Handle) Len() int {\n\tl := 8 + 8 + 4 + len(h.Type) + len(h.Name)\n\tif h.MD != nil {\n\t\tswitch h.MD.(type) {\n\t\tcase *AlpcPortInfo:\n\t\t\tl += 16\n\t\tcase *MutantInfo:\n\t\t\tl += 5\n\t\tcase *FileInfo:\n\t\t\tl++\n\t\t}\n\t}\n\treturn l\n}", "func (t *T) Len() int {\n\treturn t.words\n}", "func Len(input []byte) int {\n\treturn len(input) * 8\n}" ]
[ "0.7318795", "0.7001363", "0.6942401", "0.6891273", "0.6878425", "0.68543977", "0.6744803", "0.6739835", "0.6715174", "0.67149013", "0.6713755", "0.66523576", "0.66506857", "0.66475517", "0.6636951", "0.6624504", "0.65799433", "0.6557597", "0.65531355", "0.65423506", "0.65261734", "0.65092033", "0.6503729", "0.649907", "0.6497043", "0.64797723", "0.6473859", "0.6468216", "0.6458335", "0.6437953", "0.64357513", "0.6433186", "0.64271915", "0.6424686", "0.6402481", "0.6398003", "0.6383301", "0.63780105", "0.63735193", "0.63735193", "0.63499963", "0.63359493", "0.63359493", "0.6316255", "0.63150424", "0.6312005", "0.63064224", "0.62757057", "0.6275652", "0.62615764", "0.6257004", "0.6222057", "0.6219018", "0.62117773", "0.62017053", "0.6198249", "0.6195158", "0.6194995", "0.6194428", "0.6191727", "0.6187948", "0.61776435", "0.6168275", "0.6155285", "0.6154558", "0.6153369", "0.6149751", "0.61492825", "0.6143188", "0.61415696", "0.6139152", "0.6138682", "0.61215615", "0.6113661", "0.6111441", "0.6106697", "0.6105462", "0.6105064", "0.61024743", "0.61018384", "0.60960275", "0.60937244", "0.6079602", "0.60793567", "0.6072155", "0.60702026", "0.6059909", "0.60571367", "0.6055927", "0.60548556", "0.6053191", "0.6046648", "0.6040999", "0.6033847", "0.6028705", "0.60275114", "0.6019811", "0.6018816", "0.60173523", "0.6011021" ]
0.7524238
0
Value gets the TLV itself
func (p *PaddingTLV) Value() interface{} { return p }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *SmppTlv) GetValue() string {\n\tif o == nil || o.Value == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Value\n}", "func (o ThingTypeTagOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ThingTypeTag) string { return v.Value }).(pulumi.StringOutput)\n}", "func (b *baseSemanticUTF8String) Value() interface{} {\n\treturn b.V\n}", "func (o unicodeVersion) GetValue() interface{} {\n\treturn string(o)\n}", "func (f *TagField) Value() string {\n\treturn f.value\n}", "func (t *SubStructTag) Value() string {\n\toptions := strings.Join(t.Options, \",\")\n\tif options != \"\" {\n\t\treturn fmt.Sprintf(`%s,%s`, t.Name, options)\n\t}\n\treturn t.Name\n}", "func (o SignalingChannelTagOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v SignalingChannelTag) string { return v.Value }).(pulumi.StringOutput)\n}", "func (t *Token) Value() string {\n\treturn t.strBuilder.String()\n}", "func (o *SmppTlv) GetValueOk() (*string, bool) {\n\tif o == nil || o.Value == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Value, true\n}", "func (o CaCertificateTagOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v CaCertificateTag) string { return v.Value }).(pulumi.StringOutput)\n}", "func (v Uint) Value() interface{} {\n\tif !v.Valid() {\n\t\treturn nil\n\t}\n\treturn v.Uint\n}", "func (ubt *ubtTree) Value() interface{} {\n\treturn ubt.value\n}", "func (f *Title) Value() string {\n\ts := decode.UTF16(f.data)\n\treturn trim.Nil(s)\n}", "func (this *ClockStr) Value() value.Value {\n\treturn nil\n}", "func (decoder *berDecoder) decodeValue() (snmpBlockType, interface{}, error) {\n\tvalueType, valueLength, err := decoder.decodeHeader()\n\tif err != nil {\n\t\treturn 0, nil, fmt.Errorf(\"Unable to decode value header at pos %d - err: %s\", decoder.pos, err)\n\t}\n\tvar value interface{}\n\tswitch valueType {\n\tcase snmpBlockType_INTEGER:\n\t\tvalue, err = decoder.decodeInteger(valueLength)\n\tcase snmpBlockType_BIT_STRING:\n\t\tvalue, err = decoder.decodeBitString(valueLength)\n\tcase snmpBlockType_OCTET_STRING:\n\t\tvalue, err = decoder.decodeOctetString(valueLength)\n\tcase snmpBlockType_NULL, snmpBlockType_NO_SUCH_OBJECT, snmpBlockType_NO_SUCH_INSTANCE, snmpBlockType_END_OF_MIB_VIEW:\n\t\tvalue = nil\n\tcase snmpBlockType_OBJECT_IDENTIFIER:\n\t\tvalue, err = decoder.decodeObjectIdentifier(valueLength)\n\tcase snmpBlockType_SEQUENCE:\n\t\treturn 0, nil, fmt.Errorf(\"Unexpected value type snmpBlockType_SEQUENCE 0x%x at pos %d\", valueType, decoder.pos)\n\tcase snmpBlockType_IP_ADDRESS:\n\t\tvalue, err = decoder.decodeIPv4Address(valueLength)\n\tcase snmpBlockType_COUNTER_32:\n\t\t// value, err = decoder.decodeCounter32(valueLength)\n\tcase snmpBlockType_GAUGE_32:\n\t\t// value, err = decoder.decodeGauge32(valueLength)\n\tcase snmpBlockType_TIME_TICKS:\n\t\t// value, err = decoder.decodeTimeTicks(valueLength)\n\tcase snmpBlockType_OPAQUE:\n\t\t// value, err = decoder.decodeOpaque(valueLength)\n\tcase snmpBlockType_COUNTER_64:\n\t\t// value, err = decoder.decodeCounter64(valueLength)\n\tcase snmpBlockType_UINT_32:\n\t\t// value, err = decoder.decodeUint32(valueLength)\n\tdefault:\n\t\treturn 0, nil, fmt.Errorf(\"Unknown value type 0x%x\", valueType)\n\t}\n\treturn valueType, value, nil\n}", "func (b *baseSemanticUTF8Base64) Value() interface{} {\n\treturn b.encoded\n}", "func (o HealthCheckTagOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v HealthCheckTag) string { return v.Value }).(pulumi.StringOutput)\n}", "func (this *record) Value() interface{} 
{\n\tswitch this._Type {\n\tcase sensors.OT_DATATYPE_UDEC_0:\n\t\tif value, err := this.UintValue(); err != nil {\n\t\t\treturn nil\n\t\t} else {\n\t\t\treturn value\n\t\t}\n\tcase sensors.OT_DATATYPE_UDEC_4, sensors.OT_DATATYPE_UDEC_8, sensors.OT_DATATYPE_UDEC_12, sensors.OT_DATATYPE_UDEC_16, sensors.OT_DATATYPE_UDEC_20, sensors.OT_DATATYPE_UDEC_24:\n\t\tif value, err := this.FloatValue(); err != nil {\n\t\t\treturn nil\n\t\t} else {\n\t\t\treturn value\n\t\t}\n\tcase sensors.OT_DATATYPE_STRING:\n\t\tif value, err := this.StringValue(); err != nil {\n\t\t\treturn nil\n\t\t} else {\n\t\t\treturn value\n\t\t}\n\tcase sensors.OT_DATATYPE_DEC_0:\n\t\tif value, err := this.IntValue(); err != nil {\n\t\t\treturn nil\n\t\t} else {\n\t\t\treturn value\n\t\t}\n\tcase sensors.OT_DATATYPE_DEC_8, sensors.OT_DATATYPE_DEC_16, sensors.OT_DATATYPE_DEC_24:\n\t\tif value, err := this.FloatValue(); err != nil {\n\t\t\treturn nil\n\t\t} else {\n\t\t\treturn value\n\t\t}\n\tdefault:\n\t\tif value, err := this.Data(); err != nil {\n\t\t\treturn nil\n\t\t} else {\n\t\t\treturn value\n\t\t}\n\t}\n}", "func (o AppTemplateContainerLivenessProbeHeaderOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v AppTemplateContainerLivenessProbeHeader) string { return v.Value }).(pulumi.StringOutput)\n}", "func (t Token) Value() (driver.Value, error) {\n\treturn string(t.Bytes()), nil\n}", "func (eln *EmptyLeafNode) GetValue() []byte {\n\treturn nil\n}", "func (value *Value) Value() interface{} {\n\treturn value.value\n}", "func (b *Block) Value() []byte {\n\treturn b.value\n}", "func (rcv *DynamicHeader) Value() []byte {\n\to := flatbuffers.UOffsetT(rcv._tab.Offset(6))\n\tif o != 0 {\n\t\treturn rcv._tab.ByteVector(o + rcv._tab.Pos)\n\t}\n\treturn nil\n}", "func (m Message) Value() []byte {\n\tstart, end, size := m.valueOffsets()\n\tif size == -1 {\n\t\treturn nil\n\t}\n\treturn m[start+4 : end]\n}", "func (n *Node) Value() interface{} {\n\treturn n.value\n}", "func (d *Description) Value() string {\n\tif d == nil {\n\t\treturn \"\"\n\t}\n\tif strings.HasPrefix(d.Raw, `\"\"\"`) {\n\t\treturn parseBlockString(d.Raw)\n\t}\n\treturn parseString(d.Raw)\n}", "func (o GetAppTemplateContainerLivenessProbeHeaderOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetAppTemplateContainerLivenessProbeHeader) string { return v.Value }).(pulumi.StringOutput)\n}", "func (e *Element) Value() interface{} {\n\treturn e.value\n}", "func (e *Element) Value() interface{} {\n\treturn e.value\n}", "func (ur RawStringReply) Val() interface{} {\n\treturn ur.Value\n}", "func (v *VList) Value() interface{} {\n\treturn v.value\n}", "func (s *SequenceItemValue) GetValue() interface{} { return s.elements }", "func (self Param) Value() string { return self.value }", "func (o StreamTagOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v StreamTag) string { return v.Value }).(pulumi.StringOutput)\n}", "func (o DomainTagOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DomainTag) string { return v.Value }).(pulumi.StringOutput)\n}", "func (l *Label) Value() string {\n\treturn l.value\n}", "func (node *SimpleNode) Value() string {\n\tif node == nil {\n\t\treturn \"\"\n\t}\n\n\treturn node.value\n}", "func (f *Field) Value() interface{} {\n\treturn f.value.Interface()\n}", "func (v *Value) Data() interface{} {\n return v.data\n}", "func (src Macaddr) Value() (driver.Value, error) {\n\treturn EncodeValueText(src)\n}", "func Value(value string) *SimpleElement { return newSEString(\"value\", value) }", "func (v 
Value) Raw() []byte {\n\treturn v.val\n}", "func (c *IssueLabel) Value() interface{} {\n\tswitch c.oneOfField {\n\tcase \"asString\":\n\t\treturn c.asString\n\tcase \"issueLabelAsObject\":\n\t\treturn c.issueLabelAsObject\n\t}\n\treturn nil\n}", "func (sval *ScalarValue) Value() string {\n\tswitch {\n\tcase strings.HasPrefix(sval.Raw, `\"\"\"`):\n\t\treturn parseBlockString(sval.Raw)\n\tcase strings.HasPrefix(sval.Raw, `\"`):\n\t\treturn parseString(sval.Raw)\n\tdefault:\n\t\treturn sval.Raw\n\t}\n}", "func (h *StabNode) Value() interface{} {\n\treturn nil\n}", "func (n *NullableGeneric) Value() generic.T {\n\treturn n.data\n}", "func (i Item) Value() interface{} {\n\treturn i.v\n}", "func (rev PlannerRevision) Value() string { return rev.value }", "func (f *Fieldx) Value() interface{} {\n\treturn f.value.Interface()\n}", "func (r *Record) Value() []byte {\n\treturn r.rd.Value\n}", "func (o TokenPasswordPassword1Output) Value() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v TokenPasswordPassword1) *string { return v.Value }).(pulumi.StringPtrOutput)\n}", "func (this *NowStr) Value() value.Value {\n\treturn nil\n}", "func (adaType *AdaSuperType) Value() (adaValue IAdaValue, err error) {\n\tif Central.IsDebugLevel() {\n\t\tCentral.Log.Debugf(\"Return super descriptor value\")\n\t}\n\tadaValue = newSuperDescriptorValue(adaType)\n\treturn\n}", "func (s *StringChecksum) Value() string {\n\treturn s.value\n}", "func (nData *NaiveData) Value() []gotypes.Value {\n\treturn nData.Val\n}", "func (o *KubernetesEthernetMatcher) GetValue() string {\n\tif o == nil || o.Value == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Value\n}", "func (it *iterator) Value() []byte {\n\treturn it.current.value\n}", "func (t Type) Value() string {\n\tstr := string(t)\n\tv, ok := builtin[str]\n\tif !ok {\n\t\treturn gocase.To(strcase.ToCamel(str))\n\t}\n\n\treturn v\n}", "func (r *Reader) Value() []byte {\n\treturn r.value\n}", "func (this *Dcmp0_Chunk_TableLookupBody) Value() (v []byte, err error) {\n\tif (this._f_value) {\n\t\treturn this.value, nil\n\t}\n\ttmp32, err := this.LookupTable()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tthis.value = []byte(tmp32[(this.Tag - 75)])\n\tthis._f_value = true\n\treturn this.value, nil\n}", "func (f Formal) Value() string {\n\treturn string(f)\n}", "func (id PlannerID) Value() string { return id.value }", "func (i *Item) Value() interface{} {\r\n\treturn i.data\r\n}", "func (t *Type) Val() *Type", "func (v *VInteger) Value() interface{} {\n\treturn v.value\n}", "func (this *Value) Value() interface{} {\n\tif this.parsedValue != nil || this.parsedType == NULL {\n\t\trv := devalue(this.parsedValue)\n\t\tif this.alias != nil {\n\t\t\toverlayAlias(rv, this.alias)\n\t\t}\n\t\treturn rv\n\t} else if this.parsedType != NOT_JSON {\n\t\terr := json.Unmarshal(this.raw, &this.parsedValue)\n\t\tif err != nil {\n\t\t\tpanic(\"unexpected parse error on valid JSON\")\n\t\t}\n\t\t// if there are any aliases, we must make a safe copy\n\t\t// and then overlay them\n\t\tif this.alias != nil {\n\t\t\t// we cannot damange the original parsed value\n\t\t\trv := safeCopy(this.parsedValue)\n\t\t\toverlayAlias(rv, this.alias)\n\t\t\treturn rv\n\t\t} else {\n\t\t\t// otherwise its safe to return directly\n\t\t\treturn this.parsedValue\n\t\t}\n\t} else {\n\t\treturn nil\n\t}\n}", "func (e Eth) Value() (driver.Value, error) {\n\treturn (utils.Big)(e).Value()\n}", "func (v *Value) String() string { return v.macaddr.String() }", "func (v *tagValuer) Value(name string) (interface{}, 
bool) {\n\tif value, ok := v.tags[name]; ok {\n\t\tif value == nil {\n\t\t\treturn nil, true\n\t\t}\n\t\treturn *value, true\n\t}\n\treturn nil, false\n}", "func (t TherapistType) Value() (driver.Value, error) {\n\treturn t.String(), nil\n}", "func (t *DataToken) GetValue() string {\n\treturn t.value\n}", "func (o DiagnosticBackendRequestDataMaskingHeaderOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DiagnosticBackendRequestDataMaskingHeader) string { return v.Value }).(pulumi.StringOutput)\n}", "func (it *Iterator) Value() interface{} { return it.n.v }", "func (it *Iterator) Value() interface{} { return it.n.v }", "func (src Tag) Value() (driver.Value, error) {\n\tconst api = \"Tag.Value\"\n\n\treturn src.ID, nil\n}", "func (o DiagnosticBackendResponseDataMaskingHeaderOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DiagnosticBackendResponseDataMaskingHeader) string { return v.Value }).(pulumi.StringOutput)\n}", "func (w *Wire) Value() WireValue {\n\treturn WireValue((w.ovnum & valueMask) >> valueShift)\n}", "func (f DefaultField) Value() interface{} {\n\treturn f.V\n}", "func (i *Iterator) Value() []byte {\n\treturn i.n.value\n}", "func (l *Lexer) Value() interface{} {\n\treturn l.value\n}", "func (s *GoObject) Value() interface{} {\n\treturn s.data\n}", "func (item *KVItem) Value() []byte {\n\titem.wg.Wait()\n\treturn item.val\n}", "func (node *GoValueNode) Value() reflect.Value {\n\n\treturn node.thisValue\n}", "func (d *decoder) value(node Node, v reflect.Value) error {\n\tu, rv := d.indirect(v)\n\tif u != nil {\n\t\tif err := u.UnmarshalMetadata(node); err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t\treturn nil\n\t}\n\tswitch kind := rv.Type().Kind(); kind {\n\tcase reflect.String:\n\t\tif err := d.string(node, rv); err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t\treturn nil\n\tdefault:\n\t\tpanic(fmt.Errorf(\"support for decoding into %T (%v) not yet implemented\", kind, kind))\n\t}\n}", "func (p provider) Value() interface{} {\n\treturn p.value\n}", "func (o DiagnosticFrontendRequestDataMaskingHeaderOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DiagnosticFrontendRequestDataMaskingHeader) string { return v.Value }).(pulumi.StringOutput)\n}", "func (n *lnode) value() []byte {\n\tbuf := (*[maxAllocSize]byte)(unsafe.Pointer(n))\n\treturn buf[n.pos+n.ksize : n.pos+n.ksize+n.vsize]\n}", "func (o DiagnosticFrontendResponseDataMaskingHeaderOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DiagnosticFrontendResponseDataMaskingHeader) string { return v.Value }).(pulumi.StringOutput)\n}", "func (e rawEntry) value(buf []byte) rawValue { return buf[e.ptr():][:e.sz()] }", "func (o HostedZoneTagOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v HostedZoneTag) string { return v.Value }).(pulumi.StringOutput)\n}", "func (it *iterator) Value() []byte {\n\tif len(it.values) > 0 {\n\t\treturn it.values[0]\n\t}\n\treturn nil\n}", "func (e REnv) Value() Value { return mrbObjValue(unsafe.Pointer(e.p)) }", "func (o ThingGroupTagOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ThingGroupTag) string { return v.Value }).(pulumi.StringOutput)\n}", "func (n *Node) GetValue() string {\n\treturn n.value\n}", "func (f *field) Val() interface{} {\n\treturn f.v\n}", "func (o SKUCapabilityResponseOutput) Value() pulumi.StringOutput {\n\treturn o.ApplyT(func(v SKUCapabilityResponse) string { return v.Value }).(pulumi.StringOutput)\n}", "func (t TimeValue) Value() interface{} {\n\treturn t.value\n}", 
"func (o *VerifiableAddress) GetValue() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Value\n}", "func (ip IPv4) Value() string {\n\treturn ip.value\n}" ]
[ "0.71718717", "0.6518982", "0.6511803", "0.63563395", "0.6336774", "0.63253427", "0.62726206", "0.62723505", "0.6252216", "0.62233454", "0.6198178", "0.61777335", "0.6150266", "0.6144369", "0.61414146", "0.6107172", "0.6098916", "0.6090685", "0.6087054", "0.6082909", "0.6071128", "0.6045851", "0.6037178", "0.6034387", "0.6024091", "0.6021481", "0.6003124", "0.59865016", "0.59784746", "0.59784746", "0.59761167", "0.59621656", "0.59619164", "0.59553975", "0.5953996", "0.5934818", "0.5933224", "0.5908452", "0.5904715", "0.59019023", "0.5894881", "0.5879814", "0.58780926", "0.5877525", "0.58736277", "0.5863408", "0.5858885", "0.5856823", "0.5851806", "0.5850282", "0.58488846", "0.5842909", "0.58396184", "0.5831726", "0.5823126", "0.5819743", "0.5818977", "0.5812107", "0.58049095", "0.5804625", "0.5787697", "0.5786985", "0.5758535", "0.57562846", "0.57550687", "0.5753957", "0.5750599", "0.5746139", "0.5744168", "0.5741567", "0.5740976", "0.57310873", "0.5724488", "0.57229334", "0.57229334", "0.57114124", "0.5708748", "0.56983566", "0.5696805", "0.56929433", "0.56891936", "0.5689083", "0.5682954", "0.56829333", "0.5682345", "0.56762975", "0.56751406", "0.56702507", "0.56699294", "0.56618977", "0.5661585", "0.5660922", "0.5660318", "0.5656047", "0.5655396", "0.56490684", "0.56474316", "0.56441224", "0.56411463", "0.56375265" ]
0.69627863
1
Serialize serializes a padding TLV
func (p *PaddingTLV) Serialize(buf *bytes.Buffer) { buf.WriteByte(p.TLVType) buf.WriteByte(p.TLVLength) buf.Write(p.PaddingData) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (p *PaddingTLV) Length() uint8 {\n\treturn p.TLVLength\n}", "func NewPaddingTLV(length uint8) *PaddingTLV {\n\treturn &PaddingTLV{\n\t\tTLVType: PaddingType,\n\t\tTLVLength: length,\n\t\tPaddingData: make([]byte, length),\n\t}\n}", "func (g *GroupedAVP) Padding() int {\n\treturn 0\n}", "func getPadding(packetLen int) int {\n\tif packetLen%4 == 0 {\n\t\treturn 0\n\t}\n\treturn 4 - (packetLen % 4)\n}", "func pad(unpadded []byte, desiredLength int) []byte {\n\tif len(unpadded) == desiredLength {\n\t\treturn unpadded\n\t}\n\ttoAppend := desiredLength - len(unpadded)\n\treturn append(unpadded, bytes.Repeat([]byte{byte(0x00)}, toAppend)...)\n}", "func (vb *VarBytes) Serialize(w io.Writer) error {\n\tvar varlen = VarUint{UintType: GetUintTypeByValue(vb.Len), Value: vb.Len}\n\tif err := varlen.Serialize(w); err != nil {\n\t\treturn err\n\t}\n\treturn binary.Write(w, binary.LittleEndian, vb.Bytes)\n}", "func encode(p *Packet, verbose bool, logger *log.Logger) []byte {\n\tb := make([]byte, 1024)\n\tb[0] = uint8(p.Code)\n\tb[1] = p.Identifier\n\t// Skip Len for now 2+3\n\tcopy(b[4:20], p.Auth)\n\twritten := 20\n\n\tbb := b[20:]\n\tfor _, attr := range p.Attrs {\n\t\taLen := len(attr.Bytes()) + 2 // add type+len fields\n\t\tif aLen > 255 || aLen < 2 {\n\t\t\tpanic(\"Value too big for attr\")\n\t\t}\n\t\tbb[0] = uint8(attr.Type())\n\t\tbb[1] = uint8(aLen)\n\t\tcopy(bb[2:], attr.Bytes())\n\n\t\twritten += aLen\n\t\tbb = bb[aLen:]\n\t}\n\n\t// Now set Len\n\tbinary.BigEndian.PutUint16(b[2:4], uint16(written))\n\tif verbose {\n\t\tlogger.Printf(\"packet.send: \" + debug(p))\n\t}\n\treturn b[:written]\n}", "func (e *EDNS0_PADDING) Option() uint16 { return EDNS0PADDING }", "func setupPadding() {\n\n\tpaddingMap[0] = \"10101010101010101010101010101010\"\n\tpaddingMap[1] = \"0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f\"\n\tpaddingMap[2] = \"0e0e0e0e0e0e0e0e0e0e0e0e0e0e\"\n\tpaddingMap[3] = \"0d0d0d0d0d0d0d0d0d0d0d0d0d\"\n\tpaddingMap[4] = \"0c0c0c0c0c0c0c0c0c0c0c0c\"\n\tpaddingMap[5] = \"0b0b0b0b0b0b0b0b0b0b0b\"\n\tpaddingMap[6] = \"0a0a0a0a0a0a0a0a0a0a\"\n\tpaddingMap[7] = \"090909090909090909\"\n\tpaddingMap[8] = \"0808080808080808\"\n\tpaddingMap[9] = \"07070707070707\"\n\tpaddingMap[10] = \"060606060606\"\n\tpaddingMap[11] = \"0505050505\"\n\tpaddingMap[12] = \"04040404\"\n\tpaddingMap[13] = \"030303\"\n\tpaddingMap[14] = \"0202\"\n\tpaddingMap[15] = \"01\"\n}", "func (p *PaddingTLV) Value() interface{} {\n\treturn p\n}", "func padding(message []byte, identifier string) []byte {\n\t// create padding for the strings email, firstname, lastname - RFC6234 multiple of 512\n\n\t// calculate length\n\tmessageSize := binary.Size(message) * 8\n\tlog.Printf(\"%s size: %dBit\\n\", identifier, messageSize)\n\t// ( L + 1 + K ) mod 512 = 448 -> calculate k\n\tmessageL := (messageSize % 512) + 1\n\n\tmessageK := messageL\n\tif messageL > 448 {\n\t\tmessageK = 448 + (512 - messageL)\n\t} else {\n\t\tmessageK = 448 - messageL\n\t}\n\n\t// create buffer to add bytewise\n\tmessageBuffer := bytes.NewBuffer(make([]byte, 0, 512))\n\tbinary.Write(messageBuffer, binary.BigEndian, message)\n\n\t// add 1 - add k - Work with bytes 8bit - add: 1000 0000 | k-7 * 0 - all Strings: string % 8 = 0\n\tbinary.Write(messageBuffer, binary.BigEndian, uint8(0x80))\n\n\t// itearate through the String length K and fill the buffer with 0s\n\tmessageK -= 7\n\n\t// error Handling - if the padding failed\n\tif messageK < 0 || messageK%8 != 0 {\n\t\tlog.Fatalf(\"%s Length of Bits is to long: %d\", identifier, messageK)\n\t}\n\n\t// iteration\n\tfor i := 
0; i < messageK/8; i++ {\n\t\tbinary.Write(messageBuffer, binary.BigEndian, uint8(0x00))\n\t}\n\n\t// 64-bit/8Byte block that is L in binary -> L original length\n\tbinary.Write(messageBuffer, binary.BigEndian, uint64(messageSize))\n\n\tlog.Printf(\"Padding for %s: %x(%dBytes|%dBits)\\n\", identifier, messageBuffer.Bytes(), binary.Size(messageBuffer.Bytes()), binary.Size(messageBuffer.Bytes())*8)\n\treturn messageBuffer.Bytes()\n}", "func (p *PaddingTLV) Type() uint8 {\n\treturn p.TLVType\n}", "func (t *TLV) MarshalBinary() []byte {\n\treturn nil\n}", "func TestPacket_Serialize(t *testing.T) {\n\ttearDown := setUp(t)\n\tdefer tearDown(t)\n\n\tdata := packet.Serialize()\n\tassert.Equal(t, []byte{0x21, 0x31}, data[0:2])\n\tassert.Equal(t, uint16(len(payload)+32), binary.BigEndian.Uint16(data[2:4]))\n\tassert.Equal(t, uint32(0), binary.BigEndian.Uint32(data[4:8]))\n\tassert.Equal(t, uint32(0xAAAABBBB), binary.BigEndian.Uint32(data[8:12]))\n\tassert.Equal(t, uint32(0xCCCCDDDD), binary.BigEndian.Uint32(data[12:16]))\n\tassert.Equal(t, deviceToken, data[16:32])\n}", "func (pkt *Packet) MarshalTlv() (typ uint32, value []byte, e error) {\n\tpayload, e := pkt.encodeL3()\n\tif e != nil {\n\t\treturn 0, nil, e\n\t}\n\treturn tlv.EncodeTlv(an.TtLpPacket, tlv.MakeElement(an.TtLpFragment, payload))\n}", "func EncodeBytesWithPadding(data []byte, targetLength int) []byte {\n\tvar buf bytes.Buffer\n\n\tfor i := 0; i < targetLength-len(data); i++ {\n\t\tbuf.WriteByte(0)\n\t}\n\n\tbuf.Write(data)\n\treturn buf.Bytes()\n}", "func pad(in []byte, length int) []byte {\n\tpadding := length - (len(in) % length)\n\tif padding == 0 {\n\t\tpadding = length\n\t}\n\tfor i := 0; i < padding; i++ {\n\t\tin = append(in, byte(padding))\n\t}\n\treturn in\n}", "func (m Mixer) EncodeIDPadding(password string, id uint64, paddingLen int) string {\n\treturn m.EncodeBase32Padding(password, strconv.FormatUint(id, 10), paddingLen)\n}", "func (p *IPv4) Marshal() []byte {\n\tdata := make([]byte, 20+len(p.data))\n\n\t// 0 1 2 3\n\t// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1\n\t// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n\t// |Version| IHL |Type of Service| Total Length |\n\t// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n\t// | Identification |Flags| Fragment Offset |\n\t// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n\t// | Time to Live | Protocol | Header Checksum |\n\t// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n\t// | Source Address |\n\t// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n\t// | Destination Address |\n\t// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n\t// | Options | Padding |\n\t// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n\n\tdata[0] = byte(p.version<<4 | 5)\n\tdata[1] = byte(p.tos)\n\tbo.PutUint16(data[2:], uint16(len(data)))\n\tbo.PutUint16(data[4:], p.id)\n\tbo.PutUint16(data[6:], (uint16(p.flags)<<13)|(p.offset&0x1fff))\n\tdata[8] = p.ttl\n\tdata[9] = byte(p.protocol)\n\tcopy(data[12:], p.src[0:4])\n\tcopy(data[16:], p.dst[0:4])\n\tcopy(data[20:], p.data)\n\n\tcks := Checksum(data[0:20])\n\tbo.PutUint16(data[10:], cks)\n\n\treturn data\n}", "func padding(size int, alignment int) int {\n\tunalignedPart := size % alignment\n\treturn (alignment - unalignedPart) % alignment\n}", "func padding(size int) string {\n\tresult := \"\"\n\tfor i := 0; i < size; i++ {\n\t\tresult += \" \"\n\t}\n\treturn result\n}", "func (npdu *NPDU) 
Marshal() ([]byte, error) {\n\tb := make([]byte, npduLength)\n\tb[0] = npdu.Version\n\tb[1] = npdu.Control\n\treturn b, nil\n}", "func Encode(packet packets.NdnPacket, byteStream io.Writer) error {\n\tt := Tlv{}\n\tswitch packet.PacketType() {\n\tcase INTEREST:\n\t\tt, _ = encodeInterest(packet.(packets.Interest))\n\tcase DATA:\n\t\tt, _ = encodeData(packet.(packets.Data))\n\tdefault:\n\t\treturn errors.New(\"Encode: -- unknown packet type --\")\n\t}\n\n\t//write the tlv as bytes\n\terr := TlvToBytes(t, byteStream)\n\treturn err\n}", "func (a PrefixAnnouncement) MarshalTlv() (typ uint32, value []byte, e error) {\n\tvalidityType, validityValue, _ := a.ValidityPeriod.MarshalTlv()\n\treturn tlv.EncodeTlv(an.TtContent, tlv.MakeElementNNI(an.MgmtExpirationPeriod, a.ExpirationPeriod), tlv.MakeElement(validityType, validityValue))\n}", "func (m Mixer) EncodeBase32Padding(password string, value string, paddingLen int) string {\n\t//base32 chars range: [A-Z] [2-7]\n\trunes := []rune(base32NoPadding.EncodeToString([]byte(value)))\n\tnumLen := len(runes)\n\tif paddingLen > 0 && numLen < paddingLen {\n\t\tseed := m.getSeed(password)\n\t\tpdSize := paddingLen - numLen - 1\n\t\trunes = append(runes, '0') //zero to split padding chars\n\t\tif pdSize > 0 {\n\t\t\trunes = append(runes, randomAlphabets(pdSize, seed)...)\n\t\t}\n\t}\n\treturn string(m.Encode(password, runes))\n}", "func (cfg frozenConfig) MarshalIndent(val interface{}, prefix, indent string) ([]byte, error) {\n return encoder.EncodeIndented(val, prefix, indent, cfg.encoderOpts)\n}", "func (f *FullyQualifiedCSIDFields) MarshalLen() int {\n\tl := 1\n\n\tswitch f.NodeIDType {\n\tcase nodeIDIPv4, nodeIDOther:\n\t\tl += 4\n\tcase nodeIDIPv6:\n\t\tl += 16\n\tdefault:\n\n\t}\n\n\tl += len(f.CSIDs) * 2\n\treturn l\n}", "func padding(level int, node *Node) string {\n\tlinks := make([]string, level+1)\n\n\tfor node.Root != nil {\n\t\tif isLast(node) {\n\t\t\tlinks[level] = strings.Repeat(\" \", IndentSize+1)\n\t\t} else {\n\t\t\tlinks[level] = fmt.Sprintf(\"%s%s\", EdgeTypeLink, strings.Repeat(\" \", IndentSize))\n\t\t}\n\t\tlevel--\n\t\tnode = node.Root\n\t}\n\n\treturn strings.Join(links, \"\")\n}", "func DumpData(i interface{}, pad int, writer io.Writer) {\n\tvar pads = strings.Repeat(\" \", pad)\n\tvar x = 80 - pad\n\n\tswitch v := i.(type) {\n\tcase *big.Int:\n\t\tvar p = certformat.Formater(fmt.Sprintf(\"%x\", v), \":\", \"0\")\n\t\tw := (x / 3) * 3\n\t\tfor j := 0; j < len(p); j += w {\n\t\t\tm := j + w\n\t\t\tif m > len(p) {\n\t\t\t\tm = len(p)\n\t\t\t}\n\t\t\tfmt.Fprintf(writer, \"%s%s\\n\", pads, p[j:m])\n\t\t}\n\n\tcase string:\n\t\tfor j := 0; j < len(v); j += x {\n\t\t\tm := j + x\n\t\t\tif m > len(v) {\n\t\t\t\tm = len(v)\n\t\t\t}\n\t\t\tfmt.Fprintf(writer, \"%s%s\\n\", pads, v[j:m])\n\t\t}\n\n\tcase *string:\n\t\tDumpData(*v, pad, writer)\n\n\tcase []uint8: // aka []byte\n\t\tvar p = certformat.Formater(hex.EncodeToString(v), \":\", \"0\")\n\t\tw := (x / 3) * 3\n\t\tfor j := 0; j < len(p); j += w {\n\t\t\tm := j + w\n\t\t\tif m > len(p) {\n\t\t\t\tm = len(p)\n\t\t\t}\n\t\t\tfmt.Fprintf(writer, \"%s%s\\n\", pads, p[j:m])\n\t\t}\n\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"don't know how to dump %T\", v))\n\t}\n}", "func EncodeStringWithPadding(data string, targetLength int) []byte {\n\tvar buf bytes.Buffer\n\n\tif len(data) < targetLength {\n\t\tfor i := 0; i < targetLength-len(data); i++ {\n\t\t\tbuf.WriteByte(0)\n\t\t}\n\t}\n\n\tbuf.Write([]byte(data))\n\treturn buf.Bytes()\n}", "func (c *Configuration) Serialize() []byte {\n\tb := 
make([]byte, confLength)\n\tb[0] = byte(c.Group)\n\tb[1] = byte(c.KDF)\n\tb[2] = byte(c.MAC)\n\tb[3] = byte(c.Hash)\n\tb[4] = byte(c.MHF)\n\tb[5] = byte(c.Mode)\n\tb[6] = encoding.I2OSP(c.NonceLen, 1)[0]\n\n\treturn b\n}", "func (w *messageWriter) pad(alignment int) error {\n\tn, err := w.Write(padding[:w.pos%alignment])\n\tif err != nil {\n\t\treturn err\n\t}\n\tw.pos += n\n\treturn nil\n}", "func (pa Attr) pack() []byte {\n\tvar space bool\n\tout := new(bytes.Buffer)\n\n\tfor k, v := range pa {\n\t\tif space {\n\t\t\tout.WriteRune(' ')\n\t\t}\n\t\tfmt.Fprintf(out, \"%s=%s\", k, quote(v))\n\n\t\tspace = true\n\t}\n\n\treturn out.Bytes()\n}", "func pad(d []byte, n int) []byte {\n\td = append(d, make([]byte, n)...)\n\treturn d\n}", "func (f *STAGFields) MarshalLen() int {\n\treturn 3\n}", "func (c *uint8Type) SerializeFixedT(v uint8, dest []byte) error {\n\tdest[0] = v\n\treturn nil\n}", "func Iso7816Padder(padLen int) []byte {\n\tpadding := append([]byte{byte(0x80)}, \n\t bytes.Repeat([]byte{byte(0)}, padLen-1)...)\n\treturn padding\n}", "func padToLength(source string, prefix int) string {\n\treturn fmt.Sprintf(fmt.Sprintf(\"%%-%ds\", prefix), source)\n}", "func padID(id string) string {\n\texpectedLen := 0\n\tswitch {\n\tcase len(id) < 16:\n\t\texpectedLen = 16\n\tcase len(id) > 16 && len(id) < 32:\n\t\texpectedLen = 32\n\tdefault:\n\t\treturn id\n\t}\n\n\treturn pads[expectedLen-len(id)] + id\n}", "func (v *Value) String() string { return v.macaddr.String() }", "func (d *DHCPv4) ToBytes() []byte {\n\tbuf := uio.NewBigEndianBuffer(make([]byte, 0, minPacketLen))\n\tbuf.Write8(uint8(d.OpCode))\n\tbuf.Write8(uint8(d.HWType))\n\n\t// HwAddrLen\n\thlen := uint8(len(d.ClientHWAddr))\n\tbuf.Write8(hlen)\n\tbuf.Write8(d.HopCount)\n\tbuf.WriteBytes(d.TransactionID[:])\n\tbuf.Write16(d.NumSeconds)\n\tbuf.Write16(d.Flags)\n\n\twriteIP(buf, d.ClientIPAddr)\n\twriteIP(buf, d.YourIPAddr)\n\twriteIP(buf, d.ServerIPAddr)\n\twriteIP(buf, d.GatewayIPAddr)\n\tcopy(buf.WriteN(16), d.ClientHWAddr)\n\n\tvar sname [64]byte\n\tcopy(sname[:63], []byte(d.ServerHostName))\n\tbuf.WriteBytes(sname[:])\n\n\tvar file [128]byte\n\tcopy(file[:127], []byte(d.BootFileName))\n\tbuf.WriteBytes(file[:])\n\n\t// The magic cookie.\n\tbuf.WriteBytes(magicCookie[:])\n\n\t// Write all options.\n\td.Options.Marshal(buf)\n\n\t// DHCP is based on BOOTP, and BOOTP messages have a minimum length of\n\t// 300 bytes per RFC 951. 
This not stated explicitly, but if you sum up\n\t// all the bytes in the message layout, you'll get 300 bytes.\n\t//\n\t// Some DHCP servers and relay agents care about this BOOTP legacy B.S.\n\t// and \"conveniently\" drop messages that are less than 300 bytes long.\n\t//\n\t// We subtract one byte for the OptionEnd option.\n\tif buf.Len()+1 < bootpMinLen {\n\t\tbuf.WriteBytes(bytes.Repeat([]byte{OptionPad.Code()}, bootpMinLen-1-buf.Len()))\n\t}\n\n\t// Finish the packet.\n\tbuf.Write8(OptionEnd.Code())\n\n\treturn buf.Data()\n}", "func (v *VLAN) MarshalBinary() ([]byte, error) {\n\tb := make([]byte, 2)\n\t_, err := v.read(b)\n\treturn b, err\n}", "func PKCS(data []byte, mode string) (padded_data []byte) {\r\n\tvar pad_num int\r\n\r\n\tif mode == \"add\" {\r\n\t\trem := len(data) % userlib.AESBlockSizeBytes\r\n\t\tpad_num = userlib.AESBlockSizeBytes - rem //number to pad by\r\n\t\t//pad := make([]byte, pad_num) //pad array we are appending later\r\n\t\tpadded_data = data[:]\r\n\t\tfor i := 0; i < pad_num; i++ {\r\n\t\t\t//pad = append(pad, byte(pad_num))\r\n\t\t\tpadded_data = append(padded_data, byte(pad_num))\r\n\t\t}\r\n\r\n\t\t//userlib.DebugMsg(\"%d\", padded_data)\r\n\t} else { //remove padding\r\n\t\t//last byte is amount of padding there is\r\n\t\t//ex: d = [1022] means 2 bytes of padding so return d[:2] which is [10]\r\n\r\n\t\tnum := len(data) - 1\r\n\t\tpad_num = len(data) - int(data[num]) //piazza: convert to byte > hex string > int?\r\n\t\tpadded_data = data[:pad_num]\r\n\t}\r\n\r\n\treturn padded_data\r\n}", "func canonicalPadding(b []byte) error {\n\tswitch {\n\tcase b[0]&0x80 == 0x80:\n\t\treturn errNegativeValue\n\tcase len(b) > 1 && b[0] == 0x00 && b[1]&0x80 != 0x80:\n\t\treturn errExcessivelyPaddedValue\n\tdefault:\n\t\treturn nil\n\t}\n}", "func MarshalSignableVote(r *bytes.Buffer, vote *Header) error {\n\tif err := encoding.WriteUint64LE(r, vote.Round); err != nil {\n\t\treturn err\n\t}\n\n\tif err := encoding.WriteUint8(r, vote.Step); err != nil {\n\t\treturn err\n\t}\n\n\treturn encoding.Write256(r, vote.BlockHash)\n}", "func (n *NegTokenInit) Marshal() ([]byte, error) {\n\tb, err := asn1.Marshal(*n)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnt := asn1.RawValue{\n\t\tTag: 0,\n\t\tClass: 2,\n\t\tIsCompound: true,\n\t\tBytes: b,\n\t}\n\tnb, err := asn1.Marshal(nt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn nb, nil\n}", "func (t *TxOut) SerializeSize() int {\n\treturn RCDHashSize + VarIntSerializeSize(uint64(t.Value))\n}", "func (p *Packet) Encode() []byte {\n\tn := len(p.Body)\n\tif n <= 191 {\n\t\tpacket := make([]byte, 2+n)\n\t\tpacket[0] = 0xc0 | p.Tag\n\t\tpacket[1] = byte(n)\n\t\treturn append(packet[:2], p.Body...)\n\t} else if n <= 8383 {\n\t\tpacket := make([]byte, 3+n)\n\t\tpacket[0] = 0xc0 | p.Tag\n\t\tpacket[1] = byte((n-192)>>8) + 192\n\t\tpacket[2] = byte(n - 192)\n\t\treturn append(packet[:3], p.Body...)\n\t} else {\n\t\tpacket := make([]byte, 6+n)\n\t\tpacket[0] = 0xc0 | p.Tag\n\t\tpacket[1] = 0xff\n\t\tbinary.BigEndian.PutUint32(packet[2:], uint32(n))\n\t\treturn append(packet[:6], p.Body...)\n\t}\n}", "func PaddingKey(key []byte) []byte {\n\t// Initially set to 0\n\t// Becuase it's hex byte array, so 128bit -> 32byte\n\tpaddingBytes := make([]byte, 32-len(key))\n\tkey = append(key, paddingBytes...)\n\n\treturn key\n}", "func (priv *PrivateKey) Marshal() []byte {\n\tout := make([]byte, 0, 3*32)\n\tout = appendN(out, priv.xi1)\n\tout = appendN(out, priv.xi2)\n\tout = appendN(out, priv.gamma)\n\treturn out\n}", "func (p *PrivateKey) 
Serialize() []byte {\n\tb := make([]byte, 0, PrivKeyBytesLen)\n\treturn paddedAppend(PrivKeyBytesLen, b, p.ToECDSA().D.Bytes())\n}", "func (p *Patch) SetPadding(value mat.AABB) {\n\tp.Padding = value\n\tp.SetRegion(p.Region)\n}", "func padData(rawData []byte) []byte {\n\tneedPadding := aes.BlockSize - ((len(rawData) + 2) % aes.BlockSize)\n\n\tvar dataBuf bytes.Buffer\n\tdataBuf.Grow(2 + len(rawData) + (aes.BlockSize % (len(rawData) + 2)))\n\n\tdataBuf.Write([]byte(\"|\"))\n\tdataBuf.Write(rawData)\n\tdataBuf.Write([]byte(\"|\"))\n\n\tfor i := 0; i < needPadding; i++ {\n\t\tdataBuf.Write([]byte(\" \"))\n\t}\n\n\treturn dataBuf.Bytes()\n}", "func SerializePacket(q *Packet) ([]byte, error) {\n\tvar msgBuffer bytes.Buffer\n\tenc := gob.NewEncoder(&msgBuffer)\n\terr := enc.Encode(q)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Failed to serialize packet\")\n\t}\n\n\tlength := msgBuffer.Len()\n\n\tvar lengthBytes [8]byte\n\tbinary.PutUvarint(lengthBytes[:], uint64(length))\n\n\tvar result []byte\n\tresult = append(result, lengthBytes[:]...)\n\tresult = append(result, msgBuffer.Bytes()...)\n\n\treturn result, nil\n}", "func EncodeLength(length uint64) []byte {\n\t// The first bit is used to indicate whether this is the final byte\n\t// encoding the length. So, if the first bit is 0, just return a one\n\t// byte response containing the byte-encoded length.\n\tif length <= 0x7f {\n\t\treturn []byte{byte(length)}\n\t}\n\n\t// If the length is bigger the format is, first bit 1 + the rest of the\n\t// bits in the first byte encode the length of the length, then follows\n\t// the actual length.\n\n\t// Technically the SNMP spec allows for packet lengths longer than can be\n\t// specified in a 127-byte encoded integer, however, going out on a limb\n\t// here, I don't think I'm going to support a use case that insane.\n\n\tr := EncodeUInt(length)\n\tnumOctets := len(r)\n\tresult := make([]byte, 1+numOctets)\n\tresult[0] = 0x80 | byte(numOctets)\n\tfor i, b := range r {\n\t\tresult[1+i] = b\n\t}\n\treturn result\n}", "func (self *Encoder) Encode(v interface{}) ([]byte, error) {\n if self.indent != \"\" || self.prefix != \"\" { \n return EncodeIndented(v, self.prefix, self.indent, self.Opts)\n }\n return Encode(v, self.Opts)\n}", "func (priv *PrivateKey) ToBytes() (b []byte) {\n\td := priv.D.Bytes()\n\n\t/* Pad D to 32 bytes */\n\tpadded_d := append(bytes.Repeat([]byte{0x3f}, 32-len(d)), d...)\n\n\treturn padded_d\n}", "func (enc Encoding) WithPadding(padding rune) *Encoding {\n\tswitch {\n\tcase padding < NoPadding || padding == '\\r' || padding == '\\n' || padding > 0xff:\n\t\tpanic(\"invalid padding\")\n\tcase padding != NoPadding && enc.decodeMap[byte(padding)] != invalidIndex:\n\t\tpanic(\"padding contained in alphabet\")\n\t}\n\tenc.padChar = padding\n\treturn &enc\n}", "func (pingreq PingReqPacket) Marshall() []byte {\n\tfixedLength := 2\n\tbuf := make([]byte, fixedLength)\n\n\t// Header\n\tbuf[0] = byte(pingreqType << 4)\n\tbuf[1] = byte(0)\n\n\treturn buf\n}", "func pad(data []byte, blockSize int, padder Padder) []byte {\n \tdataLen := len(data)\n\tpadLen := blockSize - (dataLen % blockSize)\n\tpadding := padder(padLen)\n\treturn append(data, padding...)\n}", "func (mem *MemberKey) Tag() []byte {\n\treturn mem.a.Marshal()\n}", "func (sd *PrivateDescriptor) encode() ([]byte, error) {\n\tlength := sd.length()\n\n\t// add 2 bytes to contain splice_descriptor_tag & descriptor_length\n\tbuf := make([]byte, length+2)\n\tiow := iobit.NewWriter(buf)\n\tiow.PutUint32(8, 
sd.PrivateTag)\n\tiow.PutUint32(8, uint32(length))\n\tiow.PutUint32(32, sd.Identifier)\n\t_, err := iow.Write(sd.PrivateBytes)\n\tif err != nil {\n\t\treturn buf, err\n\t}\n\terr = iow.Flush()\n\n\treturn buf, err\n}", "func GetPaddingPayload(payloadSize int) ([]byte, error) {\n\tpayload := make([]byte, payloadSize)\n\tfor index := range payload {\n\t\tpayload[index] = 0xff\n\t}\n\treturn payload, nil\n}", "func (d *DecimalAlign) Pad(v interface{}) string {\n\tvar lp int\n\tif s, ok := v.(string); ok {\n\t\t// If a string then look for \".\".\n\t\t// If found then lp=num chars before but excluding it.\n\t\t// If not found then use length of string\n\t\tlp = strings.Index(s, \".\")\n\t\tif lp < 0 {\n\t\t\tlp = len(s)\n\t\t}\n\t} else {\n\t\tvf, _ := util.ToFloat64(v)\n\t\tlp = len(fmt.Sprintf(\"%.0f\", vf))\n\t}\n\treturn strconv.Itoa(d.lp - lp)\n}", "func (apdu *APDU) Marshal() ([]byte, error) {\n\tbuf := new(bytes.Buffer)\n\tbuf.WriteByte(apdu.TypeAndFlags)\n\tif apdu.SegmentSizes.set {\n\t\tbuf.WriteByte(apdu.SegmentSizes.raw)\n\t}\n\tbuf.WriteByte(apdu.InvokeID)\n\tbuf.WriteByte(apdu.ServerChoice)\n\treturn buf.Bytes(), nil\n}", "func Put(buf []byte, tag uint32, data []byte) ([]byte, error) {\n\tbuf = append(buf, PackTag(tag)...)\n\n\tl := len(data)\n\tswitch {\n\tcase l < 128:\n\t\tbuf = append(buf, byte(l))\n\tcase l < 256:\n\t\tbuf = append(buf, 0x81, byte(l))\n\tcase l < 65536:\n\t\tbuf = append(buf, 0x82, byte(l>>8), byte(l))\n\tdefault:\n\t\treturn nil, errors.New(\"TLV too long\")\n\t}\n\n\treturn append(buf, data...), nil\n}", "func (DisconnectPacket) Marshall() []byte {\n\tfixedLength := 2\n\tbuf := make([]byte, fixedLength)\n\n\tbuf[0] = disconnectType << 4\n\tbuf[1] = 0\n\treturn buf\n}", "func (m *VpnConfiguration) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {\n err := m.DeviceConfiguration.Serialize(writer)\n if err != nil {\n return err\n }\n if m.GetAuthenticationMethod() != nil {\n cast := (*m.GetAuthenticationMethod()).String()\n err = writer.WriteStringValue(\"authenticationMethod\", &cast)\n if err != nil {\n return err\n }\n }\n {\n err = writer.WriteStringValue(\"connectionName\", m.GetConnectionName())\n if err != nil {\n return err\n }\n }\n {\n err = writer.WriteStringValue(\"realm\", m.GetRealm())\n if err != nil {\n return err\n }\n }\n {\n err = writer.WriteStringValue(\"role\", m.GetRole())\n if err != nil {\n return err\n }\n }\n if m.GetServers() != nil {\n cast := make([]i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, len(m.GetServers()))\n for i, v := range m.GetServers() {\n if v != nil {\n cast[i] = v.(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable)\n }\n }\n err = writer.WriteCollectionOfObjectValues(\"servers\", cast)\n if err != nil {\n return err\n }\n }\n return nil\n}", "func (eln *EmptyLeafNode) Serialize(w io.Writer) {\n\tw.Write([]byte{byte(NodeTypeEmptyLeaf)})\n}", "func (*nopSerializer) Marshal(Message) ([]byte, error) { return nil, nil }", "func (t *TPDU) MarshalLen() int {\n\treturn t.Header.MarshalLen() - len(t.Header.Payload) + len(t.Payload)\n}", "func EncodeBase64Pad(value bool) EncodeBase64Attr {\n\treturn func(m optionalAttr) {\n\t\tm[\"pad\"] = value\n\t}\n}", "func Pad(prefix string, reqd int) string {\n\treturn strings.Repeat(\"0\", 5-len(prefix)) + prefix\n}", "func (*OpenconfigInterfaces_Interfaces_Interface_Subinterfaces_Subinterface_Vlan_Config_VlanId_Union_String) 
Is_OpenconfigInterfaces_Interfaces_Interface_Subinterfaces_Subinterface_Vlan_Config_VlanId_Union() {}", "func (*OpenconfigInterfaces_Interfaces_Interface_Subinterfaces_Subinterface_Vlan_Config_VlanId_Union_String) Is_OpenconfigInterfaces_Interfaces_Interface_Subinterfaces_Subinterface_Vlan_Config_VlanId_Union() {}", "func (p *dataPacket) Write(w io.Writer) (int, error) {\n\tif _, err := w.Write(p.nonce[:]); err != nil {\n\t\treturn 0, err\n\t}\n\tif err := binary.Write(w, binary.LittleEndian, int64(len(p.data))); err != nil {\n\t\treturn 0, err\n\t}\n\treturn w.Write(p.data)\n}", "func (o *IPFixGen) encodeVarLengthData(length int, ie []byte) (data []byte) {\n\t/*\n\t\tIn most cases, the length of the Information Element will be less\n\t\tthan 255 octets. The following length-encoding mechanism optimizes\n\t\tthe overhead of carrying the Information Element length in this more\n\t\tcommon case. The length is carried in the octet before the\n\t\tInformation Element, as shown in Figure R.\n\n\t\t 0 1 2 3\n\t\t 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1\n\t\t+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n\t\t| Length (< 255)| Information Element |\n\t\t+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n\t\t| ... continuing as needed |\n\t\t+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n\n\t\t\tFigure R: Variable-Length Information Element (IE)\n\t\t\t\t\t\t(Length < 255 Octets)\n\n\t\tThe length may also be encoded into 3 octets before the Information\n\t\tElement, allowing the length of the Information Element to be greater\n\t\tthan or equal to 255 octets. In this case, the first octet of the\n\t\tLength field MUST be 255, and the length is carried in the second and\n\t\tthird octets, as shown in Figure S.\n\n\t\t 0 1 2 3\n\t\t 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1\n\t\t+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n\t\t| 255 | Length (0 to 65535) | IE |\n\t\t+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n\t\t| ... 
continuing as needed |\n\t\t+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n\n\t\t\tFigure S: Variable-Length Information Element (IE)\n\t\t\t\t\t(Length 0 to 65535 Octets)\n\n\t\tThe octets carrying the length (either the first or the first\n\t\tthree octets) MUST NOT be included in the length of the Information\n\t\tElement.\n\t*/\n\tif length < 0xFF {\n\t\tdata = append(data, uint8(length))\n\t} else {\n\t\tdata = append(data, 0xFF)\n\t\tlengthBuffer := make([]byte, 2)\n\t\tbinary.BigEndian.PutUint16(lengthBuffer, uint16(length))\n\t\tdata = append(data, lengthBuffer...)\n\t}\n\tdata = append(data, ie[:length]...)\n\treturn data\n}", "func (r *RecBytes) Serialize() []byte {\n\tres := make([]byte, 0, len(r.Path)+len(r.Time)+len(r.Val)+3)\n\tres = append(res, r.Path...)\n\tres = append(res, ' ')\n\tres = append(res, r.Val...)\n\tres = append(res, ' ')\n\tres = append(res, r.Time...)\n\tres = append(res, '\\n')\n\n\treturn res\n}", "func (t *AlternateTimeOffsetIndicatorTLV) MarshalBinaryTo(b []byte) (int, error) {\n\ttlvHeadMarshalBinaryTo(&t.TLVHead, b)\n\tb[tlvHeadSize] = t.KeyField\n\tbinary.BigEndian.PutUint32(b[tlvHeadSize+1:], uint32(t.CurrentOffset))\n\tbinary.BigEndian.PutUint32(b[tlvHeadSize+5:], uint32(t.JumpSeconds))\n\tcopy(b[tlvHeadSize+9:], t.TimeOfNextJump[:]) //uint48\n\tsize := tlvHeadSize + 15\n\tif t.DisplayName != \"\" {\n\t\tdd, err := t.DisplayName.MarshalBinary()\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"writing AlternateTimeOffsetIndicatorTLV DisplayName: %w\", err)\n\t\t}\n\t\tcopy(b[tlvHeadSize+15:], dd)\n\t\tsize += len(dd)\n\t}\n\treturn size, nil\n}", "func (f XmlDataLenField) Tag() quickfix.Tag { return tag.XmlDataLen }", "func paddedLength(x int) int {\n\treturn (x + 0xf) & -0x10\n}", "func (g *GroupedAVP) Serialize() []byte {\n\tb := make([]byte, g.Len())\n\tvar n int\n\tfor _, a := range g.AVP {\n\t\ta.SerializeTo(b[n:])\n\t\tn += a.Len()\n\t}\n\treturn b\n}", "func SerializePacket(p *Packet) ([]byte, error) {\n\tdata, err := p.Marshal()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Failed to serialize packet\")\n\t}\n\n\tvar lengthBytes [8]byte\n\tbinary.PutUvarint(lengthBytes[:], uint64(p.Size()))\n\n\tvar result []byte\n\tresult = append(result, lengthBytes[:]...)\n\tresult = append(result, data...)\n\n\treturn result, nil\n}", "func (r ReissuePacketPrivate) Marshal() ([]byte, error) {\n\treturn asn1.Marshal(r)\n}", "func padBytesNeeded(elementLen int) int {\n\treturn 4*(elementLen/4+1) - elementLen\n}", "func (n Name) Serialize() []byte {\n\tnameData := make([]byte, 0, len(n)+5)\n\tfor _, label := range n.getLabels() {\n\t\tnameData = append(nameData, encodeLabel(label)...)\n\t}\n\t//terminate labels\n\tnameData = append(nameData, byte(0))\n\n\treturn nameData\n}", "func (p *ubPayload) Encode() []byte {\n\tretString := strconv.Itoa(p.flags)\n\tfl := strconv.Itoa(len(retString))\n\tretString = fl + retString + p.suffix\n\treturn []byte(retString)\n}", "func fixPadding(b64 string) string {\n\tcount := 4 - len(b64)%4\n\tif count > 0 && count < 4 {\n\t\treturn b64 + strings.Repeat(\"=\", count)\n\t}\n\treturn b64\n}", "func marshalVlan(info *VLan) ([]byte, error) {\n\toptions := []tcOption{}\n\n\tif info == nil {\n\t\treturn []byte{}, fmt.Errorf(\"VLan: %w\", ErrNoArg)\n\t}\n\t// TODO: improve logic and check combinations\n\tif info.Tm != nil {\n\t\treturn []byte{}, ErrNoArgAlter\n\t}\n\tif info.Parms != nil {\n\t\tdata, err := marshalStruct(info.Parms)\n\t\tif err != nil {\n\t\t\treturn []byte{}, err\n\t\t}\n\t\toptions 
= append(options, tcOption{Interpretation: vtBytes, Type: tcaVLanParms, Data: data})\n\t}\n\tif info.PushID != nil {\n\t\toptions = append(options, tcOption{Interpretation: vtUint16, Type: tcaVLanPushVLanID, Data: *info.PushID})\n\t}\n\tif info.PushProtocol != nil {\n\t\toptions = append(options, tcOption{Interpretation: vtUint16, Type: tcaVLanPushVLanProtocol, Data: *info.PushProtocol})\n\t}\n\tif info.PushPriority != nil {\n\t\toptions = append(options, tcOption{Interpretation: vtUint32, Type: tcaVLanPushVLanPriority, Data: *info.PushPriority})\n\t}\n\treturn marshalAttributes(options)\n}", "func (p *icmpEcho) Marshal() ([]byte, error) {\n\tb := make([]byte, 4+len(p.Data))\n\tb[0], b[1] = byte(p.ID>>8), byte(p.ID&0xff)\n\tb[2], b[3] = byte(p.Seq>>8), byte(p.Seq&0xff)\n\tcopy(b[4:], p.Data)\n\treturn b, nil\n}", "func paddedAppend(size uint, dst, src []byte) []byte {\n\tfor i := 0; i < int(size)-len(src); i++ {\n\t\tdst = append(dst, 0)\n\t}\n\treturn append(dst, src...)\n}", "func (p *Service) Pad(bytesValue []byte, blockSize int) ([]byte, error) {\n\tif len(bytesValue) == 0 {\n\t\treturn nil, errZeroLengthValue\n\t}\n\n\tif blockSize <= 0 {\n\t\treturn nil, errLesserThanOneBlockSize\n\t}\n\n\tpadSize := blockSize - (len(bytesValue) % blockSize)\n\tif padSize == 0 {\n\t\tpadSize = blockSize\n\t}\n\n\tpad := bytes.Repeat(\n\t\t[]byte{\n\t\t\tbyte(padSize),\n\t\t},\n\t\tpadSize,\n\t)\n\n\treturn append(bytesValue, pad...), nil\n}", "func (to *TxOut) SerializeSize() int {\n\t// Value 8 bytes + assetLength + Asset 12 byte\n\t// + serialized varint size for the length of PkScript +\n\t// PkScript bytes.\n\t// + serialized varint size for the length of Data +\n\t// Data bytes.\n\treturn 21 +\n\t\tserialization.VarIntSerializeSize(uint64(len(to.PkScript))) + len(to.PkScript) +\n\t\tserialization.VarIntSerializeSize(uint64(len(to.Data))) + len(to.Data)\n}", "func pad(message []byte) []byte {\n lenPadding := aes.BlockSize - (len(message) % aes.BlockSize)\n for i := 0; i < lenPadding; i++ {\n message = append(message, byte(lenPadding))\n }\n return message\n}", "func (h NalHeader) Marshal() ([]byte, error) {\n\t// avoid buf alloc\n\tw := bytes.NewBuffer(make([]byte, 0, h.MarshalSize()))\n\tbyte0 := h.ForbiddenZeroBit.Byte()\n\tbyte0 |= h.NalRefIdc.Byte()\n\tbyte0 |= h.NalUnitType.Byte()\n\tw.WriteByte(byte0)\n\treturn w.Bytes(), nil\n}", "func (b BitString) RightAlign() []byte {}", "func pad(src []byte) []byte {\n\tpadding := aes.BlockSize - len(src)%aes.BlockSize\n\tpadtext := bytes.Repeat([]byte{byte(padding)}, padding)\n\n\treturn append(src, padtext...)\n}", "func MarshalWithParams(val interface{}, params string) ([]byte, error) {}", "func packNil(pad *scratchpad) {\n\tpad.endAppend('s', 3, 'n', 'i', 'l')\n}", "func (pt MDTurbo) Serialize() ([PartitionBlkLen]uint8, error) {\n\treturn Serialize(pt)\n}" ]
[ "0.5785339", "0.5553274", "0.5451155", "0.5364017", "0.5321094", "0.52745724", "0.5261391", "0.52536887", "0.5251995", "0.52181494", "0.5216098", "0.5210108", "0.520552", "0.5186594", "0.5163753", "0.50572926", "0.50076044", "0.49867326", "0.4979345", "0.49656087", "0.49597865", "0.49460152", "0.49416703", "0.4922712", "0.49021354", "0.48864308", "0.48618677", "0.48606154", "0.48505667", "0.48497665", "0.48452827", "0.48114622", "0.48110026", "0.48042673", "0.47865945", "0.47851744", "0.47712746", "0.47658414", "0.47572902", "0.47344777", "0.4725565", "0.47235295", "0.4690708", "0.46846595", "0.4680104", "0.4673993", "0.46725866", "0.46711916", "0.46647748", "0.46414125", "0.46352577", "0.46328518", "0.46314192", "0.46290416", "0.4627405", "0.46256867", "0.4623284", "0.46224353", "0.46193793", "0.46148098", "0.45988193", "0.4593803", "0.45902872", "0.45869553", "0.45867002", "0.45833057", "0.45825908", "0.45767212", "0.4575379", "0.4570997", "0.45699736", "0.4568047", "0.4568", "0.45634407", "0.45631325", "0.4561878", "0.45560798", "0.45402166", "0.4540066", "0.45298046", "0.45260093", "0.4521318", "0.45194942", "0.45127976", "0.45101574", "0.45064083", "0.45056772", "0.44990608", "0.44976336", "0.4497206", "0.44826904", "0.447846", "0.44734502", "0.4472611", "0.4455904", "0.44502753", "0.44479808", "0.44419295", "0.44336843", "0.44310814" ]
0.76584166
0
Injectors from injector.go: Build
func Build() (*chi.Mux, func(), error) {
	logrusLogger, cleanup, err := logger.Provider()
	if err != nil {
		return nil, nil, err
	}
	opentracingTracer, cleanup2, err := tracer.Provider(logrusLogger)
	if err != nil {
		cleanup()
		return nil, nil, err
	}
	serverHandler, cleanup3, err := handlers.Provider(logrusLogger)
	if err != nil {
		cleanup2()
		cleanup()
		return nil, nil, err
	}
	mux, cleanup4, err := Provider(logrusLogger, opentracingTracer, serverHandler)
	if err != nil {
		cleanup3()
		cleanup2()
		cleanup()
		return nil, nil, err
	}
	return mux, func() {
		cleanup4()
		cleanup3()
		cleanup2()
		cleanup()
	}, nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Inject(builder ConfigBuilder) {\n\n\t//load the env into the object\n\tbuilder.SetEnvConfigs()\n\n\t//setup dynamo library\n\tdynamoClient := builder.SetDynamoDBConfigsAndBuild()\n\t//connect to the instance\n\tlog.Println(\"Connecting to dynamo client\")\n\tdynamoClient.DefaultConnect()\n\n\t//dependency injection to our resource\n\t//we inject the dynamo client\n\t//shared client, therefore shared in between all the repos\n\tLoadAdvertRepo(&repo_builder.AdvertWrapper{\n\t\tDC: dynamoClient,\n\t})\n\n\trabbitMQClient := builder.SetRabbitMQConfigsAndBuild()\n\n\tInjectRabbitMQClient(rabbitMQClient)\n}", "func Inject(builder ConfigBuilder) {\n\n\t//load the env into the object\n\tbuilder.LoadEnvConfigs()\n\n\t//dependency injection to our resource\n\t//we inject the rabbitmq client\n\trabbitMQClient := builder.LoadRabbitMQConfigs()\n\tloadRabbitMQClient(rabbitMQClient)\n\n\tsubscribeToChannels()\n\n\tlog.Println(\"Setting up message handler...\")\n\t//initialise our message handler\n\tmh := &ed.MsgHandler{}\n\n\tes := &service.EmailService{EB: &templates.EmailBuilder{}}\n\tes.SetupTemplates()\n\t//inject the hermes service into it\n\tmh.InjectService(es)\n\n\tlog.Println(\"Loading endpoints...\")\n\teb := api.EndpointBuilder{}\n\n\teb.SetupRouter(mux.NewRouter())\n\teb.SetupEndpoints()\n\n\teb.SetQueueClient(rabbitMQClient)\n\t// we use the message handler here\n\teb.SetupMsgHandler(mh)\n\teb.SetupSubscriptionEndpoint()\n}", "func NewInjector(values ...interface{}) *Injector {\n\tinj := &Injector{\n\t\tdata: make(map[string]reflect.Value),\n\t}\n\tinj.Set(values...)\n\treturn inj\n}", "func Build() (Logger, func(), error) {\n\tcontext, cleanup, err := entrypoint.ContextProvider()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tviper, cleanup2, err := config.Provider()\n\tif err != nil {\n\t\tcleanup()\n\t\treturn nil, nil, err\n\t}\n\tloggerConfig, cleanup3, err := ProviderCfg(viper)\n\tif err != nil {\n\t\tcleanup2()\n\t\tcleanup()\n\t\treturn nil, nil, err\n\t}\n\tzap, cleanup4, err := Provider(context, loggerConfig)\n\tif err != nil {\n\t\tcleanup3()\n\t\tcleanup2()\n\t\tcleanup()\n\t\treturn nil, nil, err\n\t}\n\treturn zap, func() {\n\t\tcleanup4()\n\t\tcleanup3()\n\t\tcleanup2()\n\t\tcleanup()\n\t}, nil\n}", "func New() Injector {\n\treturn &injector{\n\t\tvalues: make(map[reflect.Type]reflect.Value),\n\t\tproviders: make(map[reflect.Type]reflect.Value),\n\t}\n}", "func dependencgInjections() (ReqProcessor, PropProcessor) {\n\t// get a single DB connection/pool\n\tdb, err := NewDBClient()\n\tif err != nil {\n\t\tpanic(\"Unable to get a DB connection\")\n\t}\n\n\trAlgo := NewReqMatchingAlgo()\n\tpAlgo := NewPropMatchingAlgo()\n\n\treqProcessor := NewReqProcessor(db, rAlgo)\n\tpropProcessor := NewPropProcessor(db, pAlgo)\n\n\t// Here we use r and p to perform the usecasaes\n\t// API handler/cotrollers will have access to r and p to perform the usecases\n\treturn reqProcessor, propProcessor\n}", "func (gp Provider) Build(config config.Credentials) provider.Provider {\n\tclient := NewClient()\n\n\treturn &Provider{\n\t\tVerifier: provider.NewVerifierBasket(\n\t\t\tNewTeamVerifier(teamConfigsToTeam(config.Github.Teams), client),\n\t\t\tNewOrganizationVerifier(config.Github.Organizations, client),\n\t\t),\n\t}\n}", "func Inject(function injector.Function) Handler {\n\treturn func(c Container) {\n\t\tif _, ok := c.(InjectorProvider); !ok {\n\t\t\tc.Error(fmt.Errorf(\"container %s doesn't implement InjectorProvider\", c), 
http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\terr := c.(InjectorProvider).GetInjector().Call(function)\n\t\tif err != nil {\n\t\t\tc.Error(err, http.StatusInternalServerError)\n\t\t}\n\t}\n}", "func registerAllInjectors(ctx context.Context, groupName string, mgr ctrl.Manager, sources []caDataSource, client client.Client, ca cache.Cache) error {\n\tcontrollers := make([]controller.Controller, len(injectorSetups))\n\tfor i, setup := range injectorSetups {\n\t\tcontroller, err := newGenericInjectionController(ctx, groupName, mgr, setup, sources, ca, client)\n\t\tif err != nil {\n\t\t\tif !meta.IsNoMatchError(err) || !setup.injector.IsAlpha() {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tctrl.Log.V(logf.WarnLevel).Info(\"unable to register injector which is still in an alpha phase.\"+\n\t\t\t\t\" Enable the feature on the API server in order to use this injector\",\n\t\t\t\t\"injector\", setup.resourceName)\n\t\t}\n\t\tcontrollers[i] = controller\n\t}\n\tg, gctx := errgroup.WithContext(ctx)\n\n\tg.Go(func() (err error) {\n\t\tif err = ca.Start(gctx); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n\tif ca.WaitForCacheSync(gctx) {\n\t\tfor _, controller := range controllers {\n\t\t\tif gctx.Err() != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tcontroller := controller\n\t\t\tg.Go(func() (err error) {\n\t\t\t\treturn controller.Start(gctx)\n\t\t\t})\n\t\t}\n\t} else {\n\t\t// I assume that if the cache sync fails, then the already-started cache\n\t\t// will exit with a meaningful error which will be returned by the errgroup\n\t\tctrl.Log.Error(nil, \"timed out or failed while waiting for cache\")\n\t}\n\treturn g.Wait()\n}", "func RegisterAllInjectors(ctx context.Context, mgr ctrl.Manager, namespace string, watchCerts bool) error {\n\t// TODO: refactor\n\tsds := &secretDataSource{\n\t\tclient: mgr.GetClient(),\n\t}\n\tcds := &certificateDataSource{\n\t\tclient: mgr.GetClient(),\n\t}\n\tcfg := mgr.GetConfig()\n\tcaBundle, err := dataFromSliceOrFile(cfg.CAData, cfg.CAFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tkds := &kubeconfigDataSource{\n\t\tapiserverCABundle: caBundle,\n\t}\n\t// Registers a c/r controller for each of APIService, CustomResourceDefinition, Mutating/ValidatingWebhookConfiguration\n\t// TODO: add a flag to allow users to configure which of these controllers should be registered\n\tfor _, setup := range injectorSetups {\n\t\tlog := ctrl.Log.WithValues(\"kind\", setup.resourceName)\n\t\tlog.Info(\"Registering a reconciler for injectable\")\n\t\tr := &genericInjectReconciler{\n\t\t\tinjector: setup.injector,\n\t\t\tnamespace: namespace,\n\t\t\tresourceName: setup.resourceName,\n\t\t\tlog: log,\n\t\t\tClient: mgr.GetClient(),\n\t\t\t// TODO: refactor\n\t\t\tsources: []caDataSource{\n\t\t\t\tsds,\n\t\t\t\tcds,\n\t\t\t\tkds,\n\t\t\t},\n\t\t}\n\n\t\t// Index injectable with a new field. 
If the injectable's CA is\n\t\t// to be sourced from a Secret, the field's value will be the\n\t\t// namespaced name of the Secret.\n\t\t// This field can then be used as a field selector when listing injectables of this type.\n\t\tsecretTyp := setup.injector.NewTarget().AsObject()\n\t\tif err := mgr.GetFieldIndexer().IndexField(ctx, secretTyp, injectFromSecretPath, injectableCAFromSecretIndexer); err != nil {\n\t\t\terr := fmt.Errorf(\"error making injectable indexable by inject-ca-from-secret annotation: %w\", err)\n\t\t\treturn err\n\t\t}\n\t\tpredicates := predicate.Funcs{\n\t\t\tUpdateFunc: func(e event.UpdateEvent) bool {\n\t\t\t\treturn hasInjectableAnnotation(e.ObjectNew)\n\t\t\t},\n\t\t\tCreateFunc: func(e event.CreateEvent) bool {\n\t\t\t\treturn hasInjectableAnnotation(e.Object)\n\t\t\t},\n\t\t\tDeleteFunc: func(e event.DeleteEvent) bool {\n\t\t\t\treturn hasInjectableAnnotation(e.Object)\n\t\t\t},\n\t\t}\n\n\t\tb := ctrl.NewControllerManagedBy(mgr).\n\t\t\tFor(setup.objType,\n\t\t\t\t// We watch all CRDs,\n\t\t\t\t// Validating/MutatingWebhookConfigurations,\n\t\t\t\t// APIServices because the only way how to tell\n\t\t\t\t// if an object is an injectable is from\n\t\t\t\t// annotation value and this cannot be used to\n\t\t\t\t// filter List/Watch. The earliest point where\n\t\t\t\t// we can use the annotation to filter\n\t\t\t\t// injectables is here where we define which\n\t\t\t\t// objects' events should trigger a reconcile.\n\t\t\t\tbuilder.WithPredicates(predicates)).\n\t\t\tWatches(&source.Kind{Type: new(corev1.Secret)}, handler.EnqueueRequestsFromMapFunc((&secretForInjectableMapper{\n\t\t\t\tClient: mgr.GetClient(),\n\t\t\t\tlog: log,\n\t\t\t\tsecretToInjectable: buildSecretToInjectableFunc(setup.listType, setup.resourceName),\n\t\t\t}).Map))\n\t\tif watchCerts {\n\t\t\t// Index injectable with a new field. 
If the injectable's CA is\n\t\t\t// to be sourced from a Certificate's Secret, the field's value will be the\n\t\t\t// namespaced name of the Certificate.\n\t\t\t// This field can then be used as a field selector when listing injectables of this type.\n\t\t\tcertTyp := setup.injector.NewTarget().AsObject()\n\t\t\tif err := mgr.GetFieldIndexer().IndexField(ctx, certTyp, injectFromPath, injectableCAFromIndexer); err != nil {\n\t\t\t\terr := fmt.Errorf(\"error making injectable indexable by inject-ca-from path: %w\", err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tb.Watches(&source.Kind{Type: new(corev1.Secret)}, handler.EnqueueRequestsFromMapFunc((&secretForCertificateMapper{\n\t\t\t\tClient: mgr.GetClient(),\n\t\t\t\tlog: log,\n\t\t\t\tcertificateToInjectable: buildCertToInjectableFunc(setup.listType, setup.resourceName),\n\t\t\t}).Map)).\n\t\t\t\tWatches(&source.Kind{Type: new(cmapi.Certificate)},\n\t\t\t\t\thandler.EnqueueRequestsFromMapFunc((&certMapper{\n\t\t\t\t\t\tClient: mgr.GetClient(),\n\t\t\t\t\t\tlog: log,\n\t\t\t\t\t\ttoInjectable: buildCertToInjectableFunc(setup.listType, setup.resourceName),\n\t\t\t\t\t}).Map))\n\t\t}\n\t\terr := b.Complete(r)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"error registering controller for %s: %w\", setup.objType.GetName(), err)\n\t\t}\n\t}\n\treturn nil\n}", "func (cli *CLI) Inject(key string, value any) *CLI {\n\tif cli.injectionData == nil {\n\t\tcli.injectionData = map[string]any{}\n\t}\n\tcli.injectionData[key] = value\n\treturn cli\n}", "func (i Injector) Run(a run.RunArguments) error {\n\tfor _, r := range i.Runnables {\n\t\tgo r.Run(a)\n\t}\n\tfor _, r := range i.RunFns {\n\t\tgo r(a)\n\t}\n\treturn nil\n}", "func InjectHandlers(sc datatype.ServiceContainer, rg *gin.RouterGroup) {\n\tauthenticator := routermiddleware.SessionMustAuth()\n\trg.POST(\"/load-repo-details\", authenticator, LoadRepoDetails(sc))\n}", "func Build(config map[string]interface{}) {\n}", "func (s *SidecarInjectField) Inject(pod *corev1.Pod) {\n\tlog.Info(fmt.Sprintf(\"inject pod : %s\", pod.GenerateName))\n\t// add initcontrainers to spec\n\tif pod.Spec.InitContainers != nil {\n\t\tpod.Spec.InitContainers = append(pod.Spec.InitContainers, s.Initcontainer)\n\t} else {\n\t\tpod.Spec.InitContainers = []corev1.Container{s.Initcontainer}\n\t}\n\n\t// add volume to spec\n\tif pod.Spec.Volumes == nil {\n\t\tpod.Spec.Volumes = []corev1.Volume{}\n\t}\n\tpod.Spec.Volumes = append(pod.Spec.Volumes, s.SidecarVolume)\n\n\t// choose a specific container to inject\n\ttargetContainers := s.findInjectContainer(pod.Spec.Containers)\n\n\t// add volumemount and env to container\n\tfor i := range targetContainers {\n\t\tlog.Info(fmt.Sprintf(\"inject container : %s\", targetContainers[i].Name))\n\t\tif (*targetContainers[i]).VolumeMounts == nil {\n\t\t\t(*targetContainers[i]).VolumeMounts = []corev1.VolumeMount{}\n\t\t}\n\n\t\t(*targetContainers[i]).VolumeMounts = append((*targetContainers[i]).VolumeMounts, s.SidecarVolumeMount)\n\n\t\tif (*targetContainers[i]).Env != nil {\n\t\t\t(*targetContainers[i]).Env = append((*targetContainers[i]).Env, s.Env)\n\t\t} else {\n\t\t\t(*targetContainers[i]).Env = []corev1.EnvVar{s.Env}\n\t\t}\n\n\t\t// envs to be append\n\t\tvar envsTBA []corev1.EnvVar\n\t\tfor j, envInject := range s.Envs {\n\t\t\tisExists := false\n\t\t\tfor _, envExists := range targetContainers[i].Env {\n\t\t\t\tif strings.EqualFold(envExists.Name, envInject.Name) {\n\t\t\t\t\tisExists = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !isExists {\n\t\t\t\tenvsTBA = append(envsTBA, 
s.Envs[j])\n\t\t\t}\n\t\t}\n\t\tif len(s.Envs) > 0 {\n\t\t\t(*targetContainers[i]).Env = append((*targetContainers[i]).Env, envsTBA...)\n\t\t}\n\t}\n}", "func (h *Handler) sidecarInjection(del bool, version, ns string) error {\n\texe, err := h.getExecutable(version)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinjectCmd := \"add\"\n\tif del {\n\t\tinjectCmd = \"remove\"\n\t}\n\n\tcmd := &exec.Cmd{\n\t\tPath: exe,\n\t\tArgs: []string{\n\t\t\texe,\n\t\t\t\"namespace\",\n\t\t\tinjectCmd,\n\t\t\tns,\n\t\t},\n\t\tStdout: os.Stdout,\n\t\tStderr: os.Stdout,\n\t}\n\n\tif err := cmd.Run(); err != nil {\n\t\treturn ErrRunExecutable(err)\n\t}\n\n\treturn nil\n}", "func Build() {\n\tLogger = zap.New(zapcore.NewTee(corelist...))\n}", "func (di *DependencyInjector) Inject(r *Router) {\n\t// Repositories\n\tajaxRepository := new(repositories.AjaxRepository)\n\n\t// Services\n\tajaxService := new(services.AjaxService)\n\tajaxService.AjaxRepository = ajaxRepository\n\n\t// Controllers\n\tajaxController := new(controllers.AjaxController)\n\tajaxController.AjaxService = ajaxService\n\tindexController := new(controllers.IndexController)\n\n\t// Routes\n\tr.AjaxController = ajaxController\n\tr.IndexController = indexController\n}", "func Inject(c *gin.Context) {\n\tc.Set(\"cfg\", Config)\n\tc.Next()\n}", "func (w ErrorTraceProvider) Build(something map[string]interface{}) (p Provider, xerr fail.Error) {\n\tdefer func(prefix string) {\n\t\tif xerr != nil {\n\t\t\tlogrus.Debugf(\"%s : Intercepted error: %v\", prefix, xerr)\n\t\t}\n\t}(fmt.Sprintf(\"%s:Build\", w.Name))\n\treturn w.InnerProvider.Build(something)\n}", "func Build() {\n\tonce.Do(func() {\n\t\tdb, client := connect()\n\t\tinstance = MongoConnection{\n\t\t\tDb: db,\n\t\t\tClient: client,\n\t\t}\n\t})\n}", "func Inject(container mediator.Container, db *gorm.DB) {\n\tcontainer.Inject(\"db\", db)\n\tcontainer.Inject(\"repository\", repository.NewRepository(db))\n}", "func init() {\n\tcodegen.RegisterPluginFirst(\"zaplogger\", \"gen\", nil, Generate)\n\tcodegen.RegisterPluginLast(\"zaplogger-updater\", \"example\", nil, UpdateExample)\n}", "func init() {\n\tCommands = make(map[string][]string)\n\tcommands := [][]string{\n\t\t{\"module\", \"--stdout=true\", \".examples/modules/sockshop\"},\n\t\t{\"module\", \"-s\", \".examples/modules/sockshop\", \"-p\", \"image_registry=gcr.io/sockshop\"},\n\t\t{\"module\", \"-s\", \".examples/modules/sockshop\", \"-p\", \"image_registry=quay.io/sockshop\"},\n\t\t{\"module\", \"-s\", \".examples/modules/weavecloud\", \"-p\", \"service_token=abc123\"},\n\t\t{\"bundle\", \"--stdout\", \".examples/sockshop.yml\"},\n\t\t{\"bundle\", \"--stdout\", \".examples/weavecloud.yml\"},\n\t\t{\"bundle\", \"--stdout\", \".examples/weavecloud.yml\", \".examples/sockshop.yml\"},\n\t\t{\"module\", \"--output=json\", \"--stdout=true\", \".examples/modules/sockshop\"},\n\t\t{\"module\", \"--output=json\", \"-s\", \".examples/modules/sockshop\", \"-p\", \"image_registry=gcr.io/sockshop\"},\n\t\t{\"module\", \"--output=json\", \"-s\", \".examples/modules/sockshop\", \"-p\", \"image_registry=quay.io/sockshop\"},\n\t\t{\"module\", \"--output=json\", \"-s\", \".examples/modules/weavecloud\", \"-p\", \"service_token=abc123\"},\n\t\t{\"bundle\", \"--output=json\", \"--stdout\", \".examples/sockshop.yml\"},\n\t\t{\"bundle\", \"--output=json\", \"--stdout\", \".examples/weavecloud.yml\"},\n\t\t{\"bundle\", \"--output=json\", \"--stdout\", \".examples/weavecloud.yml\", \".examples/sockshop.yml\"},\n\t}\n\n\tfor _, command := range commands 
{\n\t\thash := sha1.New()\n\t\thash.Write([]byte(strings.Join(command, \" \")))\n\t\tCommands[fmt.Sprintf(\".generated/%x\", hash.Sum(nil))] = command\n\t}\n}", "func BuildMockInjector() (*MockInjector, func(), error) {\n\twire.Build(\n\t\tgorm.InitGormDB,\n\t\tmodel.ModelSet,\n\t\tservice.ServiceSet,\n\t\tMockInjectorSet,\n\t)\n\treturn new(MockInjector), nil, nil\n}", "func (cs *Supervisor) InjectDependencies(l echo.Logger, s *stores.Stores, ws *socket.Server) {\n\tcs.logger = l\n\tcs.stores = s\n\tcs.ws = ws\n}", "func (n *DUTTestNet) NewInjector(t *testing.T) (Injector, error) {\n\tt.Helper()\n\n\tifInfo, err := net.InterfaceByName(n.LocalDevName)\n\tif err != nil {\n\t\treturn Injector{}, err\n\t}\n\n\tvar haddr [8]byte\n\tcopy(haddr[:], ifInfo.HardwareAddr)\n\tsa := unix.SockaddrLinklayer{\n\t\tProtocol: htons(unix.ETH_P_IP),\n\t\tIfindex: ifInfo.Index,\n\t\tHalen: uint8(len(ifInfo.HardwareAddr)),\n\t\tAddr: haddr,\n\t}\n\n\tinjectFd, err := unix.Socket(unix.AF_PACKET, unix.SOCK_RAW, int(htons(unix.ETH_P_ALL)))\n\tif err != nil {\n\t\treturn Injector{}, err\n\t}\n\tif err := unix.Bind(injectFd, &sa); err != nil {\n\t\treturn Injector{}, err\n\t}\n\treturn Injector{\n\t\tfd: injectFd,\n\t}, nil\n}", "func InjectCommandFactory() cli.CommandFactory {\n\twire.Build(\n\t\twire.Bind(new(cli.CommandFactory), new(cli.CobraFactory)),\n\t\tcli.NewCobraFactory,\n\t)\n\treturn cli.CobraFactory{}\n}", "func Inject(l *logrus.Entry) {\n\tgrpclog.SetLogger(New(l))\n}", "func init() {\n\tp, err := New()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tproviders.Register(p)\n}", "func Inject(ctx context.Context, md *metadata.MD, opts ...Option) {\n\tc := newConfig(opts)\n\tc.Propagators.Inject(ctx, &metadataSupplier{\n\t\tmetadata: md,\n\t})\n}", "func (f *FormDataEncoderFactoryImpl) Inject(\n\tnamedFormDataEncoders map[string]domain.FormDataEncoder,\n\tnamedFormServices map[string]domain.FormService,\n\tdefaultFormDataEncoder domain.DefaultFormDataEncoder,\n\n) {\n\tf.namedFormServices = namedFormServices\n\tf.namedFormDataEncoders = namedFormDataEncoders\n\tf.defaultFormDataEncoder = defaultFormDataEncoder\n}", "func CreateInjectArgs(config *rest.Config) InjectArgs {\n return InjectArgs{\n InjectArgs: args.CreateInjectArgs(config),\n Clientset: clientset.NewForConfigOrDie(config),\n }\n}", "func (c *Client) Build(params map[string]interface{}) (api.ClientAPI, error) {\n\tUsername, _ := params[\"Username\"].(string)\n\tPassword, _ := params[\"Password\"].(string)\n\tTenantName, _ := params[\"TenantName\"].(string)\n\tRegion, _ := params[\"Region\"].(string)\n\treturn AuthenticatedClient(AuthOptions{\n\t\tUsername: Username,\n\t\tPassword: Password,\n\t\tTenantName: TenantName,\n\t\tRegion: Region,\n\t})\n}", "func (container *ContainerWithInjector) GetInjector() *injector.Injector {\n\tif container.injector == nil {\n\t\tcontainer.injector = injector.NewInjector()\n\t\tcontainer.injector.RegisterValue(container.GetRequest())\n\t\tcontainer.injector.RegisterValue(container.GetResponseWriter())\n\t}\n\treturn container.injector\n}", "func (p *provider) call(ctn *container) (reflect.Value, error) {\n\tinjectParam, err := p.inject(ctn)\n\tif err != nil {\n\t\treturn reflect.Value{}, err\n\t}\n\tconstructorResults := reflect.ValueOf(p.constructor).Call(injectParam)\n\tresult := constructorResults[0]\n\terr, ok := constructorResults[1].Interface().(error)\n\tif !ok {\n\t\terr = nil\n\t}\n\tif err != nil {\n\t\tcomp, _ := internal.GetOutputFieldType(p.constructor, 0)\n\t\treturn result, errors.Wrapf(err, 
\"initialize %s failed\", comp.String())\n\t}\n\treturn result, nil\n}", "func initRepos(index bleve.Index) {\n\t// The repository layer compiled is determined by build flags\n\tplugin.Repo = plugin.NewRepository(index)\n}", "func Connect(tag string, reference interface{}) (Injector, error) {\n\tinjector := injector{tag, reflect.Indirect(reflect.ValueOf(reference))}\n\treturn injector, injector.Inject(getFields(reference)...)\n}", "func doBind(sc *Collection, originalInvokeF *provider, originalInitF *provider, real bool) error {\n\t// Split up the collection into LITERAL, STATIC, RUN, and FINAL groups. Add\n\t// init and invoke as faked providers. Flatten into one ordered list.\n\tvar invokeIndex int\n\tvar invokeF *provider\n\tvar initF *provider\n\tvar debuggingProvider **provider\n\tfuncs := make([]*provider, 0, len(sc.contents)+3)\n\t{\n\t\tvar err error\n\t\tinvokeF, err = characterizeInitInvoke(originalInvokeF, charContext{inputsAreStatic: false})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif invokeF.flows == nil {\n\t\t\treturn fmt.Errorf(\"internal error #4: no flows for invoke\")\n\t\t}\n\t\tnonStaticTypes := make(map[typeCode]bool)\n\t\tfor _, tc := range invokeF.flows[outputParams] {\n\t\t\tnonStaticTypes[tc] = true\n\t\t}\n\n\t\tbeforeInvoke, afterInvoke, err := sc.characterizeAndFlatten(nonStaticTypes)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Add debugging provider\n\t\t{\n\t\t\td := newProvider(func() *Debugging { return nil }, -1, \"Debugging\")\n\t\t\td.cacheable = true\n\t\t\td.mustCache = true\n\t\t\td, err = characterizeFunc(d, charContext{inputsAreStatic: true})\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"internal error #29: problem with debugging injectors: %s\", err)\n\t\t\t}\n\t\t\td.isSynthetic = true\n\t\t\tdebuggingProvider = &d\n\t\t\tfuncs = append(funcs, d)\n\t\t}\n\n\t\t// Add init\n\t\tif originalInitF != nil {\n\t\t\tinitF, err = characterizeInitInvoke(originalInitF, charContext{inputsAreStatic: true})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif initF.flows == nil {\n\t\t\t\treturn fmt.Errorf(\"internal error #5: no flows for initF\")\n\t\t\t}\n\t\t\tfuncs = append(funcs, initF)\n\t\t}\n\n\t\tfuncs = append(funcs, beforeInvoke...)\n\t\tinvokeIndex = len(funcs)\n\t\tfuncs = append(funcs, invokeF)\n\t\tfuncs = append(funcs, afterInvoke...)\n\n\t\tfor i, fm := range funcs {\n\t\t\tfm.chainPosition = i\n\t\t\tif fm.required {\n\t\t\t\tfm.include = true\n\t\t\t}\n\t\t}\n\t}\n\n\t// Figure out which providers must be included in the final chain. To do this,\n\t// first we figure out where each provider will get its inputs from when going\n\t// down the chain and where its inputs can be consumed when going up the chain.\n\t// Each of these linkages will be recorded as a dependency. 
Any dependency that\n\t// cannot be met will result in that provider being marked as impossible to\n\t// include.\n\t//\n\t// After all the dependencies are mapped, then we mark which providers will be\n\t// included in the final chain.\n\t//\n\t// The parameter list for the init function is complicated: both the inputs\n\t// and outputs are associated with downVmap, but they happen at different times:\n\t// some of the bookkeeping related to init happens in sequence with its position\n\t// in the function list, and some of it happens just before handling the invoke\n\t// function.\n\t//\n\t//\n\t// When that is finished, we can compute the upVmap and the downVmap.\n\n\t// Compute dependencies: set fm.downRmap, fm.upRmap, fm.cannotInclude,\n\t// fm.whyIncluded, fm.include\n\terr := computeDependenciesAndInclusion(funcs, initF)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Build the lists of parameters that are included in the value collections.\n\t// These are maps from types to position in the value collection.\n\t//\n\t// Also: calculate bypass zero for static chain. If there is a fallible injector\n\t// in the static chain, then part of the static chain my not run. Fallible\n\t// injectors need to know know which types need to be zeroed if the remaining\n\t// static injectors are skipped.\n\t//\n\t// Also: calculate the skipped-inner() zero for the run chain. If a wrapper\n\t// does not call the remainder of the chain, then the values returned by the remainder\n\t// of the chain must be zero'ed.\n\tdownVmap := make(map[typeCode]int)\n\tdownCount := 0\n\tupVmap := make(map[typeCode]int)\n\tupCount := 0\n\tfor _, fm := range funcs {\n\t\tif !fm.include {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, flow := range fm.flows {\n\t\t\tfor _, tc := range flow {\n\t\t\t\tupVmap[tc] = -1\n\t\t\t\tdownVmap[tc] = -1\n\t\t\t}\n\t\t}\n\t}\n\t// calculate for the static set\n\tfor i := invokeIndex - 1; i >= 0; i-- {\n\t\tfm := funcs[i]\n\t\tfm.mustZeroIfRemainderSkipped = vmapMapped(downVmap)\n\t\taddToVmap(fm, outputParams, downVmap, fm.downRmap, &downCount)\n\t}\n\tif initF != nil {\n\t\tfor _, tc := range initF.flows[bypassParams] {\n\t\t\tif rm, found := initF.downRmap[tc]; found {\n\t\t\t\ttc = rm\n\t\t\t}\n\t\t\tif downVmap[tc] == -1 {\n\t\t\t\treturn fmt.Errorf(\"Type required by init func, %s, not provided by any static group injectors\", tc)\n\t\t\t}\n\t\t}\n\t}\n\t// calculate for the run set\n\tfor i := len(funcs) - 1; i >= invokeIndex; i-- {\n\t\tfm := funcs[i]\n\t\tfm.downVmapCount = downCount\n\t\taddToVmap(fm, inputParams, downVmap, fm.downRmap, &downCount)\n\t\tfm.upVmapCount = upCount\n\t\taddToVmap(fm, returnParams, upVmap, fm.upRmap, &upCount)\n\t\tfm.mustZeroIfInnerNotCalled = vmapMapped(upVmap)\n\t}\n\n\t// Fill in debugging (if used)\n\tif (*debuggingProvider).include {\n\t\t(*debuggingProvider).fn = func() *Debugging {\n\t\t\tincluded := make([]string, 0, len(funcs)+3)\n\t\t\tfor _, fm := range funcs {\n\t\t\t\tif fm.include {\n\t\t\t\t\tincluded = append(included, fmt.Sprintf(\"%s %s\", fm.group, fm))\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tnamesIncluded := make([]string, 0, len(funcs)+3)\n\t\t\tfor _, fm := range funcs {\n\t\t\t\tif fm.include {\n\t\t\t\t\tif fm.index >= 0 {\n\t\t\t\t\t\tnamesIncluded = append(namesIncluded, fmt.Sprintf(\"%s(%d)\", fm.origin, fm.index))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tnamesIncluded = append(namesIncluded, fm.origin)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tincludeExclude := make([]string, 0, len(funcs)+3)\n\t\t\tfor _, fm := range funcs {\n\t\t\t\tif 
fm.include {\n\t\t\t\t\tincludeExclude = append(includeExclude, fmt.Sprintf(\"INCLUDED: %s %s BECAUSE %s\", fm.group, fm, fm.whyIncluded))\n\t\t\t\t} else {\n\t\t\t\t\tincludeExclude = append(includeExclude, fmt.Sprintf(\"EXCLUDED: %s %s BECAUSE %s\", fm.group, fm, fm.cannotInclude))\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tvar trace string\n\t\t\tif debugEnabled() {\n\t\t\t\ttrace = \"debugging already in progress\"\n\t\t\t} else {\n\t\t\t\ttrace = captureDoBindDebugging(sc, originalInvokeF, originalInitF)\n\t\t\t}\n\n\t\t\treproduce := generateReproduce(funcs, invokeF, initF)\n\n\t\t\treturn &Debugging{\n\t\t\t\tIncluded: included,\n\t\t\t\tNamesIncluded: namesIncluded,\n\t\t\t\tIncludeExclude: includeExclude,\n\t\t\t\tTrace: trace,\n\t\t\t\tReproduce: reproduce,\n\t\t\t}\n\t\t}\n\t}\n\tif debugEnabled() {\n\t\tfor _, fm := range funcs {\n\t\t\tdumpF(\"funclist\", fm)\n\t\t}\n\t}\n\n\t// Generate wrappers and split the handlers into groups (static, middleware, final)\n\tcollections := make(map[groupType][]*provider)\n\tfor _, fm := range funcs {\n\t\tif !fm.include {\n\t\t\tcontinue\n\t\t}\n\t\terr := generateWrappers(fm, downVmap, upVmap, upCount)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcollections[fm.group] = append(collections[fm.group], fm)\n\t}\n\tif len(collections[finalGroup]) != 1 {\n\t\treturn fmt.Errorf(\"internal error #1: no final func provided\")\n\t}\n\n\t// Over the course of the following loop, f will be redefined\n\t// over and over so that at the end of the loop it will be a\n\t// function that executes the entire RUN chain.\n\tf := collections[finalGroup][0].wrapEndpoint\n\tfor i := len(collections[runGroup]) - 1; i >= 0; i-- {\n\t\tn := collections[runGroup][i]\n\n\t\tswitch n.class {\n\t\tcase wrapperFunc:\n\t\t\tinner := f\n\t\t\tw := n.wrapWrapper\n\t\t\tf = func(v valueCollection) valueCollection {\n\t\t\t\treturn w(v, inner)\n\t\t\t}\n\t\tcase injectorFunc, fallibleInjectorFunc:\n\t\t\tj := i - 1\n\t\tInjectors:\n\t\t\tfor j >= 0 {\n\t\t\t\tswitch collections[runGroup][j].class {\n\t\t\t\tdefault:\n\t\t\t\t\tbreak Injectors\n\t\t\t\tcase injectorFunc, fallibleInjectorFunc: //okay\n\t\t\t\t}\n\t\t\t\tj--\n\t\t\t}\n\t\t\tj++\n\t\t\tnext := f\n\t\t\tinjectors := make([]func(valueCollection) (bool, valueCollection), 0, i-j+1)\n\t\t\tfor k := j; k <= i; k++ {\n\t\t\t\tinjectors = append(injectors, collections[runGroup][k].wrapFallibleInjector)\n\t\t\t}\n\t\t\tf = func(v valueCollection) valueCollection {\n\t\t\t\tfor _, injector := range injectors {\n\t\t\t\t\terrored, upV := injector(v)\n\t\t\t\t\tif errored {\n\t\t\t\t\t\treturn upV\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn next(v)\n\t\t\t}\n\t\t\ti = j\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"internal error #2: should not be here: %s\", n.class)\n\t\t}\n\t}\n\n\t// Initialize the value collection. 
When invoke is called the baseValues\n\t// collection will be copied.\n\tbaseValues := make(valueCollection, downCount)\n\tfor _, lit := range collections[literalGroup] {\n\t\ti := downVmap[lit.flows[outputParams][0]]\n\t\tif i >= 0 {\n\t\t\tbaseValues[i] = reflect.ValueOf(lit.fn)\n\t\t}\n\t}\n\n\t// Generate static chain function\n\trunStaticChain := func() error {\n\t\tdebugf(\"STATIC CHAIN LENGTH: %d\", len(collections[staticGroup]))\n\t\tfor _, inj := range collections[staticGroup] {\n\t\t\tdebugf(\"STATIC CHAIN CALLING %s\", inj)\n\n\t\t\terr := inj.wrapStaticInjector(baseValues)\n\t\t\tif err != nil {\n\t\t\t\tdebugf(\"STATIC CHAIN RETURNING EARLY DUE TO ERROR %s\", err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\tfor _, inj := range collections[staticGroup] {\n\t\tif inj.wrapStaticInjector == nil {\n\t\t\treturn inj.errorf(\"internal error #3: missing static injector wrapping\")\n\t\t}\n\t}\n\n\t// Generate and bind init func.\n\tinitFunc := func() {}\n\tvar initOnce sync.Once\n\tif initF != nil {\n\t\toutMap, err := generateOutputMapper(initF, 0, outputParams, downVmap, \"init inputs\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tinMap, err := generateInputMapper(initF, 0, bypassParams, initF.bypassRmap, downVmap, \"init results\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdebugln(\"SET INIT FUNC\")\n\t\tif real {\n\t\t\treflect.ValueOf(initF.fn).Elem().Set(\n\t\t\t\treflect.MakeFunc(reflect.ValueOf(initF.fn).Type().Elem(),\n\t\t\t\t\tfunc(inputs []reflect.Value) []reflect.Value {\n\t\t\t\t\t\tdebugln(\"INSIDE INIT\")\n\t\t\t\t\t\t// if initDone panic, return error, or ignore?\n\t\t\t\t\t\tinitOnce.Do(func() {\n\t\t\t\t\t\t\toutMap(baseValues, inputs)\n\t\t\t\t\t\t\tdebugln(\"RUN STATIC CHAIN\")\n\t\t\t\t\t\t\t_ = runStaticChain()\n\t\t\t\t\t\t})\n\t\t\t\t\t\tdumpValueArray(baseValues, \"base values before init return\", downVmap)\n\t\t\t\t\t\tout := inMap(baseValues)\n\t\t\t\t\t\tdebugln(\"DONE INIT\")\n\t\t\t\t\t\tdumpValueArray(out, \"init return\", nil)\n\t\t\t\t\t\tdumpF(\"init\", initF)\n\n\t\t\t\t\t\treturn out\n\t\t\t\t\t}))\n\t\t}\n\t\tdebugln(\"SET INIT FUNC - DONE\")\n\n\t} else {\n\t\tinitFunc = func() {\n\t\t\tinitOnce.Do(func() {\n\t\t\t\t_ = runStaticChain()\n\t\t\t})\n\t\t}\n\t}\n\n\t// Generate and bind invoke func\n\t{\n\t\toutMap, err := generateOutputMapper(invokeF, 0, outputParams, downVmap, \"invoke inputs\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tinMap, err := generateInputMapper(invokeF, 0, returnedParams, invokeF.upRmap, upVmap, \"invoke results\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdebugln(\"SET INVOKE FUNC\")\n\t\tif real {\n\t\t\treflect.ValueOf(invokeF.fn).Elem().Set(\n\t\t\t\treflect.MakeFunc(reflect.ValueOf(invokeF.fn).Type().Elem(),\n\t\t\t\t\tfunc(inputs []reflect.Value) []reflect.Value {\n\t\t\t\t\t\tinitFunc()\n\t\t\t\t\t\tvalues := baseValues.Copy()\n\t\t\t\t\t\tdumpValueArray(values, \"invoke - before input copy\", downVmap)\n\t\t\t\t\t\toutMap(values, inputs)\n\t\t\t\t\t\tdumpValueArray(values, \"invoke - after input copy\", downVmap)\n\t\t\t\t\t\tret := f(values)\n\t\t\t\t\t\treturn inMap(ret)\n\t\t\t\t\t}))\n\t\t}\n\t\tdebugln(\"SET INVOKE FUNC - DONE\")\n\t}\n\n\treturn nil\n}", "func InjectEnv() env.Env {\n\twire.Build(\n\t\twire.Bind(new(env.Env), new(env.GoDotEnv)),\n\t\tenv.NewGoDotEnv,\n\t)\n\treturn env.GoDotEnv{}\n}", "func main() {\r\n\t// Ensure that all loggers are closed before exiting.\r\n\t// This is especially important to send out all logs\r\n\t// to loki, if 
enabled.\r\n\tdefer log.Close()\r\n\tdefer func() {\r\n\t\t// Recover panics so that they can be logged to all\r\n\t\t// registered writers correctly.\r\n\r\n\t\terr := recover()\r\n\t\tif err == nil {\r\n\t\t\treturn\r\n\t\t}\r\n\r\n\t\tentry := log.Panic()\r\n\t\t_, file, line, ok := runtime.Caller(2)\r\n\t\tif ok {\r\n\t\t\tentry.Fields(\"file\", file, \"line\", line)\r\n\t\t}\r\n\t\tentry.Msgf(\"panic: %v\", err)\r\n\t}()\r\n\r\n\t// Parse command line flags\r\n\tflagConfig, _ := argp.String(\"-c\", \"config.yml\", \"Optional config file location.\")\r\n\tflagDevMode, _ := argp.Bool(\"-devmode\", false, \"Enable development mode.\")\r\n\tflagProfile, _ := argp.String(\"-cpuprofile\", \"\", \"CPU profile output location.\")\r\n\tflagQuiet, _ := argp.Bool(\"-quiet\", false, \"Hide startup message.\")\r\n\t_, _ = argp.Bool(\"-docker\", false, \"Docker mode (deprecated)\")\r\n\tflagVersion, _ := argp.Bool(\"-v\", false, \"Show version information\")\r\n\r\n\tif flagHelp, _ := argp.Bool(\"-h\", false, \"Display help.\"); flagHelp {\r\n\t\tfmt.Println(\"Usage:\\n\" + argp.Help())\r\n\t\treturn\r\n\t}\r\n\r\n\tif flagVersion {\r\n\t\tfmt.Printf(\r\n\t\t\t\"shinpuru v%s\\n\"+\r\n\t\t\t\t\"Release: %t\\n\"+\r\n\t\t\t\t\"Commit: %s\\n\"+\r\n\t\t\t\t\"Build Date: %s\\n\",\r\n\t\t\tembedded.AppVersion,\r\n\t\t\tembedded.IsRelease(),\r\n\t\t\tembedded.AppCommit,\r\n\t\t\tembedded.AppDate)\r\n\t\treturn\r\n\t}\r\n\r\n\tif !flagQuiet {\r\n\t\tstartupmsg.Output(os.Stdout)\r\n\t}\r\n\r\n\t// Initialize dependency injection builder\r\n\tdiBuilder, _ := di.NewBuilder()\r\n\r\n\t// Initialize time provider\r\n\tdiBuilder.Add(di.Def{\r\n\t\tName: static.DiTimeProvider,\r\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\r\n\t\t\treturn timeprovider.Time{}, nil\r\n\t\t},\r\n\t})\r\n\r\n\t// Initialize config\r\n\tdiBuilder.Add(di.Def{\r\n\t\tName: static.DiConfig,\r\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\r\n\t\t\treturn config.NewPaerser(argp.Args(), flagConfig), nil\r\n\t\t},\r\n\t})\r\n\r\n\t// Initialize metrics server\r\n\tdiBuilder.Add(di.Def{\r\n\t\tName: static.DiMetrics,\r\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\r\n\t\t\treturn inits.InitMetrics(ctn), nil\r\n\t\t},\r\n\t})\r\n\r\n\t// Initialize redis client\r\n\tdiBuilder.Add(di.Def{\r\n\t\tName: static.DiRedis,\r\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\r\n\t\t\tcfg := ctn.Get(static.DiConfig).(config.Provider)\r\n\t\t\treturn redis.NewClient(&redis.Options{\r\n\t\t\t\tAddr: cfg.Config().Cache.Redis.Addr,\r\n\t\t\t\tPassword: cfg.Config().Cache.Redis.Password,\r\n\t\t\t\tDB: cfg.Config().Cache.Redis.Type,\r\n\t\t\t}), nil\r\n\t\t},\r\n\t})\r\n\r\n\t// Initialize database middleware and shutdown routine\r\n\tdiBuilder.Add(di.Def{\r\n\t\tName: static.DiDatabase,\r\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\r\n\t\t\treturn inits.InitDatabase(ctn), nil\r\n\t\t},\r\n\t\tClose: func(obj interface{}) error {\r\n\t\t\tdatabase := obj.(database.Database)\r\n\t\t\tlog.Info().Msg(\"Shutting down database connection...\")\r\n\t\t\tdatabase.Close()\r\n\t\t\treturn nil\r\n\t\t},\r\n\t})\r\n\r\n\t// Initialize twitch notification listener\r\n\tdiBuilder.Add(di.Def{\r\n\t\tName: static.DiTwitchNotifyListener,\r\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\r\n\t\t\treturn listeners.NewListenerTwitchNotify(ctn), nil\r\n\t\t},\r\n\t\tClose: func(obj interface{}) error {\r\n\t\t\tlistener := obj.(*listeners.ListenerTwitchNotify)\r\n\t\t\tlog.Info().Msg(\"Shutting down twitch 
notify listener...\")\r\n\t\t\tlistener.TearDown()\r\n\t\t\treturn nil\r\n\t\t},\r\n\t})\r\n\r\n\t// Initialize twitch notification worker\r\n\tdiBuilder.Add(di.Def{\r\n\t\tName: static.DiTwitchNotifyWorker,\r\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\r\n\t\t\treturn inits.InitTwitchNotifyWorker(ctn), nil\r\n\t\t},\r\n\t})\r\n\r\n\t// Initialize life cycle timer\r\n\tdiBuilder.Add(di.Def{\r\n\t\tName: static.DiScheduler,\r\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\r\n\t\t\treturn inits.InitScheduler(ctn), nil\r\n\t\t},\r\n\t})\r\n\r\n\t// Initialize storage middleware\r\n\tdiBuilder.Add(di.Def{\r\n\t\tName: static.DiObjectStorage,\r\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\r\n\t\t\treturn inits.InitStorage(ctn), nil\r\n\t\t},\r\n\t})\r\n\r\n\t// Initialize permissions command handler middleware\r\n\tdiBuilder.Add(di.Def{\r\n\t\tName: static.DiPermissions,\r\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\r\n\t\t\treturn permissions.NewPermissions(ctn), nil\r\n\t\t},\r\n\t})\r\n\r\n\t// Initialize discord bot session and shutdown routine\r\n\tdiBuilder.Add(di.Def{\r\n\t\tName: static.DiDiscordSession,\r\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\r\n\t\t\treturn discordgo.New(\"\")\r\n\t\t},\r\n\t\tClose: func(obj interface{}) error {\r\n\t\t\tsession := obj.(*discordgo.Session)\r\n\t\t\tlog.Info().Msg(\"Shutting down bot session...\")\r\n\t\t\tsession.Close()\r\n\t\t\treturn nil\r\n\t\t},\r\n\t})\r\n\r\n\t// Initialize Discord OAuth Module\r\n\tdiBuilder.Add(di.Def{\r\n\t\tName: static.DiDiscordOAuthModule,\r\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\r\n\t\t\treturn inits.InitDiscordOAuth(ctn), nil\r\n\t\t},\r\n\t})\r\n\r\n\t// Initialize auth refresh token handler\r\n\tdiBuilder.Add(di.Def{\r\n\t\tName: static.DiAuthRefreshTokenHandler,\r\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\r\n\t\t\treturn auth.NewDatabaseRefreshTokenHandler(ctn), nil\r\n\t\t},\r\n\t})\r\n\r\n\t// Initialize auth access token handler\r\n\tdiBuilder.Add(di.Def{\r\n\t\tName: static.DiAuthAccessTokenHandler,\r\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\r\n\t\t\treturn auth.NewJWTAccessTokenHandler(ctn)\r\n\t\t},\r\n\t})\r\n\r\n\t// Initialize auth API token handler\r\n\tdiBuilder.Add(di.Def{\r\n\t\tName: static.DiAuthAPITokenHandler,\r\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\r\n\t\t\treturn auth.NewDatabaseAPITokenHandler(ctn)\r\n\t\t},\r\n\t})\r\n\r\n\t// Initialize OAuth API handler implementation\r\n\tdiBuilder.Add(di.Def{\r\n\t\tName: static.DiOAuthHandler,\r\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\r\n\t\t\treturn auth.NewRefreshTokenRequestHandler(ctn), nil\r\n\t\t},\r\n\t})\r\n\r\n\t// Initialize access token authorization middleware\r\n\tdiBuilder.Add(di.Def{\r\n\t\tName: static.DiAuthMiddleware,\r\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\r\n\t\t\treturn auth.NewAccessTokenMiddleware(ctn), nil\r\n\t\t},\r\n\t})\r\n\r\n\t// Initialize OTA generator\r\n\tdiBuilder.Add(di.Def{\r\n\t\tName: static.DiOneTimeAuth,\r\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\r\n\t\t\treturn onetimeauth.NewJwt(&onetimeauth.JwtOptions{\r\n\t\t\t\tIssuer: \"shinpuru v.\" + embedded.AppVersion,\r\n\t\t\t})\r\n\t\t},\r\n\t})\r\n\r\n\t// Initialize backup handler\r\n\tdiBuilder.Add(di.Def{\r\n\t\tName: static.DiBackupHandler,\r\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\r\n\t\t\treturn backup.New(ctn), nil\r\n\t\t},\r\n\t})\r\n\r\n\t// 
Initialize command handler\r\n\tdiBuilder.Add(di.Def{\r\n\t\tName: static.DiCommandHandler,\r\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\r\n\t\t\treturn inits.InitCommandHandler(ctn)\r\n\t\t},\r\n\t\tClose: func(obj interface{}) error {\r\n\t\t\tlog.Info().Msg(\"Unegister commands ...\")\r\n\t\t\treturn obj.(*ken.Ken).Unregister()\r\n\t\t},\r\n\t})\r\n\r\n\t// Initialize web server\r\n\tdiBuilder.Add(di.Def{\r\n\t\tName: static.DiWebserver,\r\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\r\n\t\t\treturn inits.InitWebServer(ctn), nil\r\n\t\t},\r\n\t})\r\n\r\n\t// Initialize code execution factroy\r\n\tdiBuilder.Add(di.Def{\r\n\t\tName: static.DiCodeExecFactory,\r\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\r\n\t\t\treturn inits.InitCodeExec(ctn), nil\r\n\t\t},\r\n\t})\r\n\r\n\t// Initialize karma service\r\n\tdiBuilder.Add(di.Def{\r\n\t\tName: static.DiKarma,\r\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\r\n\t\t\treturn karma.NewKarmaService(ctn), nil\r\n\t\t},\r\n\t})\r\n\r\n\t// Initialize report service\r\n\tdiBuilder.Add(di.Def{\r\n\t\tName: static.DiReport,\r\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\r\n\t\t\treturn report.New(ctn)\r\n\t\t},\r\n\t})\r\n\r\n\t// Initialize guild logger\r\n\tdiBuilder.Add(di.Def{\r\n\t\tName: static.DiGuildLog,\r\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\r\n\t\t\treturn guildlog.New(ctn), nil\r\n\t\t},\r\n\t})\r\n\r\n\t// Initialize KV cache\r\n\tdiBuilder.Add(di.Def{\r\n\t\tName: static.DiKVCache,\r\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\r\n\t\t\treturn kvcache.NewTimedmapCache(10 * time.Minute), nil\r\n\t\t},\r\n\t})\r\n\r\n\t// Initialize State\r\n\tdiBuilder.Add(di.Def{\r\n\t\tName: static.DiState,\r\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\r\n\t\t\treturn inits.InitState(ctn)\r\n\t\t},\r\n\t})\r\n\r\n\t// Initialize verification service\r\n\tdiBuilder.Add(di.Def{\r\n\t\tName: static.DiVerification,\r\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\r\n\t\t\treturn verification.New(ctn), nil\r\n\t\t},\r\n\t})\r\n\r\n\tdiBuilder.Add(di.Def{\r\n\t\tName: static.DiBirthday,\r\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\r\n\t\t\treturn birthday.New(ctn), nil\r\n\t\t},\r\n\t})\r\n\r\n\t// Build dependency injection container\r\n\tctn := diBuilder.Build()\r\n\t// Tear down dependency instances\r\n\tdefer ctn.DeleteWithSubContainers()\r\n\r\n\t// Setting log level from config\r\n\tcfg := ctn.Get(static.DiConfig).(config.Provider)\r\n\tif err := cfg.Parse(); err != nil {\r\n\t\tlog.Fatal().Err(err).Msg(\"Failed to parse config\")\r\n\t}\r\n\r\n\tlog.SetLevel(level.Level(cfg.Config().Logging.LogLevel))\r\n\r\n\tif lokiCfg := cfg.Config().Logging.Loki; lokiCfg.Enabled {\r\n\t\tw, err := lokiwriter.NewWriter(lokiCfg.Options)\r\n\t\tif err != nil {\r\n\t\t\tlog.Fatal().Err(err).Msg(\"Loki writer initialization failed\")\r\n\t\t}\r\n\t\tlog.AddWriter(w)\r\n\t\tlog.Info().Field(\"target\", lokiCfg.Address).Msg(\"Loki log writer enabled\")\r\n\t}\r\n\r\n\tif err := config.Validate(cfg); err != nil {\r\n\t\tentry := log.Fatal()\r\n\t\tif validationError, ok := err.(config.ValidationError); ok {\r\n\t\t\tentry = entry.Field(\"key\", validationError.Key())\r\n\t\t}\r\n\t\tentry.Err(err).Msg(\"Invalid config\")\r\n\t}\r\n\r\n\t// Initial log output\r\n\tlog.Info().Msg(\"Starting up...\")\r\n\r\n\tif old, curr, latest := util.CheckForUpdate(); old {\r\n\t\tlog.Warn().\r\n\t\t\tField(\"current\", 
curr.String()).\r\n\t\t\tField(\"latest\", latest.String()).\r\n\t\t\tMsg(\"Update available\")\r\n\t}\r\n\r\n\tif profLoc := util.GetEnv(envKeyProfile, flagProfile); profLoc != \"\" {\r\n\t\tsetupProfiler(profLoc)\r\n\t}\r\n\r\n\tif flagDevMode {\r\n\t\tsetupDevMode()\r\n\t}\r\n\r\n\tctn.Get(static.DiCommandHandler)\r\n\r\n\t// Initialize discord session and event\r\n\t// handlers\r\n\treleaseShard := inits.InitDiscordBotSession(ctn)\r\n\tdefer releaseShard()\r\n\r\n\t// Get Web WebServer instance to start web\r\n\t// server listener\r\n\tctn.Get(static.DiWebserver)\r\n\t// Get Backup Handler to ensure backup\r\n\t// timer is running.\r\n\tctn.Get(static.DiBackupHandler)\r\n\t// Get Metrics Server to start metrics\r\n\t// endpoint.\r\n\tctn.Get(static.DiMetrics)\r\n\r\n\t// Block main go routine until one of the following\r\n\t// specified exit syscalls occure.\r\n\tlog.Info().Msg(\"Started event loop. Stop with CTRL-C...\")\r\n\r\n\tlog.Info().Field(\"took\", startuptime.Took()).Msg(\"Initialization finished\")\r\n\tsc := make(chan os.Signal, 1)\r\n\tsignal.Notify(sc, syscall.SIGINT, syscall.SIGTERM, os.Interrupt, os.Kill)\r\n\t<-sc\r\n}", "func (m *Module) Build(s *system.System) {\n\tr := s.CommandRouter\n\n\tt, err := system.NewSubCommandRouter(`^config(\\s|$)`, \"config\")\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\tt.Router.Prefix = \"^\"\n\tr.AddSubrouter(t)\n\n\tt.CommandRoute = &system.CommandRoute{\n\t\tName: \"config\",\n\t\tDesc: \"configures guild settings\",\n\t\tHandler: Auth(CmdConfig),\n\t}\n\n\tk := t.Router\n\tk.On(\"prefix\", Auth(CmdPrefix)).Set(\"\", \"sets the guild command prefix\")\n\tk.On(\"admins\", Auth(CmdAdmins)).Set(\"\", \"sets the admin list\")\n}", "func main() {\n\tConfigurationFilesImplPtr = new(ConfigurationFilesImpl);\n\tArgsInit();\n\tStorageBackendImplPtr = new(ElasticsearchStorageBackendImp);\n\tStorageBackendImplPtr.init(ConfigurationFilesImplPtr);\n\tRestApiImplPtr = new(CyberBullyingEntryPointRestApiImpl);\n\tRestApiImplPtr.init(ConfigurationFilesImplPtr, StorageBackendImplPtr);\n\tRestServerImplPtr = new(RestServer);\n\tRestServerImplPtr.init(ConfigurationFilesImplPtr, RestApiImplPtr.GetApi());\n Run(); \n}", "func (i injector) Inject(dest ...interface{}) error {\n\tfor _, d := range dest {\n\t\tif d == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif hasInit, ok := d.(Initializable); ok {\n\t\t\terr := hasInit.Init()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\terr := i.injectSingle(d)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func init() {\n\tcore.RegisterConfigGroup(defaultConfigs)\n\tcore.RegisterServiceWithConfig(\"api\", &api.ApiServiceFactory{}, api.Configs)\n\tcore.RegisterServiceWithConfig(\"collector\", &collector.CollectorServiceFactory{}, collector.Configs)\n}", "func Build(appEnv, appName, appPath string, logger *plugins.ServerLog) {\n\trevel.Init(appEnv, appPath, \"\")\n\n\t// First, clear the generated files (to avoid them messing with ProcessSource).\n\tcleanSource(appName)\n\n\tsourceInfo, compileError := ProcessSource(revel.CodePaths)\n\tif compileError != nil {\n panic(compileError)\n\t}\n\n checkIsHaveArgs := func(ctrl []*TypeInfo) bool {\n result := false\n\n\t L: for _, c := range ctrl {\n\t\t for _, a:= range c.MethodSpecs {\n if len(a.Args) > 0 {\n result = true\n break L\n }\n\t\t\t}\n\t }\n\n return result\n }\n\n ctrlSpecs := sourceInfo.ControllerSpecs()\n\n\t// Generate two source files.\n\ttemplateArgs := map[string]interface{}{\n \"AppName\": appName,\n 
\"AppPath\": appPath,\n \"AppEnv\": appEnv,\n\t\t\"Controllers\": ctrlSpecs,\n\t\t\"ValidationKeys\": sourceInfo.ValidationKeys,\n\t\t\"ImportPaths\": calcImportAliases(sourceInfo),\n\t\t\"TestSuites\": sourceInfo.TestSuites(),\n \"IsArgInCtrl\": checkIsHaveArgs(ctrlSpecs),\n\t}\n\n\tgenSource(runtimePath, appName + \".go\", MAIN, templateArgs)\n\t// genSource(\"routes\", \"routes.go\", ROUTES, templateArgs)\n\n}", "func (i *importer) inject() error {\n\taccounts, err := i.retrieveAccounts(func(account *Account) error {\n\t\tif isNativeChainAccount(account) {\n\t\t\ti.logger.Info(\"skipping the creation of native account\",\n\t\t\t\tzap.String(\"account\", account.name),\n\t\t\t)\n\t\t\treturn nil\n\t\t}\n\t\ti.createAccount(account)\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to create chain accounts: %w\", err)\n\t}\n\n\tfor _, account := range accounts {\n\t\ti.logger.Debug(\"processing account\", zap.String(\"account\", account.name))\n\n\t\terr := account.setupAccountInfo()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to setup account %q: %w\", account.name, err)\n\t\t}\n\n\t\terr = i.createPermissions(account)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to create permissions for accounts %q: %w\", account.name, err)\n\t\t}\n\t\tif !account.hasCode {\n\t\t\tcontinue\n\t\t}\n\n\t\terr = i.migrateContract(account)\n\t\tif err != nil {\n\t\t\ti.logger.Error(\"unable to process account\",\n\t\t\t\tzap.String(\"account\", account.name),\n\t\t\t\tzap.Error(err),\n\t\t\t)\n\t\t}\n\t}\n\n\t// cleanup\n\timporterAuthority := i.importerAuthority()\n\tfor _, account := range accounts {\n\t\ti.logger.Debug(\"cleaning up account\", zap.String(\"account\", account.name))\n\t\terr = i.setPermissions(account, &importerAuthority)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to create permissions for accounts %q: %w\", account.name, err)\n\t\t}\n\t}\n\n\treturn nil\n}", "func CreateInjectArgs(config *rest.Config) InjectArgs {\n\tcs := kubernetes.NewForConfigOrDie(config)\n\teventBroadcaster := record.NewBroadcaster()\n\teventBroadcaster.StartLogging(glog.Infof)\n\teventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: cs.CoreV1().Events(\"\")})\n\treturn InjectArgs{\n\t\tConfig: config,\n\t\tKubernetesClientSet: cs,\n\t\tKubernetesInformers: informers.NewSharedInformerFactory(cs, 2*time.Minute),\n\t\tControllerManager: &controller.ControllerManager{},\n\t\tEventBroadcaster: eventBroadcaster,\n\t}\n}", "func (c *Client) Build(params map[string]interface{}) (api.ClientAPI, error) {\n\t// tenantName, _ := params[\"name\"].(string)\n\n\tidentity, _ := params[\"identity\"].(map[string]interface{})\n\tcompute, _ := params[\"compute\"].(map[string]interface{})\n\t// network, _ := params[\"network\"].(map[string]interface{})\n\n\tusername, _ := identity[\"Username\"].(string)\n\tpassword, _ := identity[\"Password\"].(string)\n\tdomainName, _ := identity[\"UserDomainName\"].(string)\n\n\tregion, _ := compute[\"Region\"].(string)\n\tprojectName, _ := compute[\"ProjectName\"].(string)\n\tprojectID, _ := compute[\"ProjectID\"].(string)\n\tdefaultImage, _ := compute[\"DefaultImage\"].(string)\n\n\treturn AuthenticatedClient(\n\t\tAuthOptions{\n\t\t\tUsername: username,\n\t\t\tPassword: password,\n\t\t\tRegion: region,\n\t\t\tDomainName: domainName,\n\t\t\tProjectName: projectName,\n\t\t\tProjectID: projectID,\n\t\t},\n\t\topenstack.CfgOptions{\n\t\t\tDefaultImage: defaultImage,\n\t\t},\n\t)\n}", "func (p *provider) inject(ctn 
*container) ([]reflect.Value, error) {\n\tif p.inTypes == nil {\n\t\tp.inTypes = internal.InputOf(p.constructor)\n\t}\n\ttypesLen := len(p.inTypes)\n\tinParam := make([]reflect.Value, typesLen)\n\tvar err error\n\tfor i, inType := range p.inTypes {\n\t\tv := reflect.New(inType)\n\t\tswitch inType.Kind() {\n\t\tcase reflect.Slice:\n\t\t\terr = ctn.sliceOf(v, p.filters[inType.Elem().String()])\n\t\tcase reflect.Map:\n\t\t\terr = ctn.mapOf(v, p.filters[inType.Elem().String()])\n\t\tcase reflect.Interface:\n\t\t\terr = ctn.instanceOf(v, p.filters[inType.String()])\n\t\tcase reflect.Ptr:\n\t\t\terr = ctn.instanceOf(v, p.filters[inType.Elem().String()])\n\t\tdefault:\n\t\t\terr = InvalidDependencies\n\t\t}\n\t\tif err != nil {\n\t\t\treturn inParam, errors.Wrapf(err, \"inject %s failed\", v.Type().String())\n\t\t}\n\t\tinParam[i] = v.Elem()\n\t}\n\treturn inParam, nil\n}", "func Inject(ctx context.Context, metadata *metadata.MD) {\n\tpropagator.Inject(ctx, &metadataSupplier{\n\t\tmetadata: metadata,\n\t})\n}", "func (m builder) build() (oci.SpecModifier, error) {\n\tif len(m.devices) == 0 && m.cdiSpec == nil {\n\t\treturn nil, nil\n\t}\n\n\tif m.cdiSpec != nil {\n\t\tmodifier := fromCDISpec{\n\t\t\tcdiSpec: &cdi.Spec{Spec: m.cdiSpec},\n\t\t}\n\t\treturn modifier, nil\n\t}\n\n\tregistry, err := cdi.NewCache(\n\t\tcdi.WithAutoRefresh(false),\n\t\tcdi.WithSpecDirs(m.specDirs...),\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create CDI registry: %v\", err)\n\t}\n\n\tmodifier := fromRegistry{\n\t\tlogger: m.logger,\n\t\tregistry: registry,\n\t\tdevices: m.devices,\n\t}\n\n\treturn modifier, nil\n}", "func Inject(db *sql.DB) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tc.Set(\"db\", db)\n\t\tc.Next()\n\t}\n}", "func NewI(args ...interface{}) Injections {\n\tret := make(Injections)\n\tfor _, v := range args {\n\t\tret[reflect.TypeOf(v)] = v\n\t}\n\treturn ret\n}", "func DockerBuildAll(repository string) {\n\tvar fns []interface{}\n\tfor _, module := range []ModuleName{SCALER, OPERATOR, INTERCEPTOR} {\n\t\tfns = append(fns, mg.F(DockerBuild, repository, (string)(module)))\n\t}\n\tmg.Deps(fns...)\n}", "func DI() proto.RoomServicesServer {\n\tiulidGenerator := util.NewULIDGenerator()\n\tiFactory := room.NewFactory(iulidGenerator)\n\tdb := mysql.ConnectGorm()\n\tiRepository := room2.NewRepositoryImpl(db)\n\tiDomainService := room3.NewDomainService(iRepository)\n\tiInputPort := room4.NewInteractor(iFactory, iRepository, iDomainService)\n\troomIInputPort := room5.NewInteractor(iRepository)\n\tclient := redis.NewClient()\n\tmessageIRepository := message.NewRepositoryImpl(db, client)\n\tmessageIInputPort := message2.NewInteractor(messageIRepository)\n\tmessageIFactory := message3.NewFactory(iulidGenerator)\n\tiInputPort2 := room6.NewInteractor(messageIFactory, messageIRepository, iRepository)\n\troomServicesServer := room7.NewController(iInputPort, roomIInputPort, messageIInputPort, iInputPort2)\n\treturn roomServicesServer\n}", "func Inject(value reflect.Value, name string) (bool, error) {\n\tenvValue, ok := os.LookupEnv(name)\n\tif !ok {\n\t\treturn false, nil\n\t}\n\treturn literal.Injector.Inject(value, envValue)\n}", "func init() {\n\tmakeFunc := func(base func([]reflect.Value) []reflect.Value, fptr interface{}) {\n\t\tfn := reflect.ValueOf(fptr).Elem()\n\t\tv := reflect.MakeFunc(fn.Type(), base)\n\t\tfn.Set(v)\n\t}\n\n\t// getAll(Repository) (int, string)\n\tgetAll := func(in []reflect.Value) []reflect.Value {\n\t\tvalues := 
in[0].MethodByName(\"GetAll\").Call([]reflect.Value{})\n\t\t// values is []reflect.Value returned by reflect.Call.\n\t\t// Since GetAll only returns interface{}, we just want the first object in values\n\t\tjsonResponse := string(jsonEncode(values[0].Interface()))\n\t\treturn genericHandlerReturn(http.StatusFound, jsonResponse)\n\t}\n\n\tmakeFunc(getAll, &GetAllUnits)\n\n\t/*func AddUnit(rw http.ResponseWriter, u Unit, repo IUnitRepository) (int, string) {\n\t\trepo.Add(&u)\n\t\trw.Header().Set(\"Location\", fmt.Sprintf(\"/unit/%d\", u.Id))\n\t\treturn http.StatusCreated, \"\"\n\t}\n\t// add(http.ResponseWriter, entity, Repository) (int, string)\n\tadd := func(in []reflect.Value) []reflect.Value {\n\t\tin[2].MethodByName(\"Add\").Call([]reflect.Value{in[1]})\n\t\theader := in[0].MethodByName(\"Header\").Call(nil)\n\t\tlocation := reflect.ValueOf(\"Location\")\n\t\tlocationValue := reflect.ValueOf(fmt.Sprintf(\"/unit/%d\", in[1].FieldByName(\"Id\")))\n\t\treflect.ValueOf(header).MethodByName(\"Set\").Call([]reflect.Value{location, locationValue})\n\t\treturn genericHandlerReturn(http.StatusCreated, \"\")\n\t}\n\n\tmakeFunc(add, &AddUnit)\n\t*/\n\n\t// get(martini.Params, Repository) (int, string)\n\tget := func(in []reflect.Value) []reflect.Value {\n\t\tparams := in[0].Interface().(martini.Params)\n\t\tid, err := strconv.Atoi(params[\"id\"])\n\n\t\tif err != nil {\n\t\t\treturn notFoundGeneric()\n\t\t}\n\n\t\tinGet := []reflect.Value{reflect.ValueOf(id)}\n\t\tvalues := in[1].MethodByName(\"Get\").Call(inGet)\n\n\t\tif values[0].IsNil() {\n\t\t\treturn notFoundGeneric()\n\t\t}\n\n\t\tjsonResponse := string(jsonEncode(values[0].Interface()))\n\t\treturn []reflect.Value{reflect.ValueOf(http.StatusOK), reflect.ValueOf(jsonResponse)}\n\t}\n\n\tmakeFunc(get, &GetUnit)\n\n}", "func (b *Builder) Build(ctx context.Context, app *AppContext) error {\n\tif err := buildComponents(ctx, app); err != nil {\n\t\treturn fmt.Errorf(\"error building components: %v\", err)\n\t}\n\treturn nil\n}", "func InjectFirebase(c *gin.Context) {\n\tlog.Printf(\"Loading firebase app\")\n\tc.Keys = map[string]interface{}{}\n\tctx, _ := context.WithTimeout(context.Background(), 5*time.Second)\n\topt := option.WithHTTPClient(http.DefaultClient)\n\topt = option.WithScopes(\"https://www.googleapis.com/auth/cloud-platform\", \"https://www.googleapis.com/auth/userinfo.email\")\n\tdatabaseURL := \"https://project-hermes-staging.firebaseio.com\"\n\tconfig := &firebase.Config{\n\t\tDatabaseURL: databaseURL,\n\t\tProjectID: \"project-hermes-staging\",\n\t}\n\n\tif app, err := firebase.NewApp(ctx, config, opt); err != nil {\n\t\tlog.Fatalf(\"Unable to load firebase app: %s\", err)\n\t\tc.AbortWithError(http.StatusServiceUnavailable, errors.New(\"an error occurred while processing your request\"))\n\t} else {\n\t\tc.Keys[FirebaseApp] = app\n\t\tc.Next()\n\t}\n}", "func Build(\n\tctx context.Context,\n) (*goji.Mux, error) {\n\tif mint.GetHost(ctx) == \"\" {\n\t\tif env.Get(ctx).Environment == env.Production {\n\t\t\treturn nil, errors.Trace(errors.Newf(\n\t\t\t\t\"You must set the `-host` flag to a publicly accessible hostname that other mints can use to contact this mint over HTTPS (placing the mint behind a HAProxy, NGINX or similar for SSL termination in production). 
If you're just testing and don't have a public domain name pointing to this machine, please run with `-env=qa` and `-host=127.0.0.1`\",\n\t\t\t))\n\t\t}\n\t\treturn nil, errors.Trace(errors.Newf(\n\t\t\t\"You must set the `-host` flag to the hostname that other mints can use to contact this mint over HTTP (since you're running in QA). You can use `-host=127.0.0.1` for testing purposes.\",\n\t\t))\n\t}\n\n\tmux := goji.NewMux()\n\tmux.Use(requestlogger.Middleware)\n\tmux.Use(recoverer.Middleware)\n\tmux.Use(db.Middleware(db.GetDBMap(ctx)))\n\tmux.Use(env.Middleware(env.Get(ctx)))\n\tmux.Use(async.Middleware(async.Get(ctx)))\n\tmux.Use(authentication.Middleware)\n\n\tlogging.Logf(ctx, \"Initializing: environment=%s host=%s port=%s\",\n\t\tenv.Get(ctx).Environment, mint.GetHost(ctx), mint.GetPort(ctx))\n\n\t(&Controller{}).Bind(mux)\n\n\t// Start on async worker.\n\tgo func() {\n\t\tasync.Get(ctx).Run()\n\t}()\n\n\treturn mux, nil\n}", "func InjectRoutingService(\n\truntime env.Runtime,\n\tprefix provider.LogPrefix,\n\tlogLevel logger.LogLevel,\n\tsqlDB *sql.DB,\n\tgithubClientID provider.GithubClientID,\n\tgithubClientSecret provider.GithubClientSecret,\n\tfacebookClientID provider.FacebookClientID,\n\tfacebookClientSecret provider.FacebookClientSecret,\n\tfacebookRedirectURI provider.FacebookRedirectURI,\n\tgoogleClientID provider.GoogleClientID,\n\tgoogleClientSecret provider.GoogleClientSecret,\n\tgoogleRedirectURI provider.GoogleRedirectURI,\n\tjwtSecret provider.JwtSecret,\n\tbufferSize provider.KeyGenBufferSize,\n\tkgsRPCConfig provider.KgsRPCConfig,\n\twebFrontendURL provider.WebFrontendURL,\n\ttokenValidDuration provider.TokenValidDuration,\n\tdataDogAPIKey provider.DataDogAPIKey,\n\tsegmentAPIKey provider.SegmentAPIKey,\n\tipStackAPIKey provider.IPStackAPIKey,\n) (service.Routing, error) {\n\twire.Build(\n\t\twire.Bind(new(timer.Timer), new(timer.System)),\n\t\twire.Bind(new(geo.Geo), new(geo.IPStack)),\n\n\t\twire.Bind(new(url.Retriever), new(url.RetrieverPersist)),\n\t\twire.Bind(new(repository.UserURLRelation), new(sqldb.UserURLRelationSQL)),\n\t\twire.Bind(new(repository.User), new(*sqldb.UserSQL)),\n\t\twire.Bind(new(repository.URL), new(*sqldb.URLSql)),\n\n\t\tobservabilitySet,\n\t\tauthSet,\n\t\tgithubAPISet,\n\t\tfacebookAPISet,\n\t\tgoogleAPISet,\n\t\tkeyGenSet,\n\t\tfeatureDecisionSet,\n\n\t\tservice.NewRouting,\n\t\twebreq.NewHTTPClient,\n\t\twebreq.NewHTTP,\n\t\tgraphql.NewClientFactory,\n\t\ttimer.NewSystem,\n\t\tprovider.NewIPStack,\n\t\tenv.NewDeployment,\n\n\t\tsqldb.NewUserSQL,\n\t\tsqldb.NewURLSql,\n\t\tsqldb.NewUserURLRelationSQL,\n\t\turl.NewRetrieverPersist,\n\t\taccount.NewProvider,\n\t\tprovider.NewShortRoutes,\n\t)\n\treturn service.Routing{}, nil\n}", "func (inj *injector) Provide(provider interface{}) TypeMapper {\n\tval := reflect.ValueOf(provider)\n\tt := val.Type()\n\tnumout := t.NumOut()\n\tfor i := 0; i < numout; i++ {\n\t\tout := t.Out(i)\n\t\tinj.providers[out] = val\n\t}\n\treturn inj\n}", "func (dbPrivConf *DbPrivConfInject) generateInitContainer(i int, matched *v1.BcsDbPrivConfig) corev1.Container {\n\tvar fieldPath, callType string\n\n\tif dbPrivConf.Injects.DbPriv.NetworkType == \"overlay\" {\n\t\tfieldPath = \"status.hostIP\"\n\t} else if dbPrivConf.Injects.DbPriv.NetworkType == \"underlay\" {\n\t\tfieldPath = \"status.podIP\"\n\t}\n\n\tif matched.Spec.DbType == \"mysql\" {\n\t\tcallType = \"mysql_ignoreCC\"\n\t} else if matched.Spec.DbType == \"spider\" {\n\t\tcallType = \"spider_ignoreCC\"\n\t}\n\n\tinitContainer := 
corev1.Container{\n\t\tName: \"db-privilege\" + \"-\" + strconv.Itoa(i),\n\t\tImage: dbPrivConf.Injects.DbPriv.InitContainerImage,\n\t\tEnv: []corev1.EnvVar{\n\t\t\t{\n\t\t\t\tName: \"io_tencent_bcs_privilege_ip\",\n\t\t\t\tValueFrom: &corev1.EnvVarSource{\n\t\t\t\t\tFieldRef: &corev1.ObjectFieldSelector{\n\t\t\t\t\t\tFieldPath: fieldPath,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"io_tencent_bcs_esb_url\",\n\t\t\t\tValue: dbPrivConf.Injects.DbPriv.EsbUrl,\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"io_tencent_bcs_app_code\",\n\t\t\t\tValue: string(dbPrivConf.DbPrivSecret.Data[\"sdk-appCode\"][:]),\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"io_tencent_bcs_app_secret\",\n\t\t\t\tValue: string(dbPrivConf.DbPrivSecret.Data[\"sdk-appSecret\"]),\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"io_tencent_bcs_app_operator\",\n\t\t\t\tValue: string(dbPrivConf.DbPrivSecret.Data[\"sdk-operator\"]),\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"io_tencent_bcs_db_privilege_app_name\",\n\t\t\t\tValue: matched.Spec.AppName,\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"io_tencent_bcs_db_privilege_target\",\n\t\t\t\tValue: matched.Spec.TargetDb,\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"io_tencent_bcs_db_privilege_db_name\",\n\t\t\t\tValue: matched.Spec.DbName,\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"io_tencent_bcs_db_privilege_call_user\",\n\t\t\t\tValue: matched.Spec.CallUser,\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"io_tencent_bcs_db_privilege_db_type\",\n\t\t\t\tValue: callType,\n\t\t\t},\n\t\t},\n\t}\n\n\treturn initContainer\n}", "func init() {\n\tPromu.AddCommand(buildCmd)\n\n\tbuildCmd.Flags().String(\"prefix\", \"\", \"Specific dir to store binaries (default is .)\")\n\n\tviper.BindPFlag(\"build.prefix\", buildCmd.Flags().Lookup(\"prefix\"))\n}", "func init() {\n\tfor group, values := range defaultConfigs {\n\t\tcore.RegisterConfig(group, values)\n\t}\n\tcore.RegisterService(\"indicator\", indicator.Configs, &indicator.IndicatorServiceFactory{})\n\tcore.RegisterService(\"executor\", executor.Configs, &executor.ExecutorServiceFactory{})\n}", "func Build(loggerConfig *config.LoggerConfig) (glogger.Logger, error) {\n\tloggerCode := loggerConfig.Code\n\tloggerFactory, err := getLoggerFactory(loggerCode)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"getLoggerFactory\")\n\t}\n\tlogger, err := loggerFactory.Build(loggerConfig)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"loggerFactory.Build\")\n\t}\n\treturn logger, nil\n}", "func (p *TwitterOutputPlugin) Build(output *model.OutputDef) (model.OutputProvider, error) {\n\tconsumerKey := output.Props.Get(\"consumerKey\")\n\tif consumerKey == \"\" {\n\t\treturn nil, fmt.Errorf(\"missing consumer key property\")\n\t}\n\tconsumerSecret := output.Props.Get(\"consumerSecret\")\n\tif consumerSecret == \"\" {\n\t\treturn nil, fmt.Errorf(\"missing consumer secret property\")\n\t}\n\taccessToken := output.Props.Get(\"accessToken\")\n\tif accessToken == \"\" {\n\t\treturn nil, fmt.Errorf(\"missing access token property\")\n\t}\n\taccessTokenSecret := output.Props.Get(\"accessTokenSecret\")\n\tif accessTokenSecret == \"\" {\n\t\treturn nil, fmt.Errorf(\"missing access token secret property\")\n\t}\n\tanaconda.SetConsumerKey(consumerKey)\n\tanaconda.SetConsumerSecret(consumerSecret)\n\tapi := anaconda.NewTwitterApi(accessToken, accessTokenSecret)\n\n\treturn &TwitterOutputProvider{\n\t\tid: output.ID,\n\t\talias: output.Alias,\n\t\tspec: spec,\n\t\ttags: output.Tags,\n\t\tenabled: output.Enabled,\n\t\tapi: api,\n\t\tconsumerKey: consumerKey,\n\t\tconsumerSecret: consumerSecret,\n\t}, 
nil\n}", "func init() {\n\tmodules.Register(\"k6/x/faker\", New())\n}", "func init() {\n\tregistry.Add(\"tapo\", NewTapoFromConfig)\n}", "func (a EnvServiceProvider) Register(container inter.Container) inter.Container {\n\tcontainer.Bind(\"env\", config.App.Env)\n\n\treturn container\n}", "func (b Binding) addGlobalInjections() {\n\tfor _, v := range b.base().container.globalInjections {\n\t\tb.AddInjection(v)\n\t}\n}", "func Init(router *gin.RouterGroup, ri *bulrush.ReverseInject) {\n\tri.Register(RegisterHello)\n}", "func Inject(inner http.Handler) http.Handler {\n\treturn http.HandlerFunc(handler(inner))\n}", "func newRegistry(builders []adapter.RegisterFn) *registry {\n\tr := &registry{make(BuildersByName)}\n\tfor idx, builder := range builders {\n\t\tglog.V(3).Infof(\"Registering [%d] %#v\", idx, builder)\n\t\tbuilder(r)\n\t}\n\t// ensure interfaces are satisfied.\n\t// should be compiled out.\n\tvar _ adapter.Registrar = r\n\tvar _ builderFinder = r\n\treturn r\n}", "func newProviderConstructor(name string, fn reflection.Func) (*providerConstructor, error) {\n\tctorType := determineCtorType(fn)\n\tif ctorType == ctorUnknown {\n\t\treturn nil, fmt.Errorf(\"invalid constructor signature, got %s\", fn.Type)\n\t}\n\tprovider := &providerConstructor{\n\t\tname: name,\n\t\tcall: fn,\n\t\tctorType: ctorType,\n\t}\n\t// result type\n\trt := fn.Out(0)\n\t// constructor result with di.Inject - only addressable pointers\n\t// anonymous parameters with di.Inject - only struct\n\tif canInject(rt) && rt.Kind() != reflect.Ptr {\n\t\treturn nil, fmt.Errorf(\"di.Inject not supported for unaddressable result of constructor, use *%s instead\", rt)\n\t}\n\t// if struct is injectable, range over inject fields and parse injectable params\n\tif canInject(rt) {\n\t\tprovider.inject.fields, provider.inject.params = parseFieldParams(rt)\n\t}\n\tvar params parameterList\n\tfor i := 0; i < provider.call.NumIn(); i++ {\n\t\tin := provider.call.In(i)\n\t\tparams = append(params, parameter{\n\t\t\t// haven't found the way to specify name for type in function\n\t\t\tname: \"\",\n\t\t\ttyp: in,\n\t\t})\n\t}\n\tprovider.params = append(params, provider.inject.params...)\n\treturn provider, nil\n}", "func main() {\n\t// Parse command line flags\n\tflag.Parse()\n\n\tif !*flagQuiet {\n\t\tstartupmsg.Output(os.Stdout)\n\t}\n\n\t// Initialize dependency injection builder\n\tdiBuilder, _ := di.NewBuilder()\n\n\t// Setup config parser\n\tdiBuilder.Add(di.Def{\n\t\tName: static.DiConfigParser,\n\t\tBuild: func(ctn di.Container) (p interface{}, err error) {\n\t\t\text := strings.ToLower(filepath.Ext(*flagConfig))\n\t\t\tswitch ext {\n\t\t\tcase \".yml\", \".yaml\":\n\t\t\t\tp = new(config.YAMLConfigParser)\n\t\t\tcase \".json\":\n\t\t\t\tp = new(config.JSONConfigParser)\n\t\t\tdefault:\n\t\t\t\terr = errors.New(\"unsupported configuration file\")\n\t\t\t}\n\t\t\treturn\n\t\t},\n\t})\n\n\t// Initialize config\n\tdiBuilder.Add(di.Def{\n\t\tName: static.DiConfig,\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\n\t\t\treturn inits.InitConfig(*flagConfig, ctn), nil\n\t\t},\n\t})\n\n\t// Initialize metrics server\n\tdiBuilder.Add(di.Def{\n\t\tName: static.DiMetrics,\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\n\t\t\treturn inits.InitMetrics(ctn), nil\n\t\t},\n\t})\n\n\t// Initialize redis client\n\tdiBuilder.Add(di.Def{\n\t\tName: static.DiRedis,\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\n\t\t\tconfig := ctn.Get(static.DiConfig).(*config.Config)\n\t\t\treturn 
redis.NewClient(&redis.Options{\n\t\t\t\tAddr: config.Database.Redis.Addr,\n\t\t\t\tPassword: config.Database.Redis.Password,\n\t\t\t\tDB: config.Database.Redis.Type,\n\t\t\t}), nil\n\t\t},\n\t})\n\n\t// Initialize database middleware and shutdown routine\n\tdiBuilder.Add(di.Def{\n\t\tName: static.DiDatabase,\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\n\t\t\treturn inits.InitDatabase(ctn), nil\n\t\t},\n\t\tClose: func(obj interface{}) error {\n\t\t\tdatabase := obj.(database.Database)\n\t\t\tlogrus.Info(\"Shutting down database connection...\")\n\t\t\tdatabase.Close()\n\t\t\treturn nil\n\t\t},\n\t})\n\n\t// Initialize twitch notification listener\n\tdiBuilder.Add(di.Def{\n\t\tName: static.DiTwitchNotifyListener,\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\n\t\t\treturn listeners.NewListenerTwitchNotify(ctn), nil\n\t\t},\n\t\tClose: func(obj interface{}) error {\n\t\t\tlistener := obj.(*listeners.ListenerTwitchNotify)\n\t\t\tlogrus.Info(\"Shutting down twitch notify listener...\")\n\t\t\tlistener.TearDown()\n\t\t\treturn nil\n\t\t},\n\t})\n\n\t// Initialize twitch notification worker\n\tdiBuilder.Add(di.Def{\n\t\tName: static.DiTwitchNotifyWorker,\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\n\t\t\treturn inits.InitTwitchNotifyWorker(ctn), nil\n\t\t},\n\t})\n\n\t// Initialize life cycle timer\n\tdiBuilder.Add(di.Def{\n\t\tName: static.DiLifecycleTimer,\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\n\t\t\treturn inits.InitLTCTimer(ctn), nil\n\t\t},\n\t})\n\n\t// Initialize storage middleware\n\tdiBuilder.Add(di.Def{\n\t\tName: static.DiObjectStorage,\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\n\t\t\treturn inits.InitStorage(ctn), nil\n\t\t},\n\t})\n\n\t// Initialize permissions command handler middleware\n\tdiBuilder.Add(di.Def{\n\t\tName: static.DiPermissionMiddleware,\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\n\t\t\treturn middleware.NewPermissionMiddleware(ctn), nil\n\t\t},\n\t})\n\n\t// Initialize ghost ping ignore command handler middleware\n\tdiBuilder.Add(di.Def{\n\t\tName: static.DiGhostpingIgnoreMiddleware,\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\n\t\t\treturn middleware.NewGhostPingIgnoreMiddleware(), nil\n\t\t},\n\t})\n\n\t// Initialize discord bot session and shutdown routine\n\tdiBuilder.Add(di.Def{\n\t\tName: static.DiDiscordSession,\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\n\t\t\treturn discordgo.New()\n\t\t},\n\t\tClose: func(obj interface{}) error {\n\t\t\tsession := obj.(*discordgo.Session)\n\t\t\tlogrus.Info(\"Shutting down bot session...\")\n\t\t\tsession.Close()\n\t\t\treturn nil\n\t\t},\n\t})\n\n\t// Initialize Discord OAuth Module\n\tdiBuilder.Add(di.Def{\n\t\tName: static.DiDiscordOAuthModule,\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\n\t\t\treturn inits.InitDiscordOAuth(ctn), nil\n\t\t},\n\t})\n\n\t// Initialize auth refresh token handler\n\tdiBuilder.Add(di.Def{\n\t\tName: static.DiAuthRefreshTokenHandler,\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\n\t\t\treturn auth.NewDatabaseRefreshTokenHandler(ctn), nil\n\t\t},\n\t})\n\n\t// Initialize auth access token handler\n\tdiBuilder.Add(di.Def{\n\t\tName: static.DiAuthAccessTokenHandler,\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\n\t\t\treturn auth.NewJWTAccessTokenHandler(ctn)\n\t\t},\n\t})\n\n\t// Initialize auth API token handler\n\tdiBuilder.Add(di.Def{\n\t\tName: static.DiAuthAPITokenHandler,\n\t\tBuild: func(ctn di.Container) (interface{}, error) 
{\n\t\t\treturn auth.NewDatabaseAPITokenHandler(ctn)\n\t\t},\n\t})\n\n\t// Initialize OAuth API handler implementation\n\tdiBuilder.Add(di.Def{\n\t\tName: static.DiOAuthHandler,\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\n\t\t\treturn auth.NewRefreshTokenRequestHandler(ctn), nil\n\t\t},\n\t})\n\n\t// Initialize access token authorization middleware\n\tdiBuilder.Add(di.Def{\n\t\tName: static.DiAuthMiddleware,\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\n\t\t\treturn auth.NewAccessTokenMiddleware(ctn), nil\n\t\t},\n\t})\n\n\t// Initialize OTA generator\n\tdiBuilder.Add(di.Def{\n\t\tName: static.DiOneTimeAuth,\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\n\t\t\treturn onetimeauth.NewJwt(&onetimeauth.JwtOptions{\n\t\t\t\tIssuer: \"shinpuru v.\" + embedded.AppVersion,\n\t\t\t})\n\t\t},\n\t})\n\n\t// Initialize backup handler\n\tdiBuilder.Add(di.Def{\n\t\tName: static.DiBackupHandler,\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\n\t\t\treturn backup.New(ctn), nil\n\t\t},\n\t})\n\n\t// Initialize command handler\n\tdiBuilder.Add(di.Def{\n\t\tName: static.DiCommandHandler,\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\n\t\t\treturn inits.InitCommandHandler(ctn), nil\n\t\t},\n\t})\n\n\t// Initialize web server\n\tdiBuilder.Add(di.Def{\n\t\tName: static.DiWebserver,\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\n\t\t\treturn inits.InitWebServer(ctn), nil\n\t\t},\n\t})\n\n\t// Initialize code execution factroy\n\tdiBuilder.Add(di.Def{\n\t\tName: static.DiCodeExecFactory,\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\n\t\t\treturn inits.InitCodeExec(ctn), nil\n\t\t},\n\t})\n\n\t// Initialize karma service\n\tdiBuilder.Add(di.Def{\n\t\tName: static.DiKarma,\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\n\t\t\treturn karma.NewKarmaService(ctn), nil\n\t\t},\n\t})\n\n\t// Initialize report service\n\tdiBuilder.Add(di.Def{\n\t\tName: static.DiReport,\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\n\t\t\treturn report.New(ctn), nil\n\t\t},\n\t})\n\n\t// Initialize guild logger\n\tdiBuilder.Add(di.Def{\n\t\tName: static.DiGuildLog,\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\n\t\t\treturn guildlog.New(ctn), nil\n\t\t},\n\t})\n\n\t// Initialize KV cache\n\tdiBuilder.Add(di.Def{\n\t\tName: static.DiKVCache,\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\n\t\t\treturn kvcache.NewTimedmapCache(10 * time.Minute), nil\n\t\t},\n\t})\n\n\tdiBuilder.Add(di.Def{\n\t\tName: static.DiState,\n\t\tBuild: func(ctn di.Container) (interface{}, error) {\n\t\t\treturn inits.InitState(ctn)\n\t\t},\n\t})\n\n\t// Build dependency injection container\n\tctn := diBuilder.Build()\n\n\t// Setting log level from config\n\tcfg := ctn.Get(static.DiConfig).(*config.Config)\n\tlogrus.SetLevel(logrus.Level(cfg.Logging.LogLevel))\n\tlogrus.SetFormatter(&logrus.TextFormatter{\n\t\tForceColors: true,\n\t\tFullTimestamp: true,\n\t\tTimestampFormat: \"2006/01/02 15:04:05 MST\",\n\t})\n\n\t// Initial log output\n\tlogrus.Info(\"Starting up...\")\n\n\tif profLoc := util.GetEnv(envKeyProfile, *flagProfile); profLoc != \"\" {\n\t\tsetupProfiler(profLoc)\n\t}\n\n\tif *flagDevMode {\n\t\tsetupDevMode()\n\t}\n\n\t// Initialize discord session and event\n\t// handlers\n\tinits.InitDiscordBotSession(ctn)\n\n\t// This is currently the really hacky workaround\n\t// to bypass the di.Container when trying to get\n\t// the Command handler instance inside a command\n\t// context, because the handler can not resolve\n\t// 
itself on build, so it is bypassed here using\n\t// shireikans object map. Maybe I find a better\n\t// solution for that at some time.\n\thandler := ctn.Get(static.DiCommandHandler).(shireikan.Handler)\n\thandler.SetObject(static.DiCommandHandler, handler)\n\n\t// Get Web WebServer instance to start web\n\t// server listener\n\tctn.Get(static.DiWebserver)\n\t// Get Backup Handler to ensure backup\n\t// timer is running.\n\tctn.Get(static.DiBackupHandler)\n\t// Get Metrics Server to start metrics\n\t// endpoint.\n\tctn.Get(static.DiMetrics)\n\n\t// Block main go routine until one of the following\n\t// specified exit syscalls occure.\n\tlogrus.Info(\"Started event loop. Stop with CTRL-C...\")\n\n\tlogrus.WithField(\"took\", startuptime.Took().String()).Info(\"Initialization finished\")\n\tsc := make(chan os.Signal, 1)\n\tsignal.Notify(sc, syscall.SIGINT, syscall.SIGTERM, os.Interrupt, os.Kill)\n\t<-sc\n\n\t// Tear down dependency instances\n\tctn.DeleteWithSubContainers()\n}", "func Run() error {\n\tcmd := cmd.NewProxyInjectorCommand()\n\treturn cmd.Execute()\n}", "func main() {\n\texpvars()\n\tservice.Run()\n}", "func Inject(key string, client *mongo.Client) {\n\tclientManager.Inject(key, &ClosableClient{client})\n}", "func (c *client) compileSteps(p *yaml.Build, _pipeline *library.Pipeline, tmpls map[string]*yaml.Template, r *pipeline.RuleData) (*pipeline.Build, *library.Pipeline, error) {\n\tvar err error\n\n\t// check if the pipeline disabled the clone\n\tif p.Metadata.Clone == nil || *p.Metadata.Clone {\n\t\t// inject the clone step\n\t\tp, err = c.CloneStep(p)\n\t\tif err != nil {\n\t\t\treturn nil, _pipeline, err\n\t\t}\n\t}\n\n\t// inject the init step\n\tp, err = c.InitStep(p)\n\tif err != nil {\n\t\treturn nil, _pipeline, err\n\t}\n\n\t// inject the templates into the steps\n\tp, err = c.ExpandSteps(p, tmpls, r, c.TemplateDepth)\n\tif err != nil {\n\t\treturn nil, _pipeline, err\n\t}\n\n\tif c.ModificationService.Endpoint != \"\" {\n\t\t// send config to external endpoint for modification\n\t\tp, err = c.modifyConfig(p, c.build, c.repo)\n\t\tif err != nil {\n\t\t\treturn nil, _pipeline, err\n\t\t}\n\t}\n\n\t// validate the yaml configuration\n\terr = c.Validate(p)\n\tif err != nil {\n\t\treturn nil, _pipeline, err\n\t}\n\n\t// Create some default global environment inject vars\n\t// these are used below to overwrite to an empty\n\t// map if they should not be injected into a container\n\tenvGlobalServices, envGlobalSecrets, envGlobalSteps := p.Environment, p.Environment, p.Environment\n\n\tif !p.Metadata.HasEnvironment(\"services\") {\n\t\tenvGlobalServices = make(raw.StringSliceMap)\n\t}\n\n\tif !p.Metadata.HasEnvironment(\"secrets\") {\n\t\tenvGlobalSecrets = make(raw.StringSliceMap)\n\t}\n\n\tif !p.Metadata.HasEnvironment(\"steps\") {\n\t\tenvGlobalSteps = make(raw.StringSliceMap)\n\t}\n\n\t// inject the environment variables into the services\n\tp.Services, err = c.EnvironmentServices(p.Services, envGlobalServices)\n\tif err != nil {\n\t\treturn nil, _pipeline, err\n\t}\n\n\t// inject the environment variables into the secrets\n\tp.Secrets, err = c.EnvironmentSecrets(p.Secrets, envGlobalSecrets)\n\tif err != nil {\n\t\treturn nil, _pipeline, err\n\t}\n\n\t// inject the environment variables into the steps\n\tp.Steps, err = c.EnvironmentSteps(p.Steps, envGlobalSteps)\n\tif err != nil {\n\t\treturn nil, _pipeline, err\n\t}\n\n\t// inject the substituted environment variables into the steps\n\tp.Steps, err = c.SubstituteSteps(p.Steps)\n\tif err != nil {\n\t\treturn nil, 
_pipeline, err\n\t}\n\n\t// inject the scripts into the steps\n\tp.Steps, err = c.ScriptSteps(p.Steps)\n\tif err != nil {\n\t\treturn nil, _pipeline, err\n\t}\n\n\t// create executable representation\n\tbuild, err := c.TransformSteps(r, p)\n\tif err != nil {\n\t\treturn nil, _pipeline, err\n\t}\n\n\treturn build, _pipeline, nil\n}", "func (c *Config) Build() weather.Provider {\n\t// Build the OWM URL.\n\twURL := url.URL{\n\t\tScheme: \"http\",\n\t\tHost: \"api.wunderground.com\",\n\t\tPath: fmt.Sprintf(\"/api/%s/conditions/q/%s.json\", c.apiKey, c.query),\n\t}\n\treturn Provider(wURL.String())\n}", "func (b Binding) AddInjection(i interface{}) Binding {\n\tbb := b.base()\n\tit := reflect.TypeOf(i)\n\tbb.singletons[it] = i\n\n\tpta := parameterTypeArray(b, true)\n\n\tfor ii, t := range pta {\n\t\tif t == it {\n\t\t\tbb.injections[ii] = it\n\t\t}\n\t}\n\n\treturn b\n}", "func Build(ctx context.Context, set Settings) (*Pipelines, error) {\n\texps := &Pipelines{\n\t\ttelemetry: set.Telemetry,\n\t\tallReceivers: make(map[config.DataType]map[config.ComponentID]component.Receiver),\n\t\tallExporters: make(map[config.DataType]map[config.ComponentID]component.Exporter),\n\t\tpipelines: make(map[config.ComponentID]*builtPipeline, len(set.PipelineConfigs)),\n\t}\n\n\treceiversConsumers := make(map[config.DataType]map[config.ComponentID][]baseConsumer)\n\n\t// Iterate over all pipelines, and create exporters, then processors.\n\t// Receivers cannot be created since we need to know all consumers, a.k.a. we need all pipelines build up to the\n\t// first processor.\n\tfor pipelineID, pipeline := range set.PipelineConfigs {\n\t\t// The data type of the pipeline defines what data type each exporter is expected to receive.\n\t\tif _, ok := exps.allExporters[pipelineID.Type()]; !ok {\n\t\t\texps.allExporters[pipelineID.Type()] = make(map[config.ComponentID]component.Exporter)\n\t\t}\n\t\texpByID := exps.allExporters[pipelineID.Type()]\n\n\t\tbp := &builtPipeline{\n\t\t\treceivers: make([]builtComponent, len(pipeline.Receivers)),\n\t\t\tprocessors: make([]builtComponent, len(pipeline.Processors)),\n\t\t\texporters: make([]builtComponent, len(pipeline.Exporters)),\n\t\t}\n\t\texps.pipelines[pipelineID] = bp\n\n\t\t// Iterate over all Exporters for this pipeline.\n\t\tfor i, expID := range pipeline.Exporters {\n\t\t\t// If already created an exporter for this [DataType, ComponentID] nothing to do, will reuse this instance.\n\t\t\tif exp, ok := expByID[expID]; ok {\n\t\t\t\tbp.exporters[i] = builtComponent{id: expID, comp: exp}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\texp, err := buildExporter(ctx, set.Telemetry, set.BuildInfo, set.ExporterConfigs, set.ExporterFactories, expID, pipelineID)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tbp.exporters[i] = builtComponent{id: expID, comp: exp}\n\t\t\texpByID[expID] = exp\n\t\t}\n\n\t\t// Build a fan out consumer to all exporters.\n\t\tswitch pipelineID.Type() {\n\t\tcase config.TracesDataType:\n\t\t\tbp.lastConsumer = buildFanOutExportersTracesConsumer(bp.exporters)\n\t\tcase config.MetricsDataType:\n\t\t\tbp.lastConsumer = buildFanOutExportersMetricsConsumer(bp.exporters)\n\t\tcase config.LogsDataType:\n\t\t\tbp.lastConsumer = buildFanOutExportersLogsConsumer(bp.exporters)\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"create fan-out exporter in pipeline %q, data type %q is not supported\", pipelineID, pipelineID.Type())\n\t\t}\n\n\t\tmutatesConsumedData := bp.lastConsumer.Capabilities().MutatesData\n\t\t// Build the processors backwards, starting from 
the last one.\n\t\t// The last processor points to fan out consumer to all Exporters, then the processor itself becomes a\n\t\t// consumer for the one that precedes it in the pipeline and so on.\n\t\tfor i := len(pipeline.Processors) - 1; i >= 0; i-- {\n\t\t\tprocID := pipeline.Processors[i]\n\n\t\t\tproc, err := buildProcessor(ctx, set.Telemetry, set.BuildInfo, set.ProcessorConfigs, set.ProcessorFactories, procID, pipelineID, bp.lastConsumer)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tbp.processors[i] = builtComponent{id: procID, comp: proc}\n\t\t\tbp.lastConsumer = proc.(baseConsumer)\n\t\t\tmutatesConsumedData = mutatesConsumedData || bp.lastConsumer.Capabilities().MutatesData\n\t\t}\n\n\t\t// Some consumers may not correctly implement the Capabilities, and ignore the next consumer when calculated the Capabilities.\n\t\t// Because of this wrap the first consumer if any consumers in the pipeline mutate the data and the first says that it doesn't.\n\t\tswitch pipelineID.Type() {\n\t\tcase config.TracesDataType:\n\t\t\tbp.lastConsumer = capTraces{Traces: bp.lastConsumer.(consumer.Traces), cap: consumer.Capabilities{MutatesData: mutatesConsumedData}}\n\t\tcase config.MetricsDataType:\n\t\t\tbp.lastConsumer = capMetrics{Metrics: bp.lastConsumer.(consumer.Metrics), cap: consumer.Capabilities{MutatesData: mutatesConsumedData}}\n\t\tcase config.LogsDataType:\n\t\t\tbp.lastConsumer = capLogs{Logs: bp.lastConsumer.(consumer.Logs), cap: consumer.Capabilities{MutatesData: mutatesConsumedData}}\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"create cap consumer in pipeline %q, data type %q is not supported\", pipelineID, pipelineID.Type())\n\t\t}\n\n\t\t// The data type of the pipeline defines what data type each exporter is expected to receive.\n\t\tif _, ok := receiversConsumers[pipelineID.Type()]; !ok {\n\t\t\treceiversConsumers[pipelineID.Type()] = make(map[config.ComponentID][]baseConsumer)\n\t\t}\n\t\trecvConsByID := receiversConsumers[pipelineID.Type()]\n\t\t// Iterate over all Receivers for this pipeline and just append the lastConsumer as a consumer for the receiver.\n\t\tfor _, recvID := range pipeline.Receivers {\n\t\t\trecvConsByID[recvID] = append(recvConsByID[recvID], bp.lastConsumer)\n\t\t}\n\t}\n\n\t// Now that we built the `receiversConsumers` map, we can build the receivers as well.\n\tfor pipelineID, pipeline := range set.PipelineConfigs {\n\t\t// The data type of the pipeline defines what data type each exporter is expected to receive.\n\t\tif _, ok := exps.allReceivers[pipelineID.Type()]; !ok {\n\t\t\texps.allReceivers[pipelineID.Type()] = make(map[config.ComponentID]component.Receiver)\n\t\t}\n\t\trecvByID := exps.allReceivers[pipelineID.Type()]\n\t\tbp := exps.pipelines[pipelineID]\n\n\t\t// Iterate over all Receivers for this pipeline.\n\t\tfor i, recvID := range pipeline.Receivers {\n\t\t\t// If already created a receiver for this [DataType, ComponentID] nothing to do.\n\t\t\tif exp, ok := recvByID[recvID]; ok {\n\t\t\t\tbp.receivers[i] = builtComponent{id: recvID, comp: exp}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\trecv, err := buildReceiver(ctx, set.Telemetry, set.BuildInfo, set.ReceiverConfigs, set.ReceiverFactories, recvID, pipelineID, receiversConsumers[pipelineID.Type()][recvID])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tbp.receivers[i] = builtComponent{id: recvID, comp: recv}\n\t\t\trecvByID[recvID] = recv\n\t\t}\n\t}\n\treturn exps, nil\n}", "func (wa *WebAPI) buildPipelines(p Push, rawPush []byte, f dinghyfile.Downloader, w 
http.ResponseWriter, dinghyLog dinghylog.DinghyLog) {\n\t// see if we have any configurations for this repo.\n\t// if we do have configurations, see if this is the branch we want to use. If it's not, skip and return.\n\tvar validation bool\n\tif rc := wa.Config.GetRepoConfig(p.Name(), p.Repo()); rc != nil {\n\t\tif !p.IsBranch(rc.Branch) {\n\t\t\tdinghyLog.Infof(\"Received request from branch %s. Does not match configured branch %s. Proceeding as validation.\", p.Branch(), rc.Branch)\n\t\t\tvalidation = true\n\t\t}\n\t} else {\n\t\t// if we didn't find any configurations for this repo, proceed with master\n\t\tdinghyLog.Infof(\"Found no custom configuration for repo: %s, proceeding with master\", p.Repo())\n\t\tif !p.IsMaster() {\n\t\t\tdinghyLog.Infof(\"Skipping Spinnaker pipeline update because this branch (%s) is not master. Proceeding as validation.\", p.Branch())\n\t\t\tvalidation = true\n\t\t}\n\t}\n\n\tdinghyLog.Infof(\"Processing request for branch: %s\", p.Branch())\n\n\t// deserialze push data to a map. used in template logic later\n\trawPushData := make(map[string]interface{})\n\tif err := json.Unmarshal(rawPush, &rawPushData); err != nil {\n\t\tdinghyLog.Errorf(\"unable to deserialze raw data to map\")\n\t}\n\n\t// Construct a pipeline builder using provided downloader\n\tbuilder := &dinghyfile.PipelineBuilder{\n\t\tDownloader: f,\n\t\tDepman: wa.Cache,\n\t\tTemplateRepo: wa.Config.TemplateRepo,\n\t\tTemplateOrg: wa.Config.TemplateOrg,\n\t\tDinghyfileName: wa.Config.DinghyFilename,\n\t\tDeleteStalePipelines: false,\n\t\tAutolockPipelines: wa.Config.AutoLockPipelines,\n\t\tClient: wa.Client,\n\t\tEventClient: wa.EventClient,\n\t\tLogger: dinghyLog,\n\t\tUms: wa.Ums,\n\t\tNotifiers: wa.Notifiers,\n\t\tPushRaw: rawPushData,\n\t\tRepositoryRawdataProcessing: wa.Config.RepositoryRawdataProcessing,\n\t\tAction: pipebuilder.Process,\n\t}\n\n\tif validation {\n\t\tbuilder.Client = wa.ClientReadOnly\n\t\tbuilder.Depman = wa.CacheReadOnly\n\t\tbuilder.Action = pipebuilder.Validate\n\t}\n\n\tbuilder.Parser = wa.Parser\n\tbuilder.Parser.SetBuilder(builder)\n\n\t// Process the push.\n\tdinghyLog.Info(\"Processing Push\")\n\terr := wa.ProcessPush(p, builder)\n\tif err == dinghyfile.ErrMalformedJSON {\n\t\tutil.WriteHTTPError(w, http.StatusUnprocessableEntity, err)\n\t\tdinghyLog.Errorf(\"ProcessPush Failed (malformed JSON): %s\", err.Error())\n\t\tsaveLogEventError(wa.LogEventsClient, p, dinghyLog, logevents.LogEvent{RawData: string(rawPush)})\n\t\treturn\n\t} else if err != nil {\n\t\tdinghyLog.Errorf(\"ProcessPush Failed (other): %s\", err.Error())\n\t\tutil.WriteHTTPError(w, http.StatusInternalServerError, err)\n\t\tsaveLogEventError(wa.LogEventsClient, p, dinghyLog, logevents.LogEvent{RawData: string(rawPush)})\n\t\treturn\n\t}\n\n\t// Check if we're in a template repo\n\tif p.Repo() == wa.Config.TemplateRepo {\n\t\t// Set status to pending while we process modules\n\t\tp.SetCommitStatus(git.StatusPending, git.DefaultMessagesByBuilderAction[builder.Action][git.StatusPending])\n\n\t\t// For each module pushed, rebuild dependent dinghyfiles\n\t\tfor _, file := range p.Files() {\n\t\t\tif err := builder.RebuildModuleRoots(p.Org(), p.Repo(), file, p.Branch()); err != nil {\n\t\t\t\tswitch err.(type) {\n\t\t\t\tcase *util.GitHubFileNotFoundErr:\n\t\t\t\t\tutil.WriteHTTPError(w, http.StatusNotFound, err)\n\t\t\t\tdefault:\n\t\t\t\t\tutil.WriteHTTPError(w, http.StatusInternalServerError, err)\n\t\t\t\t}\n\t\t\t\tp.SetCommitStatus(git.StatusError, \"Rebuilding dependent dinghyfiles 
Failed\")\n\t\t\t\tdinghyLog.Errorf(\"RebuildModuleRoots Failed: %s\", err.Error())\n\t\t\t\tsaveLogEventError(wa.LogEventsClient, p, dinghyLog, logevents.LogEvent{RawData: string(rawPush)})\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tp.SetCommitStatus(git.StatusSuccess, git.DefaultMessagesByBuilderAction[builder.Action][git.StatusSuccess])\n\t}\n\n\t// Only save event if changed files were in repo or it was having a dinghyfile\n\t// TODO: If a template repo is having files not related with dinghy an event will be saved\n\tif p.Repo() == wa.Config.TemplateRepo {\n\t\tif len(p.Files()) > 0 {\n\t\t\tsaveLogEventSuccess(wa.LogEventsClient, p, dinghyLog, logevents.LogEvent{RawData: string(rawPush)})\n\t\t}\n\t} else {\n\t\tdinghyfiles := []string{}\n\t\tfor _, currfile := range p.Files() {\n\t\t\tif filepath.Base(currfile) == builder.DinghyfileName {\n\t\t\t\tdinghyfiles = append(dinghyfiles, currfile)\n\t\t\t}\n\t\t}\n\t\tif len(dinghyfiles) > 0 {\n\t\t\tsaveLogEventSuccess(wa.LogEventsClient, p, dinghyLog, logevents.LogEvent{RawData: string(rawPush), Files: dinghyfiles})\n\t\t}\n\t}\n\tw.Write([]byte(`{\"status\":\"accepted\"}`))\n}", "func (injector *InterfaceImplementationInjector) Inject(\n\tdef TypeDefinition, implementations ...*InterfaceImplementation) (TypeDefinition, error) {\n\tresult := def\n\n\tfor _, impl := range implementations {\n\t\tvar err error\n\t\tresult, err = injector.visitor.VisitDefinition(result, impl)\n\t\tif err != nil {\n\t\t\treturn TypeDefinition{}, err\n\t\t}\n\t}\n\n\treturn result, nil\n}", "func init() {\n\tRegisterProxifiedApplication(NewEnvProxySetter())\n}", "func Inject(target interface{}) Option {\n\tv := reflect.ValueOf(target)\n\n\tif t := v.Type(); t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct {\n\t\treturn Invoke(func() error {\n\t\t\treturn fmt.Errorf(\"Inject expected a pointer to a struct, got a %v\", t)\n\t\t})\n\t}\n\n\tv = v.Elem()\n\tt := v.Type()\n\n\t// We generate a function with one argument for each field in the target\n\t// struct.\n\n\targTypes := make([]reflect.Type, 0, t.NumField())\n\n\t// List of values in the target struct aligned with the arguments of the\n\t// generated function.\n\t//\n\t// So for example, if the target is,\n\t//\n\t// \tvar target struct {\n\t// \t\tFoo io.Reader\n\t// \t\tbar []byte\n\t// \t\tBaz io.Writer\n\t// \t}\n\t//\n\t// The generated function has the shape,\n\t//\n\t// \tfunc(io.Reader, io.Writer)\n\t//\n\t// And `targets` is,\n\t//\n\t// \t[\n\t// \t\ttarget.Field(0), // Foo io.Reader\n\t// \t\ttarget.Field(2), // Baz io.Writer\n\t// \t]\n\t//\n\t// As we iterate through the arguments received by the function, we can\n\t// simply copy the value into the corresponding value in the targets list.\n\ttargets := make([]reflect.Value, 0, t.NumField())\n\n\tfor i := 0; i < t.NumField(); i++ {\n\t\tf := t.Field(i)\n\t\t// Skip private fields.\n\t\tif f.PkgPath != \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\targTypes = append(argTypes, f.Type)\n\t\ttargets = append(targets, v.Field(i))\n\t}\n\n\t// Equivalent to,\n\t//\n\t// \tfunc(foo Foo, bar Bar) {\n\t// \t\ttarget.Foo = foo\n\t// \t\ttarget.Bar = bar\n\t// \t}\n\n\tfn := reflect.MakeFunc(\n\t\treflect.FuncOf(argTypes, nil /* results */, false /* variadic */),\n\t\tfunc(args []reflect.Value) []reflect.Value {\n\t\t\tfor i, arg := range args {\n\t\t\t\ttargets[i].Set(arg)\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t)\n\n\treturn Invoke(fn.Interface())\n}", "func (t *Tangle) Inject(s *Object, tip bool) error {\n\terr := t.verifySite(s.Site)\n\tif err != nil 
{\n\t\treturn err\n\t}\n\treturn t.addSite(s, tip)\n}", "func construct(ctx context.Context, req *pulumirpc.ConstructRequest, engineConn *grpc.ClientConn,\n\tconstructF constructFunc) (*pulumirpc.ConstructResponse, error) {\n\n\t// Configure the RunInfo.\n\trunInfo := RunInfo{\n\t\tProject: req.GetProject(),\n\t\tStack: req.GetStack(),\n\t\tConfig: req.GetConfig(),\n\t\tConfigSecretKeys: req.GetConfigSecretKeys(),\n\t\tParallel: int(req.GetParallel()),\n\t\tDryRun: req.GetDryRun(),\n\t\tMonitorAddr: req.GetMonitorEndpoint(),\n\t\tengineConn: engineConn,\n\t}\n\tpulumiCtx, err := NewContext(ctx, runInfo)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"constructing run context\")\n\t}\n\n\t// Deserialize the inputs and apply appropriate dependencies.\n\tinputDependencies := req.GetInputDependencies()\n\tdeserializedInputs, err := plugin.UnmarshalProperties(\n\t\treq.GetInputs(),\n\t\tplugin.MarshalOptions{KeepSecrets: true, KeepResources: true, KeepUnknowns: req.GetDryRun()},\n\t)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"unmarshaling inputs\")\n\t}\n\tinputs := make(map[string]interface{}, len(deserializedInputs))\n\tfor key, value := range deserializedInputs {\n\t\tk := string(key)\n\t\tvar deps []Resource\n\t\tif inputDeps, ok := inputDependencies[k]; ok {\n\t\t\tdeps = make([]Resource, len(inputDeps.GetUrns()))\n\t\t\tfor i, depURN := range inputDeps.GetUrns() {\n\t\t\t\tdeps[i] = pulumiCtx.newDependencyResource(URN(depURN))\n\t\t\t}\n\t\t}\n\n\t\tinputs[k] = &constructInput{\n\t\t\tvalue: value,\n\t\t\tdeps: deps,\n\t\t}\n\t}\n\n\t// Rebuild the resource options.\n\taliases := make([]Alias, len(req.GetAliases()))\n\tfor i, urn := range req.GetAliases() {\n\t\taliases[i] = Alias{URN: URN(urn)}\n\t}\n\tdependencyURNs := urnSet{}\n\tfor _, urn := range req.GetDependencies() {\n\t\tdependencyURNs.add(URN(urn))\n\t}\n\tproviders := make(map[string]ProviderResource, len(req.GetProviders()))\n\tfor pkg, ref := range req.GetProviders() {\n\t\tresource, err := createProviderResource(pulumiCtx, ref)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tproviders[pkg] = resource\n\t}\n\tvar parent Resource\n\tif req.GetParent() != \"\" {\n\t\tparent = pulumiCtx.newDependencyResource(URN(req.GetParent()))\n\t}\n\topts := resourceOption(func(ro *resourceOptions) {\n\t\tro.Aliases = aliases\n\t\tro.DependsOn = []func(ctx context.Context) (urnSet, error){\n\t\t\tfunc(ctx context.Context) (urnSet, error) {\n\t\t\t\treturn dependencyURNs, nil\n\t\t\t},\n\t\t}\n\t\tro.Protect = req.GetProtect()\n\t\tro.Providers = providers\n\t\tro.Parent = parent\n\t})\n\n\turn, state, err := constructF(pulumiCtx, req.GetType(), req.GetName(), inputs, opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Wait for async work to finish.\n\tif err = pulumiCtx.wait(); err != nil {\n\t\treturn nil, err\n\t}\n\n\trpcURN, _, _, err := urn.ToURNOutput().awaitURN(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Serialize all state properties, first by awaiting them, and then marshaling them to the requisite gRPC values.\n\tresolvedProps, propertyDeps, _, err := marshalInputs(state)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"marshaling properties\")\n\t}\n\n\t// Marshal all properties for the RPC call.\n\tkeepUnknowns := req.GetDryRun()\n\trpcProps, err := plugin.MarshalProperties(\n\t\tresolvedProps,\n\t\tplugin.MarshalOptions{KeepSecrets: true, KeepUnknowns: keepUnknowns, KeepResources: pulumiCtx.keepResources})\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"marshaling 
properties\")\n\t}\n\n\t// Convert the property dependencies map for RPC and remove duplicates.\n\trpcPropertyDeps := make(map[string]*pulumirpc.ConstructResponse_PropertyDependencies)\n\tfor k, deps := range propertyDeps {\n\t\tsort.Slice(deps, func(i, j int) bool { return deps[i] < deps[j] })\n\n\t\turns := make([]string, 0, len(deps))\n\t\tfor i, d := range deps {\n\t\t\tif i > 0 && urns[i-1] == string(d) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\turns = append(urns, string(d))\n\t\t}\n\n\t\trpcPropertyDeps[k] = &pulumirpc.ConstructResponse_PropertyDependencies{\n\t\t\tUrns: urns,\n\t\t}\n\t}\n\n\treturn &pulumirpc.ConstructResponse{\n\t\tUrn: string(rpcURN),\n\t\tState: rpcProps,\n\t\tStateDependencies: rpcPropertyDeps,\n\t}, nil\n}", "func NewServiceInjector(secretClient client.SecretsClient, serviceClient client.ServicesClient) functions.ServiceInjector {\n\treturn &serviceInjector{\n\t\tsecretClient: secretClient,\n\t\tserviceClient: serviceClient,\n\t}\n}", "func newGenericInjectionController(ctx context.Context, groupName string, mgr ctrl.Manager,\n\tsetup injectorSetup, sources []caDataSource, ca cache.Cache,\n\tclient client.Client) (controller.Controller, error) {\n\tlog := ctrl.Log.WithName(groupName).WithName(setup.resourceName)\n\ttyp := setup.injector.NewTarget().AsObject()\n\n\tc, err := controller.NewUnmanaged(\n\t\tfmt.Sprintf(\"controller-for-%s-%s\", groupName, setup.resourceName),\n\t\tmgr,\n\t\tcontroller.Options{\n\t\t\tReconciler: &genericInjectReconciler{\n\t\t\t\tClient: client,\n\t\t\t\tsources: sources,\n\t\t\t\tlog: log.WithName(\"generic-inject-reconciler\"),\n\t\t\t\tresourceName: setup.resourceName,\n\t\t\t\tinjector: setup.injector,\n\t\t\t},\n\t\t\tLog: log,\n\t\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := c.Watch(source.NewKindWithCache(typ, ca), &handler.EnqueueRequestForObject{}); err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, s := range sources {\n\t\tif err := s.ApplyTo(ctx, mgr, setup, c, ca); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn c, nil\n}", "func (oi *offsetInjector) deploy(ctx context.Context) error {\n\tif err := oi.c.RunE(ctx, oi.c.All(), \"test -x ./bumptime\"); err == nil {\n\t\toi.deployed = true\n\t\treturn nil\n\t}\n\n\tif err := oi.c.Install(ctx, oi.c.l, oi.c.All(), \"ntp\"); err != nil {\n\t\treturn err\n\t}\n\tif err := oi.c.Install(ctx, oi.c.l, oi.c.All(), \"gcc\"); err != nil {\n\t\treturn err\n\t}\n\tif err := oi.c.RunL(ctx, oi.c.l, oi.c.All(), \"sudo\", \"service\", \"ntp\", \"stop\"); err != nil {\n\t\treturn err\n\t}\n\tif err := oi.c.RunL(ctx, oi.c.l,\n\t\toi.c.All(),\n\t\t\"curl\",\n\t\t\"--retry\", \"3\",\n\t\t\"--fail\",\n\t\t\"--show-error\",\n\t\t\"-kO\",\n\t\t\"https://raw.githubusercontent.com/cockroachdb/jepsen/master/cockroachdb/resources/bumptime.c\",\n\t); err != nil {\n\t\treturn err\n\t}\n\tif err := oi.c.RunL(ctx, oi.c.l,\n\t\toi.c.All(), \"gcc\", \"bumptime.c\", \"-o\", \"bumptime\", \"&&\", \"rm bumptime.c\",\n\t); err != nil {\n\t\treturn err\n\t}\n\toi.deployed = true\n\treturn nil\n}", "func Build(args ...string) {\n wd, _ := os.Getwd()\n logger.Log(fmt.Sprintf(\"In %s to build.\", wd))\n if cfg != nil {\n logger.Log(fmt.Sprintf(\"Building...%s\\n\", cfg.App))\n cmd := exec.Command(\"docker\", \"build\", \"-t\", cfg.Container, cfg.BuildFile)\n cmd.Stdout = os.Stdout\n cmd.Stderr = os.Stderr\n cmd.Stdin = os.Stdin\n cmd.Run()\n } else {\n config.LoadConfigs()\n for _, process := range config.List() {\n SetProcess(process)\n SetConfig(config.Process(process))\n Build(args...)\n 
}\n }\n}", "func (c *Container) Register(value interface{}) {\n\tif c.Len() == 0 {\n\t\tc.Add(value)\n\t\treturn\n\t}\n\n\t// create injector\n\tinjector := Struct(value, *c...)\n\n\t// inject dependencies to value\n\tinjector.Inject(value)\n\n\tc.AddOnce(value)\n}", "func (b *ManagerBuilder) Build() *Manager {\n\tb.services = append([]security.UserInfoService{b.UsersBuilder.Build()}, b.services...)\n\treturn b.Manager\n}", "func (s *LogService) Inject(logger Logger) *LogService {\n\ts.logger = logger\n\treturn s\n}", "func contextInjector(baseCtx func() context.Context) grpcutil.UnifiedServerInterceptor {\n\treturn func(ctx context.Context, fullMethod string, handler func(ctx context.Context) error) error {\n\t\treturn handler(&mergedCtx{ctx, baseCtx()})\n\t}\n}", "func (c *Config) Build() *Godim {\n\tif c.appProfile == nil {\n\t\tc.appProfile = newAppProfile()\n\t}\n\tc.appProfile.lock()\n\tif c.activateES {\n\t\tc.eventSwitch = NewEventSwitch(c.bufferSize)\n\t}\n\treturn NewGodim(c)\n}", "func init() {\n\tpctx.Import(\"android/soong/android\")\n\tRegisterPrebuiltEtcBuildComponents(android.InitRegistrationContext)\n}" ]
[ "0.6409702", "0.62989783", "0.6237891", "0.6081254", "0.5925314", "0.5865492", "0.5862993", "0.5773957", "0.57736605", "0.56715137", "0.5591116", "0.5471734", "0.54399365", "0.5416204", "0.53920984", "0.5386215", "0.537239", "0.5371537", "0.536091", "0.5348586", "0.5323686", "0.5321513", "0.5282367", "0.52652025", "0.5240356", "0.5231229", "0.5189564", "0.5173955", "0.5173025", "0.5152816", "0.51419425", "0.5139347", "0.511931", "0.5100405", "0.50988346", "0.5071116", "0.5068981", "0.5066509", "0.50601727", "0.5060073", "0.50557286", "0.5055437", "0.5054344", "0.50511646", "0.50309914", "0.5026243", "0.50256157", "0.50168794", "0.5013522", "0.5001316", "0.49975133", "0.49869895", "0.49859992", "0.49550703", "0.49402082", "0.4936638", "0.49005464", "0.4896808", "0.48874944", "0.4881822", "0.4880045", "0.48761243", "0.48727292", "0.4869522", "0.48664197", "0.4862476", "0.4846495", "0.48449433", "0.483706", "0.4836669", "0.4828933", "0.48266178", "0.48193976", "0.48157328", "0.4813848", "0.48024738", "0.4798527", "0.47957742", "0.47946838", "0.47943127", "0.47910634", "0.47888514", "0.47860035", "0.47843164", "0.47798952", "0.4776618", "0.47749153", "0.4764538", "0.4754534", "0.4752557", "0.4749038", "0.47489202", "0.47416756", "0.47398487", "0.47304696", "0.47296646", "0.4725777", "0.47187287", "0.47155547", "0.47137" ]
0.5705451
9
AFunc1In calls the stored function 'a_bit_of_everything.a_func_1_in(number) number' on db.
func AFunc1In(ctx context.Context, db DB, aParam int64) (int64, error) { // call a_bit_of_everything.a_func_1_in const sqlstr = `SELECT a_bit_of_everything.a_func_1_in(:1) FROM dual` // run var r0 int64 logf(sqlstr, aParam) if err := db.QueryRowContext(ctx, sqlstr, aParam).Scan(&r0); err != nil { return 0, logerror(err) } return r0, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func A1In1Out(ctx context.Context, db DB, aParam int) (int, error) {\n\t// At the moment, the Go MySQL driver does not support stored procedures\n\t// with out parameters\n\treturn 0, fmt.Errorf(\"unsupported\")\n}", "func FuncIn() {\n\tsimlog.FuncIn()\n}", "func (l *logger) FuncIn() {\n\tif !l.isDebug {\n\t\treturn\n\t}\n\tl.printLog(\"[DEBUG]\", \"%s\", \"IN\")\n}", "func funcAsin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {\n\treturn simpleFunc(vals, enh, math.Asin)\n}", "func (m *Message) IN1() (*IN1, error) {\n\tps, err := m.Parse(\"IN1\")\n\tpst, ok := ps.(*IN1)\n\tif ok {\n\t\treturn pst, err\n\t}\n\treturn nil, err\n}", "func (s *BasevhdlListener) EnterFunction_call_or_indexed_name_part(ctx *Function_call_or_indexed_name_partContext) {\n}", "func Function1x1[I0, R0 any](doFn func(I0) R0) {\n\truntime.RegisterFunction(doFn)\n\tregisterMethodTypes(reflect.TypeOf(doFn))\n\tcaller := func(fn any) reflectx.Func {\n\t\tf := fn.(func(I0) R0)\n\t\treturn &caller1x1[I0, R0]{fn: f}\n\t}\n\treflectx.RegisterFunc(reflect.TypeOf((*func(I0) R0)(nil)).Elem(), caller)\n}", "func f1(key, sqn, rand, opc, amf []byte) ([]byte, []byte, error) {\n\t// TEMP = E_K(RAND XOR OP_C)\n\ttemp, err := encrypt(key, xor(rand, opc))\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// IN1 = SQN || AMF || SQN || AMF\n\tvar in1 = make([]byte, 0, ExpectedOpcBytes)\n\tin1 = append(in1, sqn...)\n\tin1 = append(in1, amf...)\n\tin1 = append(in1, in1...)\n\n\tconst rotationBytes = 8 // Constant from 3GPP 35.206 4.1\n\n\t// OUT1 = E_K(TEMP XOR rotate(IN1 XOR OP_C, r1) XOR c1) XOR OP_C\n\tout1, err := encrypt(key, xor(temp, rotate(xor(in1, opc), rotationBytes)))\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tout1 = xor(out1, opc)\n\n\t// MAC-A = f1 = OUT1[0] .. OUT1[63]\n\t// MAC-S = f1* = OUT1[64] .. 
OUT1[127]\n\treturn out1[:8], out1[8:], nil\n}", "func Function8x1[I0, I1, I2, I3, I4, I5, I6, I7, R0 any](doFn func(I0, I1, I2, I3, I4, I5, I6, I7) R0) {\n\truntime.RegisterFunction(doFn)\n\tregisterMethodTypes(reflect.TypeOf(doFn))\n\tcaller := func(fn any) reflectx.Func {\n\t\tf := fn.(func(I0, I1, I2, I3, I4, I5, I6, I7) R0)\n\t\treturn &caller8x1[I0, I1, I2, I3, I4, I5, I6, I7, R0]{fn: f}\n\t}\n\treflectx.RegisterFunc(reflect.TypeOf((*func(I0, I1, I2, I3, I4, I5, I6, I7) R0)(nil)).Elem(), caller)\n}", "func (self *State)Asin(a any)any{\n self.IncOperations(self.coeff[\"asin\"]+self.off[\"asin\"])\n return wrap1(a,math.Asin)\n}", "func (s *BasePlSqlParserListener) EnterFunction_call(ctx *Function_callContext) {}", "func (s *BasePlSqlParserListener) EnterInto_clause1(ctx *Into_clause1Context) {}", "func fn1() {}", "func funcFromFunc() {\n\taddExp := mathExpression()\n\tprintln(addExp(10.0, 20.0))\n}", "func INCB(mr operand.Op) { ctx.INCB(mr) }", "func (s *BaseMySqlParserListener) EnterScalarFunctionName(ctx *ScalarFunctionNameContext) {}", "func Function0x1[R0 any](doFn func() R0) {\n\truntime.RegisterFunction(doFn)\n\tregisterMethodTypes(reflect.TypeOf(doFn))\n\tcaller := func(fn any) reflectx.Func {\n\t\tf := fn.(func() R0)\n\t\treturn &caller0x1[R0]{fn: f}\n\t}\n\treflectx.RegisterFunc(reflect.TypeOf((*func() R0)(nil)).Elem(), caller)\n}", "func Function5x1[I0, I1, I2, I3, I4, R0 any](doFn func(I0, I1, I2, I3, I4) R0) {\n\truntime.RegisterFunction(doFn)\n\tregisterMethodTypes(reflect.TypeOf(doFn))\n\tcaller := func(fn any) reflectx.Func {\n\t\tf := fn.(func(I0, I1, I2, I3, I4) R0)\n\t\treturn &caller5x1[I0, I1, I2, I3, I4, R0]{fn: f}\n\t}\n\treflectx.RegisterFunc(reflect.TypeOf((*func(I0, I1, I2, I3, I4) R0)(nil)).Elem(), caller)\n}", "func Function10x1[I0, I1, I2, I3, I4, I5, I6, I7, I8, I9, R0 any](doFn func(I0, I1, I2, I3, I4, I5, I6, I7, I8, I9) R0) {\n\truntime.RegisterFunction(doFn)\n\tregisterMethodTypes(reflect.TypeOf(doFn))\n\tcaller := func(fn any) reflectx.Func {\n\t\tf := fn.(func(I0, I1, I2, I3, I4, I5, I6, I7, I8, I9) R0)\n\t\treturn &caller10x1[I0, I1, I2, I3, I4, I5, I6, I7, I8, I9, R0]{fn: f}\n\t}\n\treflectx.RegisterFunc(reflect.TypeOf((*func(I0, I1, I2, I3, I4, I5, I6, I7, I8, I9) R0)(nil)).Elem(), caller)\n}", "func (s *BasePlSqlParserListener) EnterNumeric_function_name(ctx *Numeric_function_nameContext) {}", "func add1(x, y int) int{\n return x + y\n}", "func (fn *formulaFuncs) IMSIN(argsList *list.List) formulaArg {\n\tif argsList.Len() != 1 {\n\t\treturn newErrorFormulaArg(formulaErrorVALUE, \"IMSIN requires 1 argument\")\n\t}\n\tvalue := argsList.Front().Value.(formulaArg).Value()\n\tinumber, err := strconv.ParseComplex(str2cmplx(value), 128)\n\tif err != nil {\n\t\treturn newErrorFormulaArg(formulaErrorNUM, err.Error())\n\t}\n\treturn newStringFormulaArg(cmplx2str(cmplx.Sin(inumber), value[len(value)-1:]))\n}", "func FunctionType1(tau1 TypeT, rng TypeT) TypeT {\n\tcarr := []C.type_t{C.type_t(tau1)}\n\treturn TypeT(C.yices_function_type(C.uint32_t(1), (*C.type_t)(&carr[0]), C.type_t(rng)))\n}", "func f1(x int) int {\n\treturn x / 3 * 3\n}", "func functionName1() {\n\tfmt.Println(\"Simple Function\")\n}", "func A2In2Out(ctx context.Context, db DB, paramOne, paramTwo int) (int, int, error) {\n\t// call public.a_2_in_2_out\n\tconst sqlstr = `SELECT * FROM public.a_2_in_2_out($1, $2)`\n\t// run\n\tvar returnOne int\n\tvar returnTwo int\n\tlogf(sqlstr, paramOne, paramTwo)\n\tif err := db.QueryRowContext(ctx, sqlstr, paramOne, paramTwo).Scan(&returnOne, 
&returnTwo); err != nil {\n\t\treturn 0, 0, logerror(err)\n\t}\n\treturn returnOne, returnTwo, nil\n}", "func BenchmarkQueryFilterInt1From1(b *testing.B) {\n\tbenchmarkQueryFilterInt(b, 1, 1)\n}", "func FuncArg(arg1 int) {}", "func (fn Function1) Call(args ...Expression) (Expression, error) {\n\tif len(args) != 1 {\n\t\treturn nil, ErrInvalidArgumentNumber.New(fn.Name, 1, len(args))\n\t}\n\n\treturn fn.Fn(args[0]), nil\n}", "func (self *TStatement) In(field string, args ...interface{}) *TStatement {\r\n\tif len(args) == 0 {\r\n\t\t// FIXME IN Condition must pass at least one arguments\r\n\t\t// TODO report err stack\r\n\t\tlog.Errf(\"IN Condition must pass at least one arguments\")\r\n\t\treturn self\r\n\t}\r\n\r\n\tif self.domain == nil {\r\n\t\tself.domain = domain.NewDomainNode()\r\n\t}\r\n\r\n\tself.domain.IN(field, args...)\r\n\t//cond := domain.New(field, \"IN\", args...)\r\n\t//self.Op(domain.AND_OPERATOR, cond)\r\n\treturn self\r\n}", "func (m *Message) AllIN1() ([]*IN1, error) {\n\tpss, err := m.ParseAll(\"IN1\")\n\treturn pss.([]*IN1), err\n}", "func (*AndExpr) iExpr() {}", "func (q *Query) In(field_name string, val []interface{}) *Query {\n\treturn q.addCondition(field_name, query.OpIn, val)\n}", "func (*AndExpr) iExpr() {}", "func A2In2Out(ctx context.Context, db DB, paramOne, paramTwo int64) (int64, int64, error) {\n\t// call a_bit_of_everything.a_2_in_2_out\n\tconst sqlstr = `BEGIN a_2_in_2_out(:param_one, :param_two, :return_one, :return_two); END;`\n\t// run\n\tvar returnOne int64\n\tvar returnTwo int64\n\tlogf(sqlstr, paramOne, paramTwo)\n\tif _, err := db.ExecContext(ctx, sqlstr, sql.Named(\"param_one\", paramOne), sql.Named(\"param_two\", paramTwo), sql.Out{Dest: &returnOne}, sql.Out{Dest: &returnTwo}); err != nil {\n\t\treturn 0, 0, logerror(err)\n\t}\n\treturn returnOne, returnTwo, nil\n}", "func (s *BaseMySqlParserListener) EnterAggregateWindowedFunction1(ctx *AggregateWindowedFunction1Context) {\n}", "func (statement *Statement) In(column string, args ...interface{}) *Statement {\n\tin := builder.In(statement.quote(column), args...)\n\tstatement.cond = statement.cond.And(in)\n\treturn statement\n}", "func Function1x0[I0 any](doFn func(I0)) {\n\truntime.RegisterFunction(doFn)\n\tregisterMethodTypes(reflect.TypeOf(doFn))\n\tcaller := func(fn any) reflectx.Func {\n\t\tf := fn.(func(I0))\n\t\treturn &caller1x0[I0]{fn: f}\n\t}\n\treflectx.RegisterFunc(reflect.TypeOf((*func(I0))(nil)).Elem(), caller)\n}", "func fnv1(x uint32, list string) uint32 {\n\tfor _, b := range list {\n\t\tx = x*16777619 ^ uint32(b)\n\t}\n\treturn x\n}", "func (s *BaseMySqlParserListener) EnterSimpleFunctionCall(ctx *SimpleFunctionCallContext) {}", "func Function1x5[I0, R0, R1, R2, R3, R4 any](doFn func(I0) (R0, R1, R2, R3, R4)) {\n\truntime.RegisterFunction(doFn)\n\tregisterMethodTypes(reflect.TypeOf(doFn))\n\tcaller := func(fn any) reflectx.Func {\n\t\tf := fn.(func(I0) (R0, R1, R2, R3, R4))\n\t\treturn &caller1x5[I0, R0, R1, R2, R3, R4]{fn: f}\n\t}\n\treflectx.RegisterFunc(reflect.TypeOf((*func(I0) (R0, R1, R2, R3, R4))(nil)).Elem(), caller)\n}", "func Function2x1[I0, I1, R0 any](doFn func(I0, I1) R0) {\n\truntime.RegisterFunction(doFn)\n\tregisterMethodTypes(reflect.TypeOf(doFn))\n\tcaller := func(fn any) reflectx.Func {\n\t\tf := fn.(func(I0, I1) R0)\n\t\treturn &caller2x1[I0, I1, R0]{fn: f}\n\t}\n\treflectx.RegisterFunc(reflect.TypeOf((*func(I0, I1) R0)(nil)).Elem(), caller)\n}", "func (s *BaseSyslParserListener) EnterExpr_first_func(ctx *Expr_first_funcContext) {}", "func (stmt *statement) 
In(args ...interface{}) Statement {\n\tbuf := bytebufferpool.Get()\n\tbuf.WriteString(\"IN (\")\n\tl := len(args) - 1\n\tfor i := range args {\n\t\tif i < l {\n\t\t\tbuf.Write(placeholderComma)\n\t\t} else {\n\t\t\tbuf.Write(placeholder)\n\t\t}\n\t}\n\tbuf.WriteString(\")\")\n\n\tstmt.addPart(posWhere, \"\", bufferToString(&buf.B), args, \" \")\n\n\tbytebufferpool.Put(buf)\n\treturn stmt\n}", "func add1(n int) int {\n\treturn n + 1\n}", "func (c Column) Apply1(fn interface{}, ix index.Int) (interface{}, error) {\n\tswitch t := fn.(type) {\n\tcase func(bool) int:\n\t\tresult := make([]int, len(c.data))\n\t\tfor _, i := range ix {\n\t\t\tresult[i] = t(c.data[i])\n\t\t}\n\t\treturn result, nil\n\tcase func(bool) float64:\n\t\tresult := make([]float64, len(c.data))\n\t\tfor _, i := range ix {\n\t\t\tresult[i] = t(c.data[i])\n\t\t}\n\t\treturn result, nil\n\tcase func(bool) bool:\n\t\tresult := make([]bool, len(c.data))\n\t\tfor _, i := range ix {\n\t\t\tresult[i] = t(c.data[i])\n\t\t}\n\t\treturn result, nil\n\tcase func(bool) *string:\n\t\tresult := make([]*string, len(c.data))\n\t\tfor _, i := range ix {\n\t\t\tresult[i] = t(c.data[i])\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\treturn nil, qerrors.New(c.fnName(\"Apply1\"), \"cannot apply type %#v to column\", fn)\n\t}\n}", "func (session *Session) In(column string, args ...interface{}) *Session {\n\tsession.Statement.In(column, args...)\n\treturn session\n}", "func (s *BasePlSqlParserListener) EnterFunction_name(ctx *Function_nameContext) {}", "func (s *BasePlSqlParserListener) EnterNumeric_function(ctx *Numeric_functionContext) {}", "func (s *BasePlSqlParserListener) EnterFunction_argument_analytic(ctx *Function_argument_analyticContext) {\n}", "func (s *BaseSyslParserListener) EnterExpr_single_arg_func(ctx *Expr_single_arg_funcContext) {}", "func FuncAddArg(arg1 int) {}", "func Function9x1[I0, I1, I2, I3, I4, I5, I6, I7, I8, R0 any](doFn func(I0, I1, I2, I3, I4, I5, I6, I7, I8) R0) {\n\truntime.RegisterFunction(doFn)\n\tregisterMethodTypes(reflect.TypeOf(doFn))\n\tcaller := func(fn any) reflectx.Func {\n\t\tf := fn.(func(I0, I1, I2, I3, I4, I5, I6, I7, I8) R0)\n\t\treturn &caller9x1[I0, I1, I2, I3, I4, I5, I6, I7, I8, R0]{fn: f}\n\t}\n\treflectx.RegisterFunc(reflect.TypeOf((*func(I0, I1, I2, I3, I4, I5, I6, I7, I8) R0)(nil)).Elem(), caller)\n}", "func FileimportIDIn(vs ...int) predicate.Watchlist {\n\tv := make([]interface{}, len(vs))\n\tfor i := range v {\n\t\tv[i] = vs[i]\n\t}\n\treturn predicate.Watchlist(func(s *sql.Selector) {\n\t\t// if not arguments were provided, append the FALSE constants,\n\t\t// since we can't apply \"IN ()\". 
This will make this predicate falsy.\n\t\tif len(v) == 0 {\n\t\t\ts.Where(sql.False())\n\t\t\treturn\n\t\t}\n\t\ts.Where(sql.In(s.C(FieldFileimportID), v...))\n\t})\n}", "func (b BuiltinFunctionsUsage) Inc(scalarFuncSigName string) {\n\tv, ok := b[scalarFuncSigName]\n\tif !ok {\n\t\tb[scalarFuncSigName] = 1\n\t} else {\n\t\tb[scalarFuncSigName] = v + 1\n\t}\n}", "func (s *BaseMySqlParserListener) EnterScalarFunctionCall(ctx *ScalarFunctionCallContext) {}", "func AndIn(clause string, args ...interface{}) QueryMod {\n\treturn func(q *queries.Query) {\n\t\tqueries.AppendIn(q, clause, args...)\n\t}\n}", "func basicFuntionOne() {\n\tfmt.Println(\"Basic function #1\")\n}", "func (s *BaseTdatListener) EnterArithmeticExprInPrimaryExpr(ctx *ArithmeticExprInPrimaryExprContext) {\n}", "func my_math1(num, num1 int) int {\n\tsol := num * num1\n\t// returning as an integer\n\treturn sol\n}", "func Function6x1[I0, I1, I2, I3, I4, I5, R0 any](doFn func(I0, I1, I2, I3, I4, I5) R0) {\n\truntime.RegisterFunction(doFn)\n\tregisterMethodTypes(reflect.TypeOf(doFn))\n\tcaller := func(fn any) reflectx.Func {\n\t\tf := fn.(func(I0, I1, I2, I3, I4, I5) R0)\n\t\treturn &caller6x1[I0, I1, I2, I3, I4, I5, R0]{fn: f}\n\t}\n\treflectx.RegisterFunc(reflect.TypeOf((*func(I0, I1, I2, I3, I4, I5) R0)(nil)).Elem(), caller)\n}", "func BioIn(vs ...string) predicate.User {\n\treturn predicate.User(sql.FieldIn(FieldBio, vs...))\n}", "func (s *BasejossListener) EnterFuncSin(ctx *FuncSinContext) {}", "func (s *BasePlSqlParserListener) EnterFunction_association(ctx *Function_associationContext) {}", "func Function1x2[I0, R0, R1 any](doFn func(I0) (R0, R1)) {\n\truntime.RegisterFunction(doFn)\n\tregisterMethodTypes(reflect.TypeOf(doFn))\n\tcaller := func(fn any) reflectx.Func {\n\t\tf := fn.(func(I0) (R0, R1))\n\t\treturn &caller1x2[I0, R0, R1]{fn: f}\n\t}\n\treflectx.RegisterFunc(reflect.TypeOf((*func(I0) (R0, R1))(nil)).Elem(), caller)\n}", "func BaseIn(vs ...string) predicate.MetaSchema {\n\tv := make([]interface{}, len(vs))\n\tfor i := range v {\n\t\tv[i] = vs[i]\n\t}\n\treturn predicate.MetaSchema(func(s *sql.Selector) {\n\t\t// if not arguments were provided, append the FALSE constants,\n\t\t// since we can't apply \"IN ()\". This will make this predicate falsy.\n\t\tif len(vs) == 0 {\n\t\t\ts.Where(sql.False())\n\t\t\treturn\n\t\t}\n\t\ts.Where(sql.In(s.C(FieldBase), v...))\n\t},\n\t)\n}", "func (s *BaseSyslParserListener) EnterExpr_any_func(ctx *Expr_any_funcContext) {}", "func cpuEXA1() {\n\tif key[_V[getx()]] == 0 {\n\t\tpc += 4\n\t} else {\n\t\tpc += 2\n\t}\n}", "func Update1(fun TermT, arg1 TermT, value TermT) TermT {\n\treturn TermT(C.yices_update1(C.term_t(fun), C.term_t(arg1), C.term_t(value)))\n}", "func (s *BaseMySqlParserListener) EnterSpecificFunctionCall(ctx *SpecificFunctionCallContext) {}", "func (s *BaseMySqlParserListener) EnterStringLiteral1(ctx *StringLiteral1Context) {}", "func mathAsin(ctx phpv.Context, args []*phpv.ZVal) (*phpv.ZVal, error) {\n\tvar f phpv.ZFloat\n\t_, err := core.Expand(ctx, args, &f)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn phpv.ZFloat(math.Asin(float64(f))).ZVal(), nil\n}", "func (s *BaseGraffleParserListener) EnterFunction_call(ctx *Function_callContext) {}", "func FacebookIDIn(vs ...string) predicate.Patient {\n\tv := make([]interface{}, len(vs))\n\tfor i := range v {\n\t\tv[i] = vs[i]\n\t}\n\treturn predicate.Patient(func(s *sql.Selector) {\n\t\t// if not arguments were provided, append the FALSE constants,\n\t\t// since we can't apply \"IN ()\". 
This will make this predicate falsy.\n\t\tif len(vs) == 0 {\n\t\t\ts.Where(sql.False())\n\t\t\treturn\n\t\t}\n\t\ts.Where(sql.In(s.C(FieldFacebookID), v...))\n\t})\n}", "func (s *BasePlSqlParserListener) EnterFunction_argument(ctx *Function_argumentContext) {}", "func (s *BasejossListener) EnterFuncExp(ctx *FuncExpContext) {}", "func Function(name string, col Columnar) ColumnElem {\n\treturn col.Column().AddOperator(FuncClause{Name: name})\n}", "func integerValue(tls *libc.TLS, zArg uintptr) int32 { /* speedtest1.c:210:12: */\n\tvar v sqlite3_int64 = int64(0)\n\tvar i int32\n\tvar isNeg int32 = 0\n\tif int32(*(*int8)(unsafe.Pointer(zArg + uintptr(0)))) == '-' {\n\t\tisNeg = 1\n\t\tzArg++\n\t} else if int32(*(*int8)(unsafe.Pointer(zArg + uintptr(0)))) == '+' {\n\t\tzArg++\n\t}\n\tif (int32(*(*int8)(unsafe.Pointer(zArg + uintptr(0)))) == '0') && (int32(*(*int8)(unsafe.Pointer(zArg + uintptr(1)))) == 'x') {\n\t\tvar x int32\n\t\tzArg += uintptr(2)\n\t\tfor (libc.AssignInt32(&x, hexDigitValue(tls, *(*int8)(unsafe.Pointer(zArg + uintptr(0)))))) >= 0 {\n\t\t\tv = ((v << 4) + sqlite3_int64(x))\n\t\t\tzArg++\n\t\t}\n\t} else {\n\t\tfor libc.Xisdigit(tls, int32(*(*int8)(unsafe.Pointer(zArg + uintptr(0))))) != 0 {\n\t\t\tv = (((v * int64(10)) + sqlite3_int64(*(*int8)(unsafe.Pointer(zArg + uintptr(0))))) - int64('0'))\n\t\t\tzArg++\n\t\t}\n\t}\n\tfor i = 0; uint32(i) < (uint32(unsafe.Sizeof(aMult)) / uint32(unsafe.Sizeof(struct {\n\t\tzSuffix uintptr\n\t\tiMult int32\n\t}{}))); i++ {\n\t\tif sqlite3.Xsqlite3_stricmp(tls, aMult[i].zSuffix, zArg) == 0 {\n\t\t\tv = v * (sqlite3_int64(aMult[i].iMult))\n\t\t\tbreak\n\t\t}\n\t}\n\tif v > int64(0x7fffffff) {\n\t\tfatal_error(tls, ts+2153 /* \"parameter too la...\" */, 0)\n\t}\n\treturn func() int32 {\n\t\tif isNeg != 0 {\n\t\t\treturn int32(-v)\n\t\t}\n\t\treturn int32(v)\n\t}()\n}", "func AlphaFunc(xfunc uint32, ref float32) {\n\tsyscall.Syscall(gpAlphaFunc, 2, uintptr(xfunc), uintptr(math.Float32bits(ref)), 0)\n}", "func (vm *VM) opIn(instr []uint16) int {\n\ta := instr[0] - 32768\n\tchr, err := vm.r.ReadByte()\n\tif err != nil {\n\t\tvm.Status = err.Error()\n\t\treturn 0\n\t}\n\tvm.registers[a] = uint16(chr)\n\treturn 2\n}", "func NameIdIn(vs ...uint) predicate.K8sEvent {\n\tv := make([]interface{}, len(vs))\n\tfor i := range v {\n\t\tv[i] = vs[i]\n\t}\n\treturn predicate.K8sEvent(func(s *sql.Selector) {\n\t\t// if not arguments were provided, append the FALSE constants,\n\t\t// since we can't apply \"IN ()\". 
This will make this predicate falsy.\n\t\tif len(v) == 0 {\n\t\t\ts.Where(sql.False())\n\t\t\treturn\n\t\t}\n\t\ts.Where(sql.In(s.C(FieldNameId), v...))\n\t})\n}", "func (s *BaseSyslParserListener) EnterExpr_func(ctx *Expr_funcContext) {}", "func (pv *ProjectionVisitor) OnEnterFunctionInvocation(name string, distinct bool) error {\n\tif name == \"COUNT\" {\n\t\tpv.functionInvocationContext = new(FunctionInvocationContext)\n\t\tpv.functionInvocationContext.Distinct = distinct\n\t\tpv.functionInvocationContext.FunctionName = name\n\t\tpv.Aggregation = true\n\t} else {\n\t\treturn fmt.Errorf(\"Function %s is not supported\", name)\n\t}\n\treturn nil\n}", "func (s *BaseSyslParserListener) EnterFunc_arg(ctx *Func_argContext) {}", "func op_PUSH1(pc *uint64, in *interpreter, ctx *callCtx) uint64 {\n\tvar (\n\t\tcodeLen = uint64(len(ctx.contract.Code))\n\t\tinteger = new(uint256.Int)\n\t)\n\t*pc++\n\tif *pc < codeLen {\n\t\tto_push := integer.SetUint64(uint64(ctx.contract.Code[*pc]))\n\t\tctx.stack.Push(to_push)\n\t} else {\n\t\tctx.stack.Push(integer.Clear())\n\t}\n\treturn 0\n}", "func (s *BaseGraffleParserListener) EnterBuiltin_function_call(ctx *Builtin_function_callContext) {}", "func UPidIn(vs ...string) predicate.OnlineSession {\n\tv := make([]interface{}, len(vs))\n\tfor i := range v {\n\t\tv[i] = vs[i]\n\t}\n\treturn predicate.OnlineSession(func(s *sql.Selector) {\n\t\t// if not arguments were provided, append the FALSE constants,\n\t\t// since we can't apply \"IN ()\". This will make this predicate falsy.\n\t\tif len(v) == 0 {\n\t\t\ts.Where(sql.False())\n\t\t\treturn\n\t\t}\n\t\ts.Where(sql.In(s.C(FieldUPid), v...))\n\t})\n}", "func (t *SimpleChaincode) Query(stub *shim.ChaincodeStub, function string, args []string) ([]byte, error) {\n\tfmt.Println(\"query is running \" + function)\n\n\t// Handle different functions\n\tif function == \"read\" { //read a variable\n\t\treturn t.read(stub, args)\n\t} else if function == \"getLoc\" {\n\t//\ti,err := strconv.Atoi(args[0])\n\t//\tfmt.Println(err); \n\t\treturn t.getLoc(stub, args);\n\t\t \n\t} else if function == \"getNumberOfLocs\" {\n\t\n\t\treturn t.getNumberOfLocs(stub, args);\n\t}\n\t\n\tfmt.Println(\"query did not find func: \" + function)\n\n\treturn nil, errors.New(\"Received unknown function query\")\n}", "func (s *BaseMySqlParserListener) EnterConvertedDataType1(ctx *ConvertedDataType1Context) {}", "func (s *BasePlSqlParserListener) EnterC_agent_in_clause(ctx *C_agent_in_clauseContext) {}", "func fibonacci01() func() int {\n\tfN, xN := 1, 0\n\treturn func() int {\n\t\tfN, xN = xN, fN+xN\n\t\treturn fN\n\t}\n}", "func fn2() {}", "func Int8ToStringIn(vs ...string) predicate.Conversion {\n\tv := make([]interface{}, len(vs))\n\tfor i := range v {\n\t\tv[i] = vs[i]\n\t}\n\treturn predicate.Conversion(func(s *sql.Selector) {\n\t\t// if not arguments were provided, append the FALSE constants,\n\t\t// since we can't apply \"IN ()\". 
This will make this predicate falsy.\n\t\tif len(v) == 0 {\n\t\t\ts.Where(sql.False())\n\t\t\treturn\n\t\t}\n\t\ts.Where(sql.In(s.C(FieldInt8ToString), v...))\n\t})\n}", "func f1(arg int) (int, error) {\n\tif arg == 42 {\n\t\treturn -1, errors.New(\"can’t work with 42\")\n\t}\n\n\treturn arg + 3, nil\n}", "func BenchmarkFNV1a(b *testing.B) {\n\tstr := \"[email protected]\"\n\tbytes := []byte(str)\n\th := fnv.New32a()\n\tfor i := 0; i < b.N; i++ {\n\t\th.Reset()\n\t\th.Write(bytes)\n\t\t_ = h.Sum([]byte{})\n\t}\n}", "func d1_gen() func() int {\n\ta, b := 1, 2\n\treturn func() int {\n\t\ta, b = a+b, b+2\n\t\treturn a\n\t}\n}", "func HandleIn(em *Emulator, a int, b int) {\n addr := em.GetReg(b)\n \n if em.getPortAccess(addr) {\n data := em.LoadIOPort(addr)\n em.SetReg(a, data)\n em.LogInstruction(\"in %s, %s -- ports[0x%02X] = 0x%02X\", RegisterNames[a],\n RegisterNames[b], addr, data)\n \n } else {\n em.LogInstruction(\"in %s, %s -- not authorised\", RegisterNames[a], RegisterNames[b])\n }\n \n em.timer += 6;\n}", "func (f *File) evalInfixExpFunc(ctx *calcContext, sheet, cell string, token, nextToken efp.Token, opfStack, opdStack, opftStack, opfdStack, argsStack *Stack) formulaArg {\n\tif !isFunctionStopToken(token) {\n\t\treturn newEmptyFormulaArg()\n\t}\n\tprepareEvalInfixExp(opfStack, opftStack, opfdStack, argsStack)\n\t// call formula function to evaluate\n\targ := callFuncByName(&formulaFuncs{f: f, sheet: sheet, cell: cell, ctx: ctx}, strings.NewReplacer(\n\t\t\"_xlfn.\", \"\", \".\", \"dot\").Replace(opfStack.Peek().(efp.Token).TValue),\n\t\t[]reflect.Value{reflect.ValueOf(argsStack.Peek().(*list.List))})\n\tif arg.Type == ArgError && opfStack.Len() == 1 {\n\t\treturn arg\n\t}\n\targsStack.Pop()\n\topftStack.Pop() // remove current function separator\n\topfStack.Pop()\n\tif opfStack.Len() > 0 { // still in function stack\n\t\tif nextToken.TType == efp.TokenTypeOperatorInfix || (opftStack.Len() > 1 && opfdStack.Len() > 0) {\n\t\t\t// mathematics calculate in formula function\n\t\t\topfdStack.Push(arg)\n\t\t} else {\n\t\t\targsStack.Peek().(*list.List).PushBack(arg)\n\t\t}\n\t} else {\n\t\tval := arg.Value()\n\t\tif arg.Type == ArgMatrix && len(arg.Matrix) > 0 && len(arg.Matrix[0]) > 0 {\n\t\t\tval = arg.Matrix[0][0].Value()\n\t\t}\n\t\topdStack.Push(newStringFormulaArg(val))\n\t}\n\treturn newEmptyFormulaArg()\n}", "func (s *BaselimboListener) EnterFunction_name_part(ctx *Function_name_partContext) {}", "func BenchmarkReg1(b *testing.B) {\n\tbenchmarkQuery(`select ?p, ?o as ?o1 from ?test where {/u<joe> ?p ?o};`, b)\n}" ]
[ "0.59811527", "0.5737294", "0.5608908", "0.542825", "0.52071387", "0.50139046", "0.5005405", "0.49846172", "0.49317142", "0.49308378", "0.48855373", "0.48657376", "0.4847981", "0.48454347", "0.48078746", "0.48072076", "0.47299212", "0.47262934", "0.47253907", "0.47093183", "0.47026718", "0.46915492", "0.46862593", "0.46767145", "0.467464", "0.46729052", "0.4667813", "0.46531957", "0.46516937", "0.46482724", "0.46403265", "0.46396977", "0.46325377", "0.46275124", "0.4592304", "0.45893225", "0.45804018", "0.4574927", "0.45637396", "0.45485038", "0.45421097", "0.45197552", "0.44930452", "0.4487466", "0.44852376", "0.4471353", "0.44678733", "0.4455959", "0.4455553", "0.44465366", "0.44407502", "0.44395596", "0.4439429", "0.44353312", "0.4431242", "0.44301286", "0.44293475", "0.44238275", "0.44215468", "0.44178805", "0.44082406", "0.4397623", "0.43960917", "0.43951067", "0.43932238", "0.43883166", "0.43774322", "0.4372596", "0.43686804", "0.4367666", "0.4364545", "0.43387753", "0.43372595", "0.43330193", "0.43298113", "0.43237773", "0.43216845", "0.43082044", "0.43080977", "0.4308059", "0.43059272", "0.43005094", "0.42959073", "0.42938516", "0.42913288", "0.42873487", "0.42839158", "0.42837468", "0.42788863", "0.42786312", "0.42773485", "0.4272552", "0.42692456", "0.42682552", "0.4267829", "0.42616767", "0.4259275", "0.42579028", "0.4257081", "0.4250796" ]
0.86705595
0
XXH64 returns new hash.Hash64
func XXH64(seed uint64) hash.Hash64 { d := &digest64{seed: seed, buf: new(bytes.Buffer)} d.Reset() return d }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (this *XXHash64) Hash(data []byte) uint64 {\n\tend := len(data)\n\tvar h64 uint64\n\tn := 0\n\n\tif end >= 32 {\n\t\tend32 := end - 32\n\t\tv1 := this.seed + _XXHASH_PRIME64_1 + _XXHASH_PRIME64_2\n\t\tv2 := this.seed + _XXHASH_PRIME64_2\n\t\tv3 := this.seed\n\t\tv4 := this.seed - _XXHASH_PRIME64_1\n\n\t\tfor n <= end32 {\n\t\t\tbuf := data[n : n+32]\n\t\t\tv1 = xxHash64Round(v1, binary.LittleEndian.Uint64(buf[0:8]))\n\t\t\tv2 = xxHash64Round(v2, binary.LittleEndian.Uint64(buf[8:16]))\n\t\t\tv3 = xxHash64Round(v3, binary.LittleEndian.Uint64(buf[16:24]))\n\t\t\tv4 = xxHash64Round(v4, binary.LittleEndian.Uint64(buf[24:32]))\n\t\t\tn += 32\n\t\t}\n\n\t\th64 = ((v1 << 1) | (v1 >> 31)) + ((v2 << 7) | (v2 >> 25)) +\n\t\t\t((v3 << 12) | (v3 >> 20)) + ((v4 << 18) | (v4 >> 14))\n\n\t\th64 = xxHash64MergeRound(h64, v1)\n\t\th64 = xxHash64MergeRound(h64, v2)\n\t\th64 = xxHash64MergeRound(h64, v3)\n\t\th64 = xxHash64MergeRound(h64, v4)\n\t} else {\n\t\th64 = this.seed + _XXHASH_PRIME64_5\n\t}\n\n\th64 += uint64(end)\n\n\tfor n+8 <= end {\n\t\th64 ^= xxHash64Round(0, binary.LittleEndian.Uint64(data[n:n+8]))\n\t\th64 = ((h64<<27)|(h64>>37))*_XXHASH_PRIME64_1 + _XXHASH_PRIME64_4\n\t\tn += 8\n\t}\n\n\tfor n+4 <= end {\n\t\th64 ^= (uint64(binary.LittleEndian.Uint32(data[n:n+4])) * _XXHASH_PRIME64_1)\n\t\th64 = ((h64<<23)|(h64>>41))*_XXHASH_PRIME64_2 + _XXHASH_PRIME64_3\n\t\tn += 4\n\t}\n\n\tfor n < end {\n\t\th64 += (uint64(data[n]) * _XXHASH_PRIME64_5)\n\t\th64 = ((h64 << 11) | (h64 >> 53)) * _XXHASH_PRIME64_1\n\t\tn++\n\t}\n\n\th64 ^= (h64 >> 33)\n\th64 *= _XXHASH_PRIME64_2\n\th64 ^= (h64 >> 29)\n\th64 *= _XXHASH_PRIME64_3\n\treturn h64 ^ (h64 >> 32)\n}", "func New64() hash.Hash64 { return New64WithSeed(0) }", "func NewXXHash64(seed uint64) (*XXHash64, error) {\n\tthis := new(XXHash64)\n\tthis.seed = seed\n\treturn this, nil\n}", "func HashXXH3_64(input []byte, seed uint64) (result uint64) {\n\treturn parser.HashXXH3_64(input, seed)\n}", "func (s *Store) Hash64() uint64 {\n\treturn s.txthash\n}", "func (xxh *xxHash) Sum64() uint64 {\n\tvar h64 uint64\n\tif xxh.totalLen >= 32 {\n\t\th64 = ((xxh.v1 << 1) | (xxh.v1 >> 63)) +\n\t\t\t((xxh.v2 << 7) | (xxh.v2 >> 57)) +\n\t\t\t((xxh.v3 << 12) | (xxh.v3 >> 52)) +\n\t\t\t((xxh.v4 << 18) | (xxh.v4 >> 46))\n\n\t\txxh.v1 *= prime64_2\n\t\th64 ^= ((xxh.v1 << 31) | (xxh.v1 >> 33)) * prime64_1\n\t\th64 = h64*prime64_1 + prime64_4\n\n\t\txxh.v2 *= prime64_2\n\t\th64 ^= ((xxh.v2 << 31) | (xxh.v2 >> 33)) * prime64_1\n\t\th64 = h64*prime64_1 + prime64_4\n\n\t\txxh.v3 *= prime64_2\n\t\th64 ^= ((xxh.v3 << 31) | (xxh.v3 >> 33)) * prime64_1\n\t\th64 = h64*prime64_1 + prime64_4\n\n\t\txxh.v4 *= prime64_2\n\t\th64 ^= ((xxh.v4 << 31) | (xxh.v4 >> 33)) * prime64_1\n\t\th64 = h64*prime64_1 + prime64_4 + xxh.totalLen\n\t} else {\n\t\th64 = xxh.seed + prime64_5 + xxh.totalLen\n\t}\n\n\tp := 0\n\tn := xxh.bufused\n\tfor n := n - 8; p <= n; p += 8 {\n\t\tp64 := (uint64(xxh.buf[p+7])<<56 | uint64(xxh.buf[p+6])<<48 | uint64(xxh.buf[p+5])<<40 | uint64(xxh.buf[p+4])<<32 | uint64(xxh.buf[p+3])<<24 | uint64(xxh.buf[p+2])<<16 | uint64(xxh.buf[p+1])<<8 | uint64(xxh.buf[p])) * prime64_2\n\t\th64 ^= ((p64 << 31) | (p64 >> 33)) * prime64_1\n\t\th64 = ((h64<<27)|(h64>>37))*prime64_1 + prime64_4\n\t}\n\tif p+4 <= n {\n\t\th64 ^= (uint64(xxh.buf[p+3])<<24 | uint64(xxh.buf[p+2])<<16 | uint64(xxh.buf[p+1])<<8 | uint64(xxh.buf[p])) * prime64_1\n\t\th64 = ((h64<<23)|(h64>>41))*prime64_2 + prime64_3\n\t\tp += 4\n\t}\n\tfor ; p < n; p++ {\n\t\th64 ^= uint64(xxh.buf[p]) * prime64_5\n\t\th64 = ((h64 
<< 11) | (h64 >> 53)) * prime64_1\n\t}\n\n\th64 ^= h64 >> 33\n\th64 *= prime64_2\n\th64 ^= h64 >> 29\n\th64 *= prime64_3\n\th64 ^= h64 >> 32\n\n\treturn h64\n}", "func hash64(key, mask uint64) uint64 {\n\tkey = (^key + (key << 21)) & mask\n\tkey = key ^ key>>24\n\tkey = ((key + (key << 3)) + (key << 8)) & mask\n\tkey = key ^ key>>14\n\tkey = ((key + (key << 2)) + (key << 4)) & mask\n\tkey = key ^ key>>28\n\tkey = (key + (key << 31)) & mask\n\treturn key\n}", "func New64(key []byte) (hash.Hash64, error) {\n\tif k := len(key); k != KeySize {\n\t\treturn nil, KeySizeError(k)\n\t}\n\th := new(digest64)\n\th.key[0] = binary.LittleEndian.Uint64(key)\n\th.key[1] = binary.LittleEndian.Uint64(key[8:])\n\th.Reset()\n\treturn h, nil\n}", "func New(seed uint64) hash.Hash64 {\n\txxh := &xxHash{seed: seed}\n\txxh.Reset()\n\treturn xxh\n}", "func hash(key uint64) uint64 {\r\n\tkey ^= key >> 33\r\n\tkey *= 0xff51afd7ed558ccd\r\n\tkey ^= key >> 33\r\n\tkey *= 0xc4ceb9fe1a85ec53\r\n\tkey ^= key >> 33\r\n\treturn key\r\n}", "func Hash(value int64) uint64 {\n\treturn FNVHash64(uint64(value))\n}", "func newSHA256() hash.Hash { return sha256.New() }", "func NewIEEE() hash.Hash32 {}", "func (c *Cache) hash64(b []byte) uint64 {\n\tc.hasher.Reset()\n\tc.hasher.Write(b)\n\treturn c.hasher.Sum64()\n}", "func New() hash.Hash64 {\n\td := new(digest)\n\td.Reset()\n\treturn d\n}", "func hash4x64(u uint64, h uint8) uint32 {\n\treturn (uint32(u) * prime4bytes) >> ((32 - h) & 31)\n}", "func (in *Instance) hash(x, y, mu *big.Int, T uint64) *big.Int {\n\tb := sha512.New()\n\tb.Write(x.Bytes())\n\tb.Write(y.Bytes())\n\tb.Write(mu.Bytes())\n\tbits := make([]byte, 8)\n\tbinary.LittleEndian.PutUint64(bits, T)\n\tb.Write(bits)\n\tres := new(big.Int).SetBytes(b.Sum(nil))\n\tres.Mod(res, in.rsaModulus)\n\treturn res\n}", "func Hash64(p []byte) uint64 {\n\treturn crc64.Checksum(p, table)\n}", "func hash128to64(x Uint128) uint64 {\n\t// Murmur-inspired hashing.\n\tconst kMul uint64 = 0x9ddfea08eb382d69\n\ta := (x.First ^ x.Second) * kMul\n\ta ^= (a >> 47)\n\tb := (x.Second ^ a) * kMul\n\tb ^= (b >> 47)\n\tb *= kMul\n\treturn b\n}", "func combineHash(left *crypto.HashType, right *crypto.HashType) *crypto.HashType {\n\tvar hash [crypto.HashSize * 2]byte\n\tcopy(hash[:crypto.HashSize], left[:])\n\tcopy(hash[crypto.HashSize:], right[:])\n\n\tnewHash := crypto.DoubleHashH(hash[:])\n\treturn &newHash\n}", "func New64(seed uint32) Hash128 {\n\tseed64 := uint64(seed)\n\treturn &sum64_128{seed64, seed64, 0, 0, 0, 0}\n}", "func Hash(length int, key string) int64 {\n\tif key == \"\" {\n\t\treturn 0\n\t}\n\thc := hashCode(key)\n\treturn (hc ^ (hc >> 16)) % int64(length)\n}", "func d2h(val uint64) (result *Key) {\n\tresult = new(Key)\n\tfor i := 0; val > 0; i++ {\n\t\tresult[i] = byte(val & 0xFF)\n\t\tval /= 256\n\t}\n\treturn\n}", "func (h xxhasher) Sum64(key string) uint64 {\n\treturn xxhash.Sum64String(key)\n}", "func Hash64WithSeeds(s []byte, seed0, seed1 uint64) uint64 {\n\treturn hash64Len16(Hash64(s)-seed0, seed1)\n}", "func Hash64(s []byte) uint64 {\n\tn := uint64(len(s))\n\tif n <= 32 {\n\t\tif n <= 16 {\n\t\t\treturn hash64Len0to16(s)\n\t\t}\n\t\treturn hash64Len17to32(s)\n\t} else if n <= 64 {\n\t\treturn hash64Len33to64(s)\n\t}\n\n\t// For strings over 64 bytes we hash the end first, and then as we loop we\n\t// keep 56 bytes of state: v, w, x, y, and z.\n\tx := fetch64(s[n-40:])\n\ty := fetch64(s[n-16:]) + fetch64(s[n-56:])\n\tz := hash64Len16(fetch64(s[n-48:])+n, fetch64(s[n-24:]))\n\n\tv1, v2 := weakHashLen32WithSeeds(s[n-64:], n, 
z)\n\tw1, w2 := weakHashLen32WithSeeds(s[n-32:], y+k1, x)\n\tx = x*k1 + fetch64(s)\n\n\t// Decrease n to the nearest multiple of 64, and operate on 64-byte chunks.\n\tn = (n - 1) &^ 63\n\tfor {\n\t\tx = ror64(x+y+v1+fetch64(s[8:]), 37) * k1\n\t\ty = ror64(y+v2+fetch64(s[48:]), 42) * k1\n\t\tx ^= w2\n\t\ty += v1 + fetch64(s[40:])\n\t\tz = ror64(z+w1, 33) * k1\n\t\tv1, v2 = weakHashLen32WithSeeds(s, v2*k1, x+w1)\n\t\tw1, w2 = weakHashLen32WithSeeds(s[32:], z+w2, y+fetch64(s[16:]))\n\t\tz, x = x, z\n\t\ts = s[64:]\n\t\tn -= 64\n\t\tif n == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn hash64Len16(hash64Len16(v1, w1)+shiftMix(y)*k1+z, hash64Len16(v2, w2)+x)\n}", "func fnv_1_64(key string) uint64 {\n\thash := uint64(14695981039346656037)\n\tconst prime64 = uint64(1099511628211)\n\tfor i := 0; i < len(key); i++ {\n\t\thash *= prime64\n\t\thash ^= uint64(key[i])\n\t}\n\treturn hash\n}", "func fnv_1a_64(key string) uint64 {\n\thash := uint64(14695981039346656037)\n\tconst prime64 = uint64(1099511628211)\n\tfor i := 0; i < len(key); i++ {\n\t\thash ^= uint64(key[i])\n\t\thash *= prime64\n\t}\n\treturn hash\n}", "func FNVHash64(value uint64) uint64 {\n\thash := FNVOffsetBasis64\n\tfor i := 0; i < 8; i++ {\n\t\toctet := value & 0x00FF\n\t\tvalue >>= 8\n\n\t\thash ^= octet\n\t\thash *= FNVPrime64\n\t}\n\treturn hash\n}", "func fnvHash64(data []byte) uint64 {\n\thash := offset64\n\tfor _, c := range data {\n\t\thash *= prime64\n\t\thash ^= uint64(c)\n\t}\n\treturn hash\n}", "func HashASM(k0, k1 uint64, p []byte) uint64", "func NewHash(inner, outer hash.Hash) hash.Hash {\n\treturn &doubleHash{inner, outer}\n}", "func New(bits uint, saltLen uint, extra string) *Hash {\n\th := &Hash{\n\t\thasher: sha1.New(),\n\t\tbits: bits,\n\t\tsaltLen: saltLen,\n\t\textra: extra}\n\th.zeros = uint(math.Ceil(float64(h.bits) / 4.0))\n\treturn h\n}", "func Hash64WithSeed(s []byte, seed uint64) uint64 {\n\treturn Hash64WithSeeds(s, k2, seed)\n}", "func newHashID() (*hashids.HashID, error) {\n\t// Defaults\n\tsalt := \"Best salt\"\n\tminLength := 8\n\n\t// Initiliazing HashID\n\thd := hashids.NewData()\n\thd.Salt = salt\n\thd.MinLength = minLength\n\th, err := hashids.NewWithData(hd)\n\treturn h, err\n}", "func fnv64(key string) uint {\n\tvar hash uint = offset64\n\tfor i := 0; i < len(key); i++ {\n\t\thash ^= uint(key[i])\n\t\thash *= prime64\n\t}\n\treturn hash\n}", "func Hash(k0, k1 uint64, p []byte) uint64 {\n\tvar d digest\n\td.size = Size\n\td.k0 = k0\n\td.k1 = k1\n\td.Reset()\n\td.Write(p)\n\treturn d.Sum64()\n}", "func DJB64(str []byte) uint64 {\n\tvar hash uint64 = 5381\n\tfor i := 0; i < len(str); i++ {\n\t\thash += (hash << 5) + uint64(str[i])\n\t}\n\treturn hash\n}", "func New64() *Digest64 {\n\treturn New64WithSeed(0)\n}", "func NewMockHash64(ctrl *gomock.Controller) *MockHash64 {\n\tmock := &MockHash64{ctrl: ctrl}\n\tmock.recorder = &MockHash64MockRecorder{mock}\n\treturn mock\n}", "func NewHamt64() hamt64.Hamt {\n\treturn hamt64.Hamt{}\n}", "func New(inner, outer crypto.Hash) hash.Hash {\n\treturn &doubleHash{inner.New(), outer.New()}\n}", "func (me TDigestValueType) ToXsdtBase64Binary() xsdt.Base64Binary { return xsdt.Base64Binary(me) }", "func New(tab *Table) hash.Hash64 {}", "func (addr *Address) calcDoubleHash() []byte {\n\treturn Sha512(addr.calcSingleHash())\n}", "func createHash(tokenId []byte) []byte {\n\tu := TOKEN_HASH_BYTES_POOL.Get().([]byte)\n\tdefer TOKEN_HASH_BYTES_POOL.Put(u)\n\n\t//let's generate real hash...\n\tcopy(u, TOKEN_HASH_SECRET)\n\tcopy(u[len(TOKEN_HASH_SECRET):], 
tokenId)\n\n\t//\tlogx.D(\"u:\", base64.URLEncoding.EncodeToString(u), \", len(U):\", len(u))\n\n\thash := sha256.Sum256(u)\n\n\treturn hash[0:32]\n}", "func hash(passphrase, token string, timestamp int64) string {\n\tbase := fmt.Sprintf(\"%s-%s-%d\", passphrase, token, timestamp)\n\treturn fmt.Sprintf(\"%x\", sha512.Sum512([]byte(base)))\n}", "func HTo64(hexString string) string {\n\treturn STo64(HToS(hexString))\n}", "func Hashit(tox string) string {\n h:= sha256.New()\n h.Write([]byte(tox))\n bs := h.Sum([]byte{})\n str := base64.StdEncoding.EncodeToString(bs)\n return str\n}", "func strhash(a unsafe.Pointer, h uintptr) uintptr", "func (p *siprng) Uint64() uint64 {\n\tp.mu.Lock()\n\tif p.ctr == 0 || p.ctr > 8*1024*1024 {\n\t\tp.rekey()\n\t}\n\tv := siphash(p.k0, p.k1, p.ctr)\n\tp.ctr++\n\tp.mu.Unlock()\n\treturn v\n}", "func (store *SessionCookieStore) hash(src []byte) []byte {\n\thash := hmac.New(sha512.New512_256, []byte(store.SigningKey))\n\thash.Write(src)\n\treturn hash.Sum(nil)\n}", "func new256Asm() hash.Hash { return nil }", "func siphash(k0, k1, m uint64) uint64 {\n\t// Initialization.\n\tv0 := k0 ^ 0x736f6d6570736575\n\tv1 := k1 ^ 0x646f72616e646f6d\n\tv2 := k0 ^ 0x6c7967656e657261\n\tv3 := k1 ^ 0x7465646279746573\n\tt := uint64(8) << 56\n\n\t// Compression.\n\tv3 ^= m\n\n\t// Round 1.\n\tv0 += v1\n\tv1 = v1<<13 | v1>>(64-13)\n\tv1 ^= v0\n\tv0 = v0<<32 | v0>>(64-32)\n\n\tv2 += v3\n\tv3 = v3<<16 | v3>>(64-16)\n\tv3 ^= v2\n\n\tv0 += v3\n\tv3 = v3<<21 | v3>>(64-21)\n\tv3 ^= v0\n\n\tv2 += v1\n\tv1 = v1<<17 | v1>>(64-17)\n\tv1 ^= v2\n\tv2 = v2<<32 | v2>>(64-32)\n\n\t// Round 2.\n\tv0 += v1\n\tv1 = v1<<13 | v1>>(64-13)\n\tv1 ^= v0\n\tv0 = v0<<32 | v0>>(64-32)\n\n\tv2 += v3\n\tv3 = v3<<16 | v3>>(64-16)\n\tv3 ^= v2\n\n\tv0 += v3\n\tv3 = v3<<21 | v3>>(64-21)\n\tv3 ^= v0\n\n\tv2 += v1\n\tv1 = v1<<17 | v1>>(64-17)\n\tv1 ^= v2\n\tv2 = v2<<32 | v2>>(64-32)\n\n\tv0 ^= m\n\n\t// Compress last block.\n\tv3 ^= t\n\n\t// Round 1.\n\tv0 += v1\n\tv1 = v1<<13 | v1>>(64-13)\n\tv1 ^= v0\n\tv0 = v0<<32 | v0>>(64-32)\n\n\tv2 += v3\n\tv3 = v3<<16 | v3>>(64-16)\n\tv3 ^= v2\n\n\tv0 += v3\n\tv3 = v3<<21 | v3>>(64-21)\n\tv3 ^= v0\n\n\tv2 += v1\n\tv1 = v1<<17 | v1>>(64-17)\n\tv1 ^= v2\n\tv2 = v2<<32 | v2>>(64-32)\n\n\t// Round 2.\n\tv0 += v1\n\tv1 = v1<<13 | v1>>(64-13)\n\tv1 ^= v0\n\tv0 = v0<<32 | v0>>(64-32)\n\n\tv2 += v3\n\tv3 = v3<<16 | v3>>(64-16)\n\tv3 ^= v2\n\n\tv0 += v3\n\tv3 = v3<<21 | v3>>(64-21)\n\tv3 ^= v0\n\n\tv2 += v1\n\tv1 = v1<<17 | v1>>(64-17)\n\tv1 ^= v2\n\tv2 = v2<<32 | v2>>(64-32)\n\n\tv0 ^= t\n\n\t// Finalization.\n\tv2 ^= 0xff\n\n\t// Round 1.\n\tv0 += v1\n\tv1 = v1<<13 | v1>>(64-13)\n\tv1 ^= v0\n\tv0 = v0<<32 | v0>>(64-32)\n\n\tv2 += v3\n\tv3 = v3<<16 | v3>>(64-16)\n\tv3 ^= v2\n\n\tv0 += v3\n\tv3 = v3<<21 | v3>>(64-21)\n\tv3 ^= v0\n\n\tv2 += v1\n\tv1 = v1<<17 | v1>>(64-17)\n\tv1 ^= v2\n\tv2 = v2<<32 | v2>>(64-32)\n\n\t// Round 2.\n\tv0 += v1\n\tv1 = v1<<13 | v1>>(64-13)\n\tv1 ^= v0\n\tv0 = v0<<32 | v0>>(64-32)\n\n\tv2 += v3\n\tv3 = v3<<16 | v3>>(64-16)\n\tv3 ^= v2\n\n\tv0 += v3\n\tv3 = v3<<21 | v3>>(64-21)\n\tv3 ^= v0\n\n\tv2 += v1\n\tv1 = v1<<17 | v1>>(64-17)\n\tv1 ^= v2\n\tv2 = v2<<32 | v2>>(64-32)\n\n\t// Round 3.\n\tv0 += v1\n\tv1 = v1<<13 | v1>>(64-13)\n\tv1 ^= v0\n\tv0 = v0<<32 | v0>>(64-32)\n\n\tv2 += v3\n\tv3 = v3<<16 | v3>>(64-16)\n\tv3 ^= v2\n\n\tv0 += v3\n\tv3 = v3<<21 | v3>>(64-21)\n\tv3 ^= v0\n\n\tv2 += v1\n\tv1 = v1<<17 | v1>>(64-17)\n\tv1 ^= v2\n\tv2 = v2<<32 | v2>>(64-32)\n\n\t// Round 4.\n\tv0 += v1\n\tv1 = v1<<13 | v1>>(64-13)\n\tv1 ^= v0\n\tv0 = v0<<32 | 
v0>>(64-32)\n\n\tv2 += v3\n\tv3 = v3<<16 | v3>>(64-16)\n\tv3 ^= v2\n\n\tv0 += v3\n\tv3 = v3<<21 | v3>>(64-21)\n\tv3 ^= v0\n\n\tv2 += v1\n\tv1 = v1<<17 | v1>>(64-17)\n\tv1 ^= v2\n\tv2 = v2<<32 | v2>>(64-32)\n\n\treturn v0 ^ v1 ^ v2 ^ v3\n}", "func New() *hash {\n\treturn &hash{m: map[string]reference.I{}}\n}", "func Hash(password []byte) (hash, salt []byte) {\n\tsalt = Salt(saltLength)\n\treturn argon2.IDKey(password, salt, uint32(time), uint32(memory), uint8(threads), uint32(keyLength)), salt\n}", "func Hash128To64(lo, hi uint64) uint64 {\n\t// Murmur-inspired hashing.\n\tconst multiplier = 0x9ddfea08eb382d69\n\n\ta := (lo ^ hi) * multiplier\n\ta ^= (a >> 47)\n\tb := (hi ^ a) * multiplier\n\tb ^= (b >> 47)\n\tb *= multiplier\n\treturn b\n}", "func space_hash(x, y, n uint64) (SpaceMapKey) {\n return SpaceMapKey((x*1640531513 ^ y*2654435789) % n)\n}", "func new512Asm() hash.Hash { return nil }", "func (hasher *SHA256) HashLength() uint {\n\treturn 64\n}", "func memhash(p unsafe.Pointer, h, s uintptr) uintptr", "func memhash(p unsafe.Pointer, h, s uintptr) uintptr", "func hash(key, value string) int64 {\n\thash := siphash.New(sipConst)\n\thash.Write([]byte(key + \":::\" + value))\n\treturn int64(hash.Sum64())\n}", "func new384Asm() hash.Hash { return nil }", "func Hash(key []byte) uint64 {\n\treturn murmur3.Sum64(key)\n}", "func Hash128(s []byte) (lo, hi uint64) {\n\tif len(s) >= 16 {\n\t\treturn Hash128WithSeed(s[16:], fetch64(s), fetch64(s[8:])+k0)\n\t}\n\treturn Hash128WithSeed(s, k0, k1)\n}", "func hash_func(x, y, n HashValue) (HashValue) {\n return (x*1640531513 ^ y*2654435789) % n\n}", "func splitmix64(key uint64) uint64 {\n\tkey = (key ^ (key >> 31) ^ (key >> 62)) * uint64(0x319642b2d24d8ec3)\n\tkey = (key ^ (key >> 27) ^ (key >> 54)) * uint64(0x96de1b173f119089)\n\tkey = key ^ (key >> 30) ^ (key >> 60)\n\treturn key\n}", "func encodeHash(x uint64, p, pPrime uint) (hashCode uint64) {\n\tif x&onesFromTo(64-pPrime, 63-p) == 0 {\n\t\tr := rho(extractShift(x, 0, 63-pPrime))\n\t\treturn concat([]concatInput{\n\t\t\t{x, 64 - pPrime, 63},\n\t\t\t{uint64(r), 0, 5},\n\t\t\t{1, 0, 0}, // this just adds a 1 bit at the end\n\t\t})\n\t} else {\n\t\treturn concat([]concatInput{\n\t\t\t{x, 64 - pPrime, 63},\n\t\t\t{0, 0, 0}, // this just adds a 0 bit at the end\n\t\t})\n\t}\n}", "func createHash(byteStr []byte) []byte {\n\tvar hashVal hash.Hash\n\thashVal = sha1.New()\n\thashVal.Write(byteStr)\n\n\tvar bytes []byte\n\n\tbytes = hashVal.Sum(nil)\n\treturn bytes\n}", "func expand(private kyber.Scalar) (kyber.Scalar) {\n h := testSuite.Hash()\n private.MarshalTo(h)\n hash := h.Sum(nil)\n hash[0] &= 248\n hash[31] &= 63\n hash[31] |= 64\n m := testSuite.Scalar().Zero()\n m.UnmarshalBinary(hash[:32])\n return m\n}", "func expand(private kyber.Scalar) (kyber.Scalar) {\n h := testSuite.Hash()\n private.MarshalTo(h)\n hash := h.Sum(nil)\n hash[0] &= 248\n hash[31] &= 63\n hash[31] |= 64\n m := testSuite.Scalar().Zero()\n m.UnmarshalBinary(hash[:32])\n return m\n}", "func newShake128Asm() ShakeHash {\n\treturn nil\n}", "func Hash(s string, maxKey uint64) Key {\n\th := fnv.New64a()\n\th.Write([]byte(s))\n\treturn NewKey(h.Sum64() % maxKey)\n}", "func (n *notifier) hash(other *memberlist.Node) uint64 {\n\treturn uint64(murmur.Murmur3([]byte(other.Name), murmur.M3Seed))\n}", "func nilinterhash(a unsafe.Pointer, h uintptr) uintptr", "func PJW64(str []byte) uint64 {\n\tvar (\n\t\tBitsInUnsignedInt uint64 = 4 * 8\n\t\tThreeQuarters uint64 = (BitsInUnsignedInt * 3) / 4\n\t\tOneEighth uint64 = BitsInUnsignedInt / 
8\n\t\tHighBits uint64 = (0xFFFFFFFFFFFFFFFF) << (BitsInUnsignedInt - OneEighth)\n\t\thash uint64 = 0\n\t\ttest uint64 = 0\n\t)\n\tfor i := 0; i < len(str); i++ {\n\t\thash = (hash << OneEighth) + uint64(str[i])\n\t\tif test = hash & HighBits; test != 0 {\n\t\t\thash = (hash ^ (test >> ThreeQuarters)) & (^HighBits + 1)\n\t\t}\n\t}\n\treturn hash\n}", "func strhash0(p unsafe.Pointer, h uintptr) uintptr", "func Hash(b []byte, seed uint64) uint64", "func FNV64(s string) uint64 {\n\treturn uint64Hasher(fnv.New64(), s)\n}", "func (h Hasher) New() hash.Hash {\n\tswitch h {\n\tcase SHA2_224:\n\t\treturn sha256.New224()\n\tcase SHA2_256:\n\t\treturn sha256.New()\n\tcase SHA2_384:\n\t\treturn sha512.New384()\n\tcase SHA2_512:\n\t\treturn sha512.New()\n\tcase SHA3_224:\n\t\treturn sha3.New224()\n\tcase SHA3_256:\n\t\treturn sha3.New256()\n\tcase SHA3_384:\n\t\treturn sha3.New384()\n\tcase SHA3_512:\n\t\treturn sha3.New512()\n\tcase BLAKE2B_256:\n\t\tb, _ := blake2b.New256(nil)\n\t\treturn b\n\tcase BLAKE2B_384:\n\t\tb, _ := blake2b.New384(nil)\n\t\treturn b\n\tcase BLAKE2B_512:\n\t\tb, _ := blake2b.New512(nil)\n\t\treturn b\n\tcase BLAKE3_256:\n\t\treturn blake3.New()\n\tdefault:\n\t\treturn nil\n\t}\n}", "func Sha256d(input []byte) Hash {\n sha := sha256.New()\n sha.Write(input)\n intermediate := sha.Sum(nil)\n sha.Reset()\n sha.Write(intermediate)\n hash, err := HashFromBytes(sha.Sum(nil), LittleEndian)\n if err != nil {\n panic(\"impossible flow, this is a bug: \" + err.Error())\n }\n return hash\n}", "func newHMACSHA256(key []byte) *hmacsha256 {\n\th := new(hmacsha256)\n\th.inner = sha256.New()\n\th.outer = sha256.New()\n\th.initKey(key)\n\treturn h\n}", "func SHA256RNDS2(x, mx, x1 operand.Op) { ctx.SHA256RNDS2(x, mx, x1) }", "func init() {\n\tphpctx.RegisterExt(&phpctx.Ext{\n\t\tName: \"hash\",\n\t\tVersion: core.VERSION,\n\t\tClasses: []phpv.ZClass{\n\t\t\tHashContext,\n\t\t},\n\t\tFunctions: map[string]*phpctx.ExtFunction{\n\t\t\t\"hash\": &phpctx.ExtFunction{Func: fncHash, Args: []*phpctx.ExtFunctionArg{}},\n\t\t\t\"hash_algos\": &phpctx.ExtFunction{Func: fncHashAlgos, Args: []*phpctx.ExtFunctionArg{}},\n\t\t\t\"hash_equals\": &phpctx.ExtFunction{Func: fncHashEquals, Args: []*phpctx.ExtFunctionArg{}},\n\t\t\t\"hash_final\": &phpctx.ExtFunction{Func: fncHashFinal, Args: []*phpctx.ExtFunctionArg{}},\n\t\t\t\"hash_hkdf\": &phpctx.ExtFunction{Func: fncHashHkdf, Args: []*phpctx.ExtFunctionArg{}},\n\t\t\t\"hash_hmac\": &phpctx.ExtFunction{Func: fncHashHmac, Args: []*phpctx.ExtFunctionArg{}},\n\t\t\t\"hash_init\": &phpctx.ExtFunction{Func: fncHashInit, Args: []*phpctx.ExtFunctionArg{}},\n\t\t\t\"hash_pbkdf2\": &phpctx.ExtFunction{Func: fncHashPbkdf2, Args: []*phpctx.ExtFunctionArg{}},\n\t\t\t\"hash_update\": &phpctx.ExtFunction{Func: fncHashUpdate, Args: []*phpctx.ExtFunctionArg{}},\n\t\t},\n\t\tConstants: map[phpv.ZString]phpv.Val{\n\t\t\t\"HASH_HMAC\": phpv.ZInt(1),\n\t\t},\n\t})\n}", "func NewKeccak512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x01} }", "func (dtk *DcmTagKey) Hash() uint32 {\n\treturn ((uint32(int(dtk.group)<<16) & 0xffff0000) | (uint32(int(dtk.element) & 0xffff)))\n}", "func (lx LXRHash) FlatHash(src []byte) []byte {\n\t// Keep the byte intermediate results as int64 values until reduced.\n\ths := make([]uint64, lx.HashSize)\n\t// as accumulates the state as we walk through applying the source data through the lookup map\n\t// and combine it with the state we are building up.\n\tvar as = lx.Seed\n\t// We keep a series of states, and roll them along through 
each byte of source processed.\n\tvar s1, s2, s3 uint64\n\t// Since MapSize is specified in bits, the index mask is the size-1\n\tmk := lx.MapSize - 1\n\n\tidx := uint64(0)\n\t// Fast spin to prevent caching state\n\tfor _, v2 := range src {\n\t\tif idx >= lx.HashSize { // Use an if to avoid modulo math\n\t\t\tidx = 0\n\t\t}\n\n\t\tas, s1, s2, s3 = lx.fastStepf(uint64(v2), as, s1, s2, s3, idx, hs)\n\t\tidx++\n\t}\n\n\tidx = 0\n\t// Actual work to compute the hash\n\tfor _, v2 := range src {\n\t\tif idx >= lx.HashSize { // Use an if to avoid modulo math\n\t\t\tidx = 0\n\t\t}\n\n\t\tas, s1, s2, s3 = lx.stepf(as, s1, s2, s3, uint64(v2), hs, idx, mk)\n\t\tidx++\n\t}\n\n\t// Reduction pass\n\t// Done by Interating over hs[] to produce the bytes[] hash\n\t//\n\t// At this point, we have HBits of state in hs. We need to reduce them down to a byte,\n\t// And we do so by doing a bit more bitwise math, and mapping the values through our byte map.\n\n\tbytes := make([]byte, lx.HashSize)\n\t// Roll over all the hs (one int64 value for every byte in the resulting hash) and reduce them to byte values\n\tfor i := len(hs) - 1; i >= 0; i-- {\n\t\tas, s1, s2, s3 = lx.stepf(as, s1, s2, s3, uint64(hs[i]), hs, uint64(i), mk)\n\t\tbytes[i] = lx.ByteMap[as&mk] ^ lx.ByteMap[hs[i]&mk] // Xor two resulting sequences\n\t}\n\n\t// Return the resulting hash\n\treturn bytes\n}", "func hash(stav Stav) uint64{\n\tstr := \"\"\n\n\tfor i := 0; i < len(stav.Auta); i++ {\n\t\tstr += stav.Auta[i].Farba\n\t\tstr += strconv.Itoa(int(stav.Auta[i].X))\n\t\tstr += strconv.Itoa(int(stav.Auta[i].Y))\n\t\tstr += strconv.FormatBool(stav.Auta[i].Smer)\n\t\tstr += strconv.Itoa(int(stav.Auta[i].Dlzka))\n\t}\n\n\th := fnv.New64a()\n\th.Write([]byte(str))\n\treturn h.Sum64()\n\n}", "func (lx LXRHash) Hash(src []byte) []byte {\n\t// Keep the byte intermediate results as int64 values until reduced.\n\ths := make([]uint64, lx.HashSize)\n\t// as accumulates the state as we walk through applying the source data through the lookup map\n\t// and combine it with the state we are building up.\n\tvar as = lx.Seed\n\t// We keep a series of states, and roll them along through each byte of source processed.\n\tvar s1, s2, s3 uint64\n\t// Since MapSize is specified in bits, the index mask is the size-1\n\tmk := lx.MapSize - 1\n\n\tB := func(v uint64) uint64 { return uint64(lx.ByteMap[v&mk]) }\n\tb := func(v uint64) byte { return byte(B(v)) }\n\n\tfaststep := func(v2 uint64, idx uint64) {\n\t\tb := B(as ^ v2)\n\t\tas = as<<7 ^ as>>5 ^ v2<<20 ^ v2<<16 ^ v2 ^ b<<20 ^ b<<12 ^ b<<4\n\t\ts1 = s1<<9 ^ s1>>3 ^ hs[idx]\n\t\ths[idx] = s1 ^ as\n\t\ts1, s2, s3 = s3, s1, s2\n\t}\n\n\t// Define a function to move the state by one byte. This is not intended to be fast\n\t// Requires the previous byte read to process the next byte read. Forces serial evaluation\n\t// and removes the possibility of scheduling byte access.\n\t//\n\t// (Note that use of _ = 0 in lines below are to keep go fmt from messing with comments on the right of the page)\n\tstep := func(v2 uint64, idx uint64) {\n\t\ts1 = s1<<9 ^ s1>>1 ^ as ^ B(as>>5^v2)<<3 // Shifts are not random. 
They are selected to ensure that\n\t\ts1 = s1<<5 ^ s1>>3 ^ B(s1^v2)<<7 // Prior bytes pulled from the ByteMap contribute to the\n\t\ts1 = s1<<7 ^ s1>>7 ^ B(as^s1>>7)<<5 // next access of the ByteMap, either by contributing to\n\t\ts1 = s1<<11 ^ s1>>5 ^ B(v2^as>>11^s1)<<27 // the lower bits of the index, or in the upper bits that\n\t\t_ = 0 // move the access further in the map.\n\t\ths[idx] = s1 ^ as ^ hs[idx]<<7 ^ hs[idx]>>13 //\n\t\t_ = 0 // We also pay attention not only to where the ByteMap bits\n\t\tas = as<<17 ^ as>>5 ^ s1 ^ B(as^s1>>27^v2)<<3 // are applied, but what bits we use in the indexing of\n\t\tas = as<<13 ^ as>>3 ^ B(as^s1)<<7 // the ByteMap\n\t\tas = as<<15 ^ as>>7 ^ B(as>>7^s1)<<11 //\n\t\tas = as<<9 ^ as>>11 ^ B(v2^as^s1)<<3 // Tests run against this set of shifts show that the\n\t\t_ = 0 // bytes pulled from the ByteMap are evenly distributed\n\t\ts1 = s1<<7 ^ s1>>27 ^ as ^ B(as>>3)<<13 // over possible byte values (0-255) and indexes into\n\t\ts1 = s1<<3 ^ s1>>13 ^ B(s1^v2)<<11 // the ByteMap are also evenly distributed, and the\n\t\ts1 = s1<<8 ^ s1>>11 ^ B(as^s1>>11)<<9 // deltas between bytes provided map to a curve expected\n\t\ts1 = s1<<6 ^ s1>>9 ^ B(v2^as^s1)<<3 // (fewer maximum and minimum deltas, and most deltas around\n\t\t_ = 0 // zero.\n\t\tas = as<<23 ^ as>>3 ^ s1 ^ B(as^v2^s1>>3)<<7\n\t\tas = as<<17 ^ as>>7 ^ B(as^s1>>3)<<5\n\t\tas = as<<13 ^ as>>5 ^ B(as>>5^s1)<<1\n\t\tas = as<<11 ^ as>>1 ^ B(v2^as^s1)<<7\n\n\t\ts1 = s1<<5 ^ s1>>3 ^ as ^ B(as>>7^s1>>3)<<6\n\t\ts1 = s1<<8 ^ s1>>6 ^ B(s1^v2)<<11\n\t\ts1 = s1<<11 ^ s1>>11 ^ B(as^s1>>11)<<5\n\t\ts1 = s1<<7 ^ s1>>5 ^ B(v2^as>>7^as^s1)<<17\n\n\t\ts2 = s2<<3 ^ s2>>17 ^ s1 ^ B(as^s2>>5^v2)<<13\n\t\ts2 = s2<<6 ^ s2>>13 ^ B(s2)<<11\n\t\ts2 = s2<<11 ^ s2>>11 ^ B(as^s1^s2>>11)<<23\n\t\ts2 = s2<<4 ^ s2>>23 ^ B(v2^as>>8^as^s2>>10)<<1\n\n\t\ts1 = s2<<3 ^ s2>>1 ^ hs[idx] ^ v2\n\t\tas = as<<9 ^ as>>7 ^ s1>>1 ^ B(s2>>1^hs[idx])<<5\n\n\t\ts1, s2, s3 = s3, s1, s2\n\t}\n\n\tidx := uint64(0)\n\t// Fast spin to prevent caching state\n\tfor _, v2 := range src {\n\t\tif idx >= lx.HashSize { // Use an if to avoid modulo math\n\t\t\tidx = 0\n\t\t}\n\t\tfaststep(uint64(v2), idx)\n\t\tidx++\n\t}\n\n\tidx = 0\n\t// Actual work to compute the hash\n\tfor _, v2 := range src {\n\t\tif idx >= lx.HashSize { // Use an if to avoid modulo math\n\t\t\tidx = 0\n\t\t}\n\t\tstep(uint64(v2), idx)\n\t\tidx++\n\t}\n\n\t// Reduction pass\n\t// Done by Interating over hs[] to produce the bytes[] hash\n\t//\n\t// At this point, we have HBits of state in hs. 
We need to reduce them down to a byte,\n\t// And we do so by doing a bit more bitwise math, and mapping the values through our byte map.\n\n\tbytes := make([]byte, lx.HashSize)\n\t// Roll over all the hs (one int64 value for every byte in the resulting hash) and reduce them to byte values\n\tfor i := len(hs) - 1; i >= 0; i-- {\n\t\tstep(hs[i], uint64(i)) // Step the hash functions and then\n\t\tbytes[i] = b(as) ^ b(hs[i]) // Xor two resulting sequences\n\t}\n\n\t// Return the resulting hash\n\treturn bytes\n}", "func DERToHash(der string) string {\n\n bites, _ := base64.StdEncoding.DecodeString(der)\n \n return Bashit(bites)\n\n}", "func Hash(mdfcge []byte) [32]byte {\n\treturn sha256.Sum256(mdfcge)\n}", "func new224Asm() hash.Hash { return nil }", "func New() hash.Hash {\n\td := new(digest)\n\td.Reset()\n\treturn d\n}", "func New() hash.Hash {\n\td := new(digest)\n\td.Reset()\n\treturn d\n}", "func New() hash.Hash {\n\td := new(digest)\n\td.Reset()\n\treturn d\n}", "func New() hash.Hash {\n\td := new(digest)\n\td.Reset()\n\treturn d\n}", "func DefaultHashFn(data []byte) uint64 {\n\tfn := fnv.New64a()\n\tsum := fn.Sum(data)\n\tfn.Reset()\n\tfn.Write(sum)\n\treturn fn.Sum64()\n}", "func hmacHash(msg, key []byte) uint64 {\n\tmac := hmac.New(sha256.New, key)\n\tmac.Write(msg)\n\tres := binary.BigEndian.Uint64(mac.Sum(nil))\n\treturn res\n}", "func (e Aes128CtsHmacSha256128) GetHashFunc() func() hash.Hash {\n\treturn sha256.New\n}" ]
[ "0.708379", "0.6949178", "0.6914841", "0.6849021", "0.6677332", "0.66591746", "0.664148", "0.65541524", "0.6538839", "0.64558864", "0.64174473", "0.6362205", "0.6334637", "0.6288086", "0.6278777", "0.62381715", "0.6187761", "0.6118733", "0.6098435", "0.6039612", "0.603536", "0.60332304", "0.60301864", "0.5995436", "0.59695756", "0.596104", "0.5948482", "0.5948266", "0.592495", "0.5880767", "0.587929", "0.5871833", "0.58667564", "0.58441573", "0.58298546", "0.5823322", "0.58115935", "0.5808229", "0.5804333", "0.5786776", "0.5773701", "0.5759038", "0.5747068", "0.57396275", "0.5717247", "0.56970894", "0.56922454", "0.5691284", "0.5680581", "0.5674942", "0.56690097", "0.56672794", "0.5660636", "0.56425965", "0.56393343", "0.56141037", "0.5613188", "0.56109214", "0.560761", "0.56051713", "0.5597629", "0.5597629", "0.55909175", "0.5579548", "0.5572036", "0.55694324", "0.55690694", "0.55658865", "0.55429065", "0.5526157", "0.5521157", "0.5521157", "0.5508601", "0.5507909", "0.5504343", "0.5503882", "0.55014527", "0.550057", "0.5493082", "0.5483766", "0.5483164", "0.5482092", "0.54779404", "0.54779017", "0.54758465", "0.54620427", "0.5459604", "0.545828", "0.5456548", "0.54546636", "0.5453328", "0.5441255", "0.5434052", "0.5428842", "0.5428842", "0.5428842", "0.5428842", "0.5427832", "0.54269284", "0.54266524" ]
0.7710503
0
Attack implements the brokenrsa method against ciphertext in multiple keys.
func Attack(ks []*keys.RSA, ch chan error) { k := ks[0] if k.CipherText == nil { ch <- fmt.Errorf("invalid arguments for attack %s: this attack requires the ciphertext", name) return } d, u, _ := ln.XGCD(k.Key.PublicKey.E, k.Key.N) if !d.Equals(ln.BigOne) { ch <- fmt.Errorf("n and e were not coprime so %s attack will not work: GCE(e,n) == %v", name, d) return } ct := ln.BytesToNumber(k.CipherText) pt := new(fmp.Fmpz).Mul(ct, u) k.PlainText = ln.NumberToBytes(pt.Mod(pt, k.Key.N)) ch <- nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestChallenge12(test *testing.T) {\n\t// Feed identical bytes of your-string to the function 1 at a time --- start with 1 byte (\"A\"),\n\t// then \"AA\", then \"AAA\" and so on. Discover the block size of the cipher. You know it, but do this step anyway.\n\toracle := challenge12Oracle{key(16)}\n\tblockSize, err := unsafeaes.DetectBlockSize(oracle)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif blockSize != 16 {\n\t\ttest.Errorf(\"Expected block size to be 16 but was %d\", blockSize)\n\t}\n\n\t// Detect that the function is using ECB. You already know, but do this step anyways.\n\tmode, err := unsafeaes.DetectMode(oracle)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif mode != \"ECB\" {\n\t\ttest.Errorf(\"Expected to detect AES mode ECB, but instead detected %s\", mode)\n\t}\n\n\t// Knowing the block size, craft an input block that is exactly 1 byte short (for instance, if the block size is\n\t// 8 bytes, make \"AAAAAAA\"). Think about what the oracle function is going to put in that last byte position.\n\tplaintextSize, err := findTextLength(oracle)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// round up to the nearest full block size, so we have enough capacity\n\t// in our chosen text to slurp up the target text char by char\n\tblocks := (plaintextSize / blockSize) + 1\n\tplaintext := make([]byte, 0, plaintextSize)\n\tattackSize := blocks * blockSize\n\n\tfor i := 1; i < plaintextSize; i++ {\n\t\tchosentext := make([]byte, attackSize-i)\n\t\t// Make a dictionary of every possible last byte by feeding different strings to the oracle; for instance, \"AAAAAAAA\",\n\t\t// \"AAAAAAAB\", \"AAAAAAAC\", remembering the first block of each invocation.\n\t\tlastbyte := make(map[string]byte)\n\n\t\tfor b := 0; b < 256; b++ {\n\t\t\tknowntext := append(chosentext, plaintext...)\n\t\t\ttesttext := append(knowntext, byte(b))\n\t\t\tciphertext, err := oracle.Encrypt(testtext)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tattackBlocks := ciphertext[:attackSize]\n\t\t\tlastbyte[encodings.BytesToHex(attackBlocks)] = byte(b)\n\t\t}\n\n\t\tciphertext, err := oracle.Encrypt(chosentext)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\t// Match the output of the one-byte-short input to one of the entries in your dictionary.\n\t\t// You've now discovered the first byte of unknown-string.\n\t\tattackBlocks := ciphertext[:attackSize]\n\t\tdecodedByte := lastbyte[encodings.BytesToHex(attackBlocks)]\n\t\tplaintext = append(plaintext, decodedByte)\n\t}\n\n\texpected := \"Rollin' in my 5.0\\n\" +\n\t\t\"With my rag-top down so my hair can blow\\n\" +\n\t\t\"The girlies on standby waving just to say hi\\n\" +\n\t\t\"Did you stop? 
No, I just drove by\"\n\tif string(plaintext) != expected {\n\t\ttest.Errorf(\"Expected:\\n%s\\nActual:\\n%s\\n\", expected, string(plaintext))\n\t}\n}", "func BreakCTRManually() {\n\tnonce := Key(8)\n\tkey := Key(16)\n\n\tlines, err := ReadFileByLine(\"input/19.txt\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tvar ciphertexts [][]byte\n\tfor _, line := range lines {\n\t\tplaintext, _ := base64.StdEncoding.DecodeString(line)\n\t\tciphertext := CTR_Cipher(plaintext, key, nonce)\n\t\tciphertexts = append(ciphertexts, ciphertext)\n\t}\n\n\tinput, err := ReadFileByLine(\"input/19_guesses.txt\")\n\tif err != nil {\n\t\treturn\n\t}\n\tfirst := []byte(input[0])\n\n\tvar ciphertext []byte\n\tfor _, ct := range ciphertexts {\n\t\tif len(ct) > len(ciphertext) {\n\t\t\tciphertext = ct\n\t\t}\n\t}\n\n\tvar guessedKey []byte\n\n\tfor index, letter := range first {\n\t\tciphertextByte := ciphertext[index]\n\t\tkeyByte := ciphertextByte ^ byte(letter)\n\t\tguessedKey = append(guessedKey, keyByte)\n\t}\n\tfor _, ct := range ciphertexts {\n\t\tlongest := len(ct)\n\t\tif len(guessedKey) < longest {\n\t\t\tlongest = len(guessedKey)\n\t\t}\n\t\tlog.Println(string(XOr(guessedKey[:longest], ct[:longest])))\n\t}\n}", "func (k *Kyber) Encrypt(packedPK, msg, r []byte) []byte {\n\n\tif len(msg) < n/8 {\n\t\tprintln(\"Message is too short to be encrypted.\")\n\t\treturn nil\n\t}\n\n\tif len(packedPK) != k.SIZEPK() {\n\t\tprintln(\"Cannot encrypt with this public key.\")\n\t\treturn nil\n\t}\n\n\tif len(r) != SEEDBYTES {\n\t\tr = make([]byte, SEEDBYTES)\n\t\trand.Read(r[:])\n\t}\n\n\tK := k.params.K\n\tpk := k.UnpackPK(packedPK)\n\tAhat := expandSeed(pk.Rho[:], true, K)\n\n\tsp := make(Vec, K)\n\tfor i := 0; i < K; i++ {\n\t\tsp[i] = polyGetNoise(k.params.ETA1, r[:], byte(i))\n\t\tsp[i].ntt()\n\t\tsp[i].reduce()\n\t}\n\tep := make(Vec, K)\n\tfor i := 0; i < K; i++ {\n\t\tep[i] = polyGetNoise(eta2, r[:], byte(i+K))\n\t\tep[i].ntt()\n\t}\n\tepp := polyGetNoise(eta2, r[:], byte(2*K))\n\tepp.ntt()\n\n\tu := make(Vec, K)\n\tfor i := 0; i < K; i++ {\n\t\tu[i] = vecPointWise(Ahat[i], sp, K)\n\t\tu[i].toMont()\n\t\tu[i] = add(u[i], ep[i])\n\t\tu[i].invntt()\n\t\tu[i].reduce()\n\t\tu[i].fromMont()\n\t}\n\n\tm := polyFromMsg(msg)\n\tm.ntt()\n\n\tv := vecPointWise(pk.T, sp, K)\n\tv.toMont()\n\tv = add(v, epp)\n\tv = add(v, m)\n\tv.invntt()\n\tv.reduce()\n\tv.fromMont()\n\n\tc := make([]byte, k.params.SIZEC)\n\tcopy(c[:], u.compress(k.params.DU, K))\n\tcopy(c[K*k.params.DU*n/8:], v.compress(k.params.DV))\n\treturn c[:]\n}", "func BreakRepeatingKeyXOR(cipher []byte) (mostProbableKey []byte, plaintext []byte) {\n\t// You have to play with the max keysize, it might be bigger than you thought\n\tmaxKeysize := 200\n\tkeyAnalysis := make(map[int]float32, maxKeysize-2)\n\tkeySizes := make([]int, maxKeysize-2)\n\tfor keysize := 2; keysize < maxKeysize; keysize++ {\n\t\tdist, _ := HammingDistance(cipher[:keysize], cipher[keysize:keysize*2])\n\t\tkeyAnalysis[keysize] = float32(dist) / float32(keysize)\n\t\tkeySizes[keysize-2] = keysize\n\t}\n\n\t// Sort key sizes according to normalized hamming distance in ascending order (insertion sort)\n\tfor i := 0; i < (maxKeysize - 2); i++ {\n\t\tj := i\n\t\tfor j > 0 && keyAnalysis[keySizes[j-1]] > keyAnalysis[keySizes[j]] {\n\t\t\tkeySizes[j-1], keySizes[j] = keySizes[j], keySizes[j-1]\n\t\t\tj--\n\t\t}\n\t}\n\n\t// Keep only the 5 most probable key sizes (i.e. 
with the smallest normalized hamming distance)\n\t// You might need to play with this parameter as well\n\tprobableKeySizes := keySizes[:5]\n\n\t// Cut the ciphertext to solve as many 'single-byte XOR' encryption\n\t// as there are bytes in the key\n\tvar bestNormalizedScore float64 = 0\n\tfor _, probableKeySize := range probableKeySizes {\n\t\tcipherBlocks := SplitBytesByMod(cipher, probableKeySize)\n\t\t// Solve each block as if it was a single-character XOR\n\t\tlocalScore := 0\n\t\tprobableKey := make([]byte, probableKeySize)\n\t\tfor i := 0; i < probableKeySize; i++ {\n\t\t\tscore, repeatingKey, _ := SingleByteXorCrackFromByte(cipherBlocks[i])\n\t\t\tprobableKey[i] = repeatingKey\n\t\t\tlocalScore += score\n\t\t}\n\t\tnormalizedScore := float64(localScore) / float64(probableKeySize)\n\t\tif normalizedScore > bestNormalizedScore {\n\t\t\tmostProbableKey = probableKey\n\t\t}\n\t}\n\n\t// Decrypt the cipher with the most probable key\n\tplaintext = RepeatingXOR(mostProbableKey, cipher)\n\n\treturn mostProbableKey, plaintext\n}", "func BruteAttack(charset, cipher string) (guesses []string, err error) {\n\tcsLen := len(charset)\n\tfor i := 0; i < csLen; i++ {\n\t\tnewCipher, err := broken.Caesar(charset, cipher, i)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tguesses = append(guesses, newCipher)\n\t}\n\n\treturn guesses, nil\n}", "func verifyXorKeys(r *bufio.Reader, key1, key2 []byte) (bool, error) {\n\td, err := r.Peek(3)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\t// decrypt the first 3 bytes\n\txorBuff(3, 0, d, key1, key2)\n\tvalid := false\n\tif d[0] == 0 && d[1] == 0 && d[2] == 0 {\n\t\tvalid = true\n\t}\n\t// reverse the previous decryption\n\txorBuff(3, 0, d, key1, key2)\n\treturn valid, nil\n}", "func TestAuthenticationKeyRequest(t *testing.T) {\n\ttestKeys := MakeTestKeys(3)\n\n\t// Give sish a temp directory to generate a server ssh host key\n\tdir, err := os.MkdirTemp(\"\", \"sish_keys\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer os.RemoveAll(dir)\n\tviper.Set(\"private-keys-directory\", dir)\n\tviper.Set(\"authentication\", true)\n\n\ttestCases := []struct {\n\t\tclientPrivateKey *rsa.PrivateKey\n\t\tclientUser string\n\t\tvalidPublicKeys []rsa.PublicKey\n\t\tvalidUsernames []string\n\t\texpectSuccessAuth bool\n\t\toverrideHttpUrl string\n\t}{\n\t\t// valid key, should succeed auth\n\t\t{\n\t\t\tclientPrivateKey: testKeys[0],\n\t\t\tclientUser: \"ubuntu\",\n\t\t\tvalidPublicKeys: []rsa.PublicKey{testKeys[0].PublicKey},\n\t\t\tvalidUsernames: []string{\"ubuntu\"},\n\t\t\texpectSuccessAuth: true,\n\t\t\toverrideHttpUrl: \"\",\n\t\t},\n\t\t// invalid key, should be rejected\n\t\t{\n\t\t\tclientPrivateKey: testKeys[0],\n\t\t\tclientUser: \"ubuntu\",\n\t\t\tvalidPublicKeys: []rsa.PublicKey{testKeys[1].PublicKey, testKeys[2].PublicKey},\n\t\t\tvalidUsernames: []string{\"ubuntu\"},\n\t\t\texpectSuccessAuth: false,\n\t\t\toverrideHttpUrl: \"\",\n\t\t},\n\t\t// invalid username, should be rejected\n\t\t{\n\t\t\tclientPrivateKey: testKeys[0],\n\t\t\tclientUser: \"windows\",\n\t\t\tvalidPublicKeys: []rsa.PublicKey{testKeys[0].PublicKey},\n\t\t\tvalidUsernames: []string{\"ubuntu\"},\n\t\t\texpectSuccessAuth: false,\n\t\t\toverrideHttpUrl: \"\",\n\t\t},\n\t\t// no http service listening on server url, should be rejected\n\t\t{\n\t\t\tclientPrivateKey: testKeys[0],\n\t\t\tclientUser: \"ubuntu\",\n\t\t\tvalidPublicKeys: []rsa.PublicKey{testKeys[0].PublicKey},\n\t\t\tvalidUsernames: []string{\"ubuntu\"},\n\t\t\texpectSuccessAuth: false,\n\t\t\toverrideHttpUrl: 
\"http://localhost:61234\",\n\t\t},\n\t\t// invalid http url, should be rejected\n\t\t{\n\t\t\tclientPrivateKey: testKeys[0],\n\t\t\tclientUser: \"ubuntu\",\n\t\t\tvalidPublicKeys: []rsa.PublicKey{testKeys[0].PublicKey},\n\t\t\tvalidUsernames: []string{\"ubuntu\"},\n\t\t\texpectSuccessAuth: false,\n\t\t\toverrideHttpUrl: \"notarealurl\",\n\t\t},\n\t}\n\n\tfor caseIdx, c := range testCases {\n\t\tif c.overrideHttpUrl == \"\" {\n\t\t\t// start an http server that will validate against the specified public keys\n\t\t\thttpSrv := httptest.NewServer(http.HandlerFunc(PubKeyHttpHandler(&c.validPublicKeys, &c.validUsernames)))\n\t\t\tdefer httpSrv.Close()\n\n\t\t\t// set viper to this http server URL as the auth request url it will\n\t\t\t// send public keys to for auth validation\n\t\t\tviper.Set(\"authentication-key-request-url\", httpSrv.URL)\n\t\t} else {\n\t\t\tviper.Set(\"authentication-key-request-url\", c.overrideHttpUrl)\n\t\t}\n\n\t\tsshListener, err := net.Listen(\"tcp\", \"localhost:0\")\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tdefer sshListener.Close()\n\n\t\tsuccessAuth := make(chan bool)\n\t\tgo HandleSSHConn(sshListener, &successAuth)\n\n\t\t// attempt to connect to the ssh server using the specified private key\n\t\tsigner, err := ssh.NewSignerFromKey(c.clientPrivateKey)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tclientConfig := &ssh.ClientConfig{\n\t\t\tAuth: []ssh.AuthMethod{\n\t\t\t\tssh.PublicKeys(signer),\n\t\t\t},\n\t\t\tHostKeyCallback: ssh.InsecureIgnoreHostKey(),\n\t\t\tUser: c.clientUser,\n\t\t}\n\t\tt.Log(clientConfig)\n\n\t\tclient, err := ssh.Dial(\"tcp\", sshListener.Addr().String(), clientConfig)\n\t\tif err != nil {\n\t\t\tt.Log(\"ssh client rejected\", err)\n\t\t} else {\n\t\t\tt.Log(\"ssh client connected\")\n\t\t\tclient.Close()\n\t\t}\n\n\t\tdidAuth := <-successAuth\n\n\t\tif didAuth != c.expectSuccessAuth {\n\t\t\tt.Errorf(\"Auth %t when should have been %t for case %d\", didAuth, c.expectSuccessAuth, caseIdx)\n\t\t}\n\t}\n}", "func RunChallenge10() {\n\tutil.PrintChallengeHeader(2, 10)\n\n\t// The same key is used throughout this challenge\n\tkey := []byte(\"YELLOW SUBMARINE\")\n\n\t// Test ECB Encryption\n\tecbPlainText := \"This test really contains at exactly (3) blocks.\"\n\n\tecbCipher := aes.NewAesEcbCipher(key)\n\tecbCipherText, err := ecbCipher.Encrypt([]byte(ecbPlainText))\n\tif err != nil {\n\t\tfmt.Println(\"error encrypting with ecb:\", err.Error())\n\t\treturn\n\t}\n\tpaddedPlaintext, err := ecbCipher.Decrypt(ecbCipherText)\n\tif err != nil {\n\t\tfmt.Println(\"error decrypting with ecb:\", err.Error())\n\t\treturn\n\t}\n\tfinalEcb, err := util.RemovePkcs7Padding(paddedPlaintext, 16)\n\tif err != nil {\n\t\tfmt.Println(\"error removing padding from ecb:\", err.Error())\n\t\treturn\n\t}\n\tutil.PrintResults(ecbPlainText, string(finalEcb))\n\n\t// Test CBC Encryption\n\tcbcPlainText := \"This is not a block aligned\"\n\tcbcIv := \"DEFINITELYSECRET\"\n\n\tcbcCipher := aes.NewAesCbcCipher(key)\n\tcbcCipherText, err := cbcCipher.Encrypt([]byte(cbcPlainText), []byte(cbcIv))\n\tif err != nil {\n\t\tfmt.Println(\"error encrypting with cbc:\", err.Error())\n\t\treturn\n\t}\n\tfinalCbc, err := cbcCipher.Decrypt(cbcCipherText, []byte(cbcIv))\n\tif err != nil {\n\t\tfmt.Println(\"error decrypting with cbc:\", err.Error())\n\t\treturn\n\t}\n\tutil.PrintResults(cbcPlainText, string(finalCbc))\n\n\t// Load Data\n\tinput, err := util.ReadFileRemoveNewline(\"set2/resources/challenge10.txt\")\n\tif err != nil {\n\t\tfmt.Println(\"error 
reading filedata\", err.Error())\n\t\treturn\n\t}\n\tdata, _ := base64.StdEncoding.DecodeString(input)\n\tiv := make([]byte, 16)\n\n\t// Decrypt\n\tcbcCipher = aes.NewAesCbcCipher(key)\n\tplaintext, err := cbcCipher.Decrypt(data, iv)\n\tfmt.Println(string(plaintext))\n}", "func AESKEYGENASSIST(i, mx, x operand.Op) { ctx.AESKEYGENASSIST(i, mx, x) }", "func encrypt(sk, dst, src []byte) {\n\n}", "func SshKeyExpire(mdb *mongo.Client, mongo_instance string, ldap *ldap_client.LDAPClient){\n\tlog.Println(\"[*] Undergoing key expiration procedure\")\n\tlog.Println(\" |___\")\n\n\t// vars\n\tusers := mdb.Database(mongo_instance).Collection(\"users\")\n\texpirationDelta := 9\n\n\tfindOptProj := options.Find().SetProjection(bson.M{\"sys_username\":1, \"email\":1, \"pubKey\": 1, \"otp_secret\":1, \"key_last_unlock\":1})\n\tcur, err := users.Find(context.TODO(), bson.M{ \"pubKey\": bson.M{ \"$exists\": true, \"$nin\": bson.A{nil, \"\"} }}, findOptProj)\n\tCheck(err)\n\tdefer cur.Close(context.TODO())\n\tfor cur.Next(context.TODO()) {\n\t\tvar user User\n\t\terr := cur.Decode(&user)\n\t\tCheck(err)\n\t\tdiff := TimeHoursDiff(user.Key_last_unlock)\n\t\tif (diff >= expirationDelta) {\n\t\t\t//cipher string only if it is unciphered\n\t\t\tif(strings.Contains(user.PubKey, \"ssh-rsa\")) {\n\t\t\t\t//return a byte string\n\t\t\t\tb32_decoded_otp_secret, err := base32.StdEncoding.WithPadding(base32.NoPadding).DecodeString(user.Otp_secret)\n\t\t\t\tCheck(err)\n\t\t\t\tkey := b32_decoded_otp_secret\n\t\t\t\tencKey := AESencrypt(string(key), user.PubKey)\n\t\t\t\t_, err = users.UpdateOne(context.TODO(), bson.M{\"email\":user.Email }, bson.M{ \"$set\": bson.M{ \"pubKey\" : encKey}})\n\t\t\t\tCheck(err)\n\t\t\t\t_, err = ldap.SetUserAttribute(user.Sys_username, \"sshPublicKey\", encKey)\n\t\t\t\tCheck(err)\n\t\t\t\tlog.Println(\" |- SSH public key for user \"+user.Sys_username+\" Locked due to expiration\")\n\t\t\t}\n\t\t}\n\t}\n\tlog.Println(\"[+] Expired keys locked successfully\")\n}", "func encrypt(msg []byte, k key) (c []byte, err error) {\n\tnonce, err := randomNonce()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc = box.SealAfterPrecomputation(c, msg, nonce, k)\n\tc = append((*nonce)[:], c...)\n\treturn c, nil\n}", "func (r *rsaPublicKey) encrypt(data []byte) ([]byte, error) {\n // The label parameter must be the same for decrypt function\n encrypted, err := rsa.EncryptOAEP(r.Hash.New(), rand.Reader, r.PublicKey, data, []byte(\"~pc*crypt^pkg!\")); if err != nil {\n return nil, err\n }\n return encrypted, nil\n}", "func main() {\n // Parameters begin\n message := []byte(\"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. 
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\")\n n := 5\n // Parameters end\n \n fmt.Printf(\"{\\n\")\n // Generate key pairs\n var privates []kyber.Scalar\n var publics []kyber.Point\n for i := 0; i < n; i++ {\n // kp := key.NewKeyPair(testSuite)\n private := testSuite.Scalar().Pick(random.Stream)\n public := testSuite.Point().Mul(expand(private), nil)\n privates = append(privates, private)\n publics = append(publics, public)\n }\n\n fmt.Printf(\" privKeys: [\")\n for i := 0; i < n; i++ {\n if i != 0 {\n fmt.Printf(\", \")\n }\n fmt.Printf(\"BufferUtils.fromHex(\\\"%s\\\")\", fmtPad(\"%x\", 64, reverse(privates[i].Bytes())))\n }\n fmt.Printf(\"],\\n\")\n\n fmt.Printf(\" pubKeys: [\")\n for i := 0; i < n; i++ {\n if i != 0 {\n fmt.Printf(\", \")\n }\n fmt.Printf(\"BufferUtils.fromHex(\\\"%s\\\")\", fmtPad(\"%s\", 64, publics[i].String()))\n }\n fmt.Printf(\"],\\n\")\n\n // Delinearize\n var delinearizedPrivates []kyber.Scalar\n var delinearizedPublics []kyber.Point\n for i := 0; i < n; i++ {\n dl := delinearization(publics, publics[i])\n dlPrivate := testSuite.Scalar().Mul(dl, expand(privates[i]))\n dlPublic := testSuite.Point().Mul(dl, publics[i])\n delinearizedPrivates = append(delinearizedPrivates, dlPrivate)\n delinearizedPublics = append(delinearizedPublics, dlPublic)\n }\n\n fmt.Printf(\" pubKeysHash: BufferUtils.fromHex(\\\"%s\\\"),\\n\", fmtPad(\"%x\", 128, publicKeysHash(publics)))\n\n fmt.Printf(\" delinearizedPrivKeys: [\")\n for i := 0; i < n; i++ {\n if i != 0 {\n fmt.Printf(\", \")\n }\n fmt.Printf(\"BufferUtils.fromHex(\\\"%s\\\")\", fmtPad(\"%x\", 64, reverse(delinearizedPrivates[i].Bytes())))\n }\n fmt.Printf(\"],\\n\")\n\n fmt.Printf(\" delinearizedPubKeys: [\")\n for i := 0; i < n; i++ {\n if i != 0 {\n fmt.Printf(\", \")\n }\n fmt.Printf(\"BufferUtils.fromHex(\\\"%s\\\")\", fmtPad(\"%s\", 64, delinearizedPublics[i].String()))\n }\n fmt.Printf(\"],\\n\")\n\n // Init masks\n var masks []*cosi.Mask\n var byteMasks [][]byte\n for i := 0; i < n; i++ {\n m, err := cosi.NewMask(testSuite, delinearizedPublics, delinearizedPublics[i])\n if err != nil {\n panic(err.Error())\n }\n masks = append(masks, m)\n byteMasks = append(byteMasks, masks[i].Mask())\n }\n\n // Compute commitments\n var v []kyber.Scalar // random\n var V []kyber.Point // commitment\n for i := 0; i < n; i++ {\n x, X := cosi.Commit(testSuite, nil)\n v = append(v, x)\n V = append(V, X)\n }\n\n fmt.Printf(\" secrets: [\")\n for i := 0; i < n; i++ {\n if i != 0 {\n fmt.Printf(\", \")\n }\n fmt.Printf(\"BufferUtils.fromHex(\\\"%x\\\")\", fmtPad(\"%x\", 64, reverse(v[i].Bytes())))\n }\n fmt.Printf(\"],\\n\")\n\n fmt.Printf(\" commitments: [\")\n for i := 0; i < n; i++ {\n if i != 0 {\n fmt.Printf(\", \")\n }\n fmt.Printf(\"BufferUtils.fromHex(\\\"%s\\\")\", fmtPad(\"%s\", 64, V[i].String()))\n }\n fmt.Printf(\"],\\n\")\n\n // Aggregate commitments\n aggV, aggMask, err := cosi.AggregateCommitments(testSuite, V, byteMasks)\n if err != nil {\n panic(err.Error())\n }\n fmt.Printf(\" aggCommitment: BufferUtils.fromHex(\\\"%s\\\"),\\n\", fmtPad(\"%s\", 64, aggV.String()))\n\n // Set aggregate mask in nodes\n for i := 0; i < n; i++ {\n masks[i].SetMask(aggMask)\n }\n\n // Compute challenge\n var c []kyber.Scalar\n for i := 0; i < n; i++ {\n ci, err := cosi.Challenge(testSuite, aggV, masks[i].AggregatePublic, message)\n if err != nil {\n panic(err.Error())\n }\n c = append(c, ci)\n }\n fmt.Printf(\" aggPubKey: BufferUtils.fromHex(\\\"%s\\\"),\\n\", 
fmtPad(\"%s\", 64, masks[0].AggregatePublic.String()))\n\n // Compute responses\n var r []kyber.Scalar\n for i := 0; i < n; i++ {\n ri, _ := cosi.Response(testSuite, delinearizedPrivates[i], v[i], c[i])\n r = append(r, ri)\n }\n\n fmt.Printf(\" partialSignatures: [\")\n for i := 0; i < n; i++ {\n if i != 0 {\n fmt.Printf(\", \")\n }\n fmt.Printf(\"BufferUtils.fromHex(\\\"%s\\\")\", fmtPad(\"%x\", 64, reverse(r[i].Bytes())))\n }\n fmt.Printf(\"],\\n\")\n\n // Aggregate responses\n aggr, err := cosi.AggregateResponses(testSuite, r)\n if err != nil {\n panic(err.Error())\n }\n fmt.Printf(\" aggSignature: BufferUtils.fromHex(\\\"%s\\\"),\\n\", fmtPad(\"%x\", 64, reverse(aggr.Bytes())))\n\n for i := 0; i < n; i++ {\n // Sign\n sig, err := cosi.Sign(testSuite, aggV, aggr, masks[i])\n if err != nil {\n panic(err.Error())\n }\n // Verify (using default policy)\n if err := cosi.Verify(testSuite, delinearizedPublics, message, sig, nil); err != nil {\n panic(err.Error())\n }\n }\n\n fmt.Printf(\" signature: BufferUtils.fromHex(\\\"%s\\\"),\\n\", fmtPad(\"%s%x\", 128, aggV.String(), reverse(aggr.Bytes())))\n fmt.Printf(\" message: BufferUtils.fromAscii(\\\"%s\\\")\\n\", message)\n fmt.Printf(\"}\\n\")\n}", "func Cipher(msg string, key string) string {\n\tvar ciphered string \n\tvar keylen int = len(key)\n\tif (keylen == 0) { return msg } // No key provided\n\tfor i := 0; i < len(msg); i++ {\n\t\tvar keyIndex int = i % keylen // Calculate the key index e.g. (i=10, keylen=4, keyIndex 2), (i=11, keylen=4, keyIndex=3)\n\t\tciphered += string(msg[i] ^ key[keyIndex])\n\t}\n\treturn ciphered\n}", "func main() {\n\t// Read a key from a file in authorized keys file line format\n\t// This could be an rsa.pub file or a line from authorized_keys\n\tpubKeyBytes := []byte(`ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCzMETD7LIf/P0IjgjpiTqgrrxsu1UfCfMhD8htNLgek6vIWWKVCJOajHHVMUu4ce5kA2s5Rh9EzolJ/IWRXEghx/SpW+/CP4Dhn7Q909UJuMQy2sN74dO0xpdD1tBimSHYmPkxs6XzxqKDwJYV77d1sZZCwNXvw8UEsBijK3B/dgFHSUGnX2jrTWSbIALVlbp9P3x3i0ypXK84XY8FIhPWduZ6fFlbUb14aTyJgQgw1oghGYOFhv/B48A/t9F3cE15xXqxDKsyWRDxnoxJaJD9iyEfa91NbqO6u1sb+ByLEE/i7C0UGJryqifcDMM0nDEF89RG+DjrSOb7x43u+83ixXYFx+eIfqwvAvXO+SVxPjX5yPRN8x0ybi5TiAszjioIMGRx0hUi74ugZApDTYsNv45G6cpNEEaiLZAR9qGUpoUnGGYtMJSmSg4mRO84QJtXcah3vnOAG5X83KgwDWYDqmhyewxG7kCaY/pybP+pSV1QCx146QTJN7jL3nNZN+70m3/TQ64p4vxCnlwOd8chopnLpPe6G8BgFpxRr4lAsfDe5nN9xTZFR0+2TfdqkRrplWmV4JoGuFTuz8VOfzbnvPwWDgxPhfGC9bn32ZnRM7O2syR+YT4BbcEU7epVk6pLmSFq7lBspWuIgJGawfUIUl02BPen2nMdoPrZRLeMpQ==`)\n\n\t// Parse the key, other info ignored\n\tpk, _, _, _, err := ssh.ParseAuthorizedKey(pubKeyBytes)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Get the fingerprint\n\tf := ssh.FingerprintLegacyMD5(pk)\n\n\t// Print the fingerprint\n\tfmt.Printf(\"%s\\n\", f)\n}", "func genKeyAndSendCipher(kx *KX, pk *[sntrup4591761.PublicKeySize]byte, ek *[32]byte) (*[32]byte, error) {\n\tc, k, err := sntrup4591761.Encapsulate(rand.Reader, pk)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif ek != nil {\n\t\terr = kx.writeWithKey(c[:], ek)\n\t} else {\n\t\t_, err = xdr.Marshal(kx.Conn, c)\n\t}\n\treturn k, err\n}", "func caesarCipher(s string, k int32) string {\n\t// Write your code here\n\n\tencrypted := \"\"\n\n\tfor _, ch := range s {\n\t\tif int32(ch) >= 65 && int32(ch) <= 90 {\n\t\t\tnewChar := int32(ch) + k\n\t\t\tfor newChar > 90 {\n\t\t\t\tnewChar -= 26\n\t\t\t}\n\t\t\tencrypted += string(rune(newChar))\n\t\t} else if int32(ch) >= 97 && int32(ch) <= 122 {\n\t\t\tnewChar := int32(ch) + k\n\t\t\tfor newChar > 122 {\n\t\t\t\tnewChar -= 
26\n\t\t\t}\n\t\t\tencrypted += string(rune(newChar))\n\t\t} else {\n\t\t\tencrypted += string(ch)\n\t\t}\n\t}\n\n\treturn encrypted\n\n}", "func (g *Generator) rekey() error {\n\tfor i := keySize / g.cipher.BlockSize(); i > 0; i-- {\n\t\tg.readBlock(g.key[g.cipher.BlockSize()*i:])\n\t}\n\n\treturn g.updateCipher()\n}", "func encryptionKeys(seedStr string) (validKeys []crypto.TwofishKey) {\n\tdicts := []mnemonics.DictionaryID{\"english\", \"german\", \"japanese\"}\n\tfor _, dict := range dicts {\n\t\tseed, err := modules.StringToSeed(seedStr, dict)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tvalidKeys = append(validKeys, crypto.TwofishKey(crypto.HashObject(seed)))\n\t}\n\tvalidKeys = append(validKeys, crypto.TwofishKey(crypto.HashObject(seedStr)))\n\treturn validKeys\n}", "func bruteForceKey(cipherText []byte, minKeyLength int, maxKeyLength int) ([]byte, []byte) {\n\n\t_, bestBlocks := findBlockSize(cipherText, minKeyLength, maxKeyLength)\n\n\t//This will effectively be the transpose of the matrix so that each of the bytes that use the same key byte will be together\n\t//This will allow us to use existing code to brute force them.\n\tsingleByteGroups := make([][]byte, len(bestBlocks[0]))\n\t//transpose the matrix... First byte of each will use same key, so we will brute force them together.\n\tfor i := 0; i < len(bestBlocks[0]); i++ {\n\t\tgroup := make([]byte, len(bestBlocks))\n\t\tsingleByteGroups[i] = group\n\t\tfor j, block := range bestBlocks {\n\t\t\tgroup[j] = block[i]\n\t\t}\n\t}\n\n\tkey := make([]byte, len(singleByteGroups))\n\tfor i, group := range singleByteGroups {\n\t\tgroup, groupKey, _ := bruteForceSingleByte(group)\n\t\tsingleByteGroups[i] = group\n\t\tkey = append(key, groupKey)\n\t\tfmt.Println(singleByteGroups[i])\n\t}\n\tfmt.Println(hex.Dump(singleByteGroups[0]))\n\t//transpose the matrix back. 
First byte of each will use same key, so we will brute force them together.\n\tfmt.Println(\"\")\n\tfmt.Println(hex.Dump(bestBlocks[0]))\n\tfor i, group := range singleByteGroups {\n\t\tfor j, block := range bestBlocks {\n\t\t\tblock[i] = group[j]\n\t\t\tbestBlocks[j] = block\n\t\t}\n\t}\n\n\tplainText := make([]byte, len(cipherText))\n\tfor _, block := range bestBlocks {\n\t\tplainText = append(plainText, block...)\n\t}\n\t//fmt.Println(hex.Dump(plainText))\n\t//fmt.Println(hex.Dump(key))\n\treturn plainText, key\n}", "func testRsaEncryption(t *testing.T, key crypto.Decrypter, nbits int) {\n\ttestRsaEncryptionPKCS1v15(t, key)\n\ttestRsaEncryptionOAEP(t, key, crypto.SHA1, []byte{})\n\t// testRsaEncryptionOAEP(t, key, crypto.SHA224, []byte{})\n\tif nbits > 1024 { // key too smol for SHA256\n\t\t// testRsaEncryptionOAEP(t, key, crypto.SHA256, []byte{})\n\t}\n\t//testRsaEncryptionOAEP(t, key, crypto.SHA384, []byte{})\n\tif nbits > 1024 { // key too smol for SHA512\n\t\t// testRsaEncryptionOAEP(t, key, crypto.SHA512, []byte{})\n\t}\n\n\t//\n\t// With label\n\t//\n\n\tif nbits == 1024 {\n\t\t// testRsaEncryptionOAEP(t, key, crypto.SHA1, []byte{1, 2, 3, 4})\n\t}\n\t//testRsaEncryptionOAEP(t, key, crypto.SHA224, []byte{5, 6, 7, 8})\n\t// testRsaEncryptionOAEP(t, key, crypto.SHA256, []byte{9})\n\t// testRsaEncryptionOAEP(t, key, crypto.SHA384, []byte{10, 11, 12, 13, 14, 15})\n\tif nbits > 1024 {\n\t\t// testRsaEncryptionOAEP(t, key, crypto.SHA512, []byte{16, 17, 18})\n\t}\n}", "func GroupEncrypt(plaintext string, publicKeys map[string]string) (*Encrypted, error) {\n\n\tkeySize := 32\n\tkey, err := RandomBytes(keySize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tciphertext, iv, err := AESEncrypt([]byte(plaintext), key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tinputs := make(map[string]string)\n\tinputs[\"iv\"] = string(Base64Encode(iv))\n\n\tencryptedKeys := make(map[string]string)\n\tfor id, publicKeyString := range publicKeys {\n\t\tpublicKey, err := PemDecodePublic([]byte(publicKeyString))\n\t\tencryptedKey, err := Encrypt(key, publicKey)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tencryptedKeys[id] = string(Base64Encode(encryptedKey))\n\t}\n\n\treturn &Encrypted{Ciphertext: string(Base64Encode(ciphertext)), Mode: \"aes-cbc-256+rsa\", Inputs: inputs, Keys: encryptedKeys}, nil\n}", "func deriveKeys(forwardSecure bool, sharedSecret, nonces []byte, connID protocol.ConnectionID, chlo, scfg, cert, divNonce []byte, keyLen int, swap bool) ([]byte, []byte, []byte, []byte, error) {\n\tvar info bytes.Buffer\n\tif forwardSecure {\n\t\tinfo.Write([]byte(\"QUIC forward secure key expansion\\x00\"))\n\t} else {\n\t\tinfo.Write([]byte(\"QUIC key expansion\\x00\"))\n\t}\n\tinfo.Write(connID)\n\tinfo.Write(chlo)\n\tinfo.Write(scfg)\n\tinfo.Write(cert)\n\n\tr := hkdf.New(sha256.New, sharedSecret, nonces, info.Bytes())\n\n\ts := make([]byte, 2*keyLen+2*4)\n\tif _, err := io.ReadFull(r, s); err != nil {\n\t\treturn nil, nil, nil, nil, err\n\t}\n\n\tkey1 := s[:keyLen]\n\tkey2 := s[keyLen : 2*keyLen]\n\tiv1 := s[2*keyLen : 2*keyLen+4]\n\tiv2 := s[2*keyLen+4:]\n\n\tvar otherKey, myKey []byte\n\tvar otherIV, myIV []byte\n\n\tif !forwardSecure {\n\t\tif err := diversify(key2, iv2, divNonce); err != nil {\n\t\t\treturn nil, nil, nil, nil, err\n\t\t}\n\t}\n\n\tif swap {\n\t\totherKey = key2\n\t\tmyKey = key1\n\t\totherIV = iv2\n\t\tmyIV = iv1\n\t} else {\n\t\totherKey = key1\n\t\tmyKey = key2\n\t\totherIV = iv1\n\t\tmyIV = iv2\n\t}\n\n\treturn otherKey, myKey, otherIV, myIV, nil\n}", "func 
breakRepeatingKeyXor(text []byte, minKeyLen, maxKeyLen int) []byte {\n\tkeySize := findKeySize(text, minKeyLen, maxKeyLen)\n\tblocks := make([][]byte, len(text)/keySize)\n\tfor i := 0; i < len(text)/keySize; i++ {\n\t\tblocks[i] = text[i*keySize : (i+1)*keySize]\n\t}\n\ttransposed := make([][]byte, keySize)\n\tfor i := 0; i < keySize; i++ {\n\t\tt := make([]byte, len(blocks))\n\t\tfor j := 0; j < len(blocks); j++ {\n\t\t\tt[j] = blocks[j][i]\n\t\t}\n\t\ttransposed[i] = t\n\t}\n\tkey := make([]byte, keySize)\n\tfor i := 0; i < keySize; i++ {\n\t\t_, k := breakSingleByteXor(transposed[i])\n\t\tkey[i] = k\n\t}\n\treturn key\n}", "func isReplayAttack(ciphertext []byte) bool {\n\tif len(ciphertext) == 0 {\n\t\treturn false\n\t}\n\tsha := sha256.New()\n\tsha.Write(ciphertext)\n\tdigest := base64.RawStdEncoding.EncodeToString(sha.Sum(nil))\n\treplayMutex.Lock()\n\tdefer replayMutex.Unlock()\n\tif _, ok := (*replay)[digest]; ok {\n\t\t// {{if .Debug}}\n\t\tlog.Printf(\"WARNING: Replay attack detected\")\n\t\t// {{end}}\n\t\treturn true\n\t}\n\t(*replay)[digest] = true\n\treturn false\n}", "func (kw *pkcs11KeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) {\n\tpkcs11Recipients, err := addPubKeys(&ec.DecryptConfig, append(ec.Parameters[\"pkcs11-pubkeys\"], ec.Parameters[\"pkcs11-yamls\"]...))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// no recipients is not an error...\n\tif len(pkcs11Recipients) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tjsonString, err := pkcs11.EncryptMultiple(pkcs11Recipients, optsData)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"PKCS11 EncryptMulitple failed: %w\", err)\n\t}\n\treturn jsonString, nil\n}", "func XorDecryptMain(r io.Reader, w io.Writer, keyLen int, numResults int) error {\n\tif keyLen != 1 {\n\t\treturn InvalidKeyLen\n\t}\n\n\tif numResults < 1 {\n\t\treturn InvalidNumResults\n\t}\n\n\tscanner := bufio.NewScanner(r)\n\tfor scanner.Scan() {\n\t\tciphertext := scanner.Bytes()\n\n\t\tcandidates := make(DecryptionCandidates, 0, numResults)\n\t\tfor key := byte(0); true; key++ {\n\t\t\tnewCandidate := NewCandidate(ciphertext, []byte{key})\n\n\t\t\tif len(candidates) < numResults {\n\t\t\t\tcandidates := append(candidates, newCandidate)\n\t\t\t\tsort.Sort(&candidates)\n\t\t\t} else if newCandidate.score < candidates[len(candidates)-1].score {\n\t\t\t\tcandidates[len(candidates)-1] = newCandidate\n\t\t\t\tsort.Sort(&candidates)\n\t\t\t}\n\n\t\t\t// Break manually due to overflow\n\t\t\tif key == 0xff {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tfor _, candidate := range candidates {\n\t\t\toutput := fmt.Sprintf(\"Key: %#x\\nPlaintext: %q\\nScore: %-02f\\n\\n\", candidate.key, candidate.plaintext, candidate.score)\n\t\t\tw.Write([]byte(output))\n\t\t}\n\t}\n\n\treturn nil\n}", "func SplitKey(privateKey *big.Int, publicKey *Key, n int) ([]*Trustee, []*big.Int, error) {\n\t// Choose n-1 random private keys and compute the nth as privateKey -\n\t// (key_1 + key_2 + ... + key_{n-1}). 
This computation must be\n\t// performed in the exponent group of g, which is\n\t// Z_{Key.ExponentPrime}.\n\ttrustees := make([]*Trustee, n)\n\tkeys := make([]*big.Int, n)\n\tsum := big.NewInt(0)\n\tvar err error\n\tfor i := 0; i < n-1; i++ {\n\t\tkeys[i], err = rand.Int(rand.Reader, publicKey.ExponentPrime)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\ttpk := &Key{\n\t\t\tGenerator: new(big.Int).Set(publicKey.Generator),\n\t\t\tPrime: new(big.Int).Set(publicKey.Prime),\n\t\t\tExponentPrime: new(big.Int).Set(publicKey.ExponentPrime),\n\t\t\tPublicValue: new(big.Int).Exp(publicKey.Generator, keys[i], publicKey.Prime),\n\t\t}\n\n\t\ttrustees[i] = &Trustee{PublicKey: tpk}\n\t\tsum.Add(sum, keys[i])\n\t\tsum.Mod(sum, publicKey.ExponentPrime)\n\t}\n\n\t// The choice of random private keys in the loop fully determines the\n\t// final key.\n\tkeys[n-1] = new(big.Int).Sub(privateKey, sum)\n\tkeys[n-1].Mod(keys[n-1], publicKey.ExponentPrime)\n\t//npok, err := NewSchnorrProof(keys[n-1], publicKey)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tntpk := &Key{\n\t\tGenerator: new(big.Int).Set(publicKey.Generator),\n\t\tPrime: new(big.Int).Set(publicKey.Prime),\n\t\tExponentPrime: new(big.Int).Set(publicKey.ExponentPrime),\n\t\tPublicValue: new(big.Int).Exp(publicKey.Generator, keys[n-1], publicKey.Prime),\n\t}\n\n\t//trustees[n-1] = &Trustee{PoK: npok, PublicKey: ntpk}\n\ttrustees[n-1] = &Trustee{PublicKey: ntpk}\n\n\treturn trustees, keys, nil\n}", "func (m *Manager) ReplaceKey(bits int) error {\n\tcurrent, replaced, err := m.pkey()\n\tif err != nil || replaced {\n\t\treturn err\n\t}\n\n\tcurrentKey := base64.RawStdEncoding.EncodeToString(\n\t\tcurrent.PublicKey().Marshal(),\n\t)\n\n\trawPKey, err := GenerateRSA(bits)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpkey, err := ssh.ParsePrivateKey(rawPKey)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tnewKey := base64.RawStdEncoding.EncodeToString(\n\t\tpkey.PublicKey().Marshal(),\n\t)\n\n\trnd := time.Now().UnixNano()\n\t_, _, err = m.conn.Output(\n\t\tfmt.Sprintf(\n\t\t\t`tmp=\"$HOME/.ssh/authorized_keys.%d\" && \\\n\t\t\tbu=\"$HOME/.ssh/authorized_keys.gonzalo.backup\" && \\\n\t\t\tcp \"$HOME/.ssh/authorized_keys\" \"$bu\" && \\\n\t\t\tcp \"$HOME/.ssh/authorized_keys\" \"$tmp\" && \\\n\t\t\techo 'ssh-rsa %s' >> \"$tmp\" && \\\n\t\t\tline=$(cat -n \"$tmp\" | grep '%s' | cut -f1 | xargs) && \\\n\t\t\tsed -i \"${line}d\" \"$tmp\" && \\\n\t\t\tmv \"$tmp\" \"$HOME/.ssh/authorized_keys\"`,\n\t\t\trnd,\n\t\t\tnewKey,\n\t\t\tcurrentKey,\n\t\t),\n\t\tnil,\n\t)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := m.setPKey(rawPKey); err != nil {\n\t\tm.conn.Output(\n\t\t\t`cp \"$HOME/.ssh/authorized_keys.gonzalo.backup\" \\\n\t\t\t\"$HOME/.ssh/authorized_keys\"`,\n\t\t\tnil,\n\t\t)\n\n\t\treturn err\n\t}\n\n\tm.conn.SetPrivateKey(pkey)\n\treturn nil\n}", "func main() {\n\n\n wordlist := getJsonList(LISTPATH + \"/\" + WORDLIST)\n keywordlist := getJsonList(LISTPATH + \"/\" + KEYWORDLIST)\n\n\n /*\n * Flags\n */\n var knowntext string\n var letters string\n var numbers string\n flag.StringVar(&knowntext, \"knowntext\", \"\", \"Insert the Known Text with dots where you don't have a letter es: B...ST\")\n flag.StringVar(&letters, \"letters\", \"\", \"Insert the key letters es: DEFMPRST\")\n flag.StringVar(&numbers, \"numbers\", \"\", \"Insert the key numbers es: 48110398\")\n\n flag.Parse()\n\n if knowntext == \"\" {\n flag.PrintDefaults()\n os.Exit(1)\n }\n\n if letters == \"\" 
{\n flag.PrintDefaults()\n os.Exit(1)\n }\n\n if numbers == \"\" {\n flag.PrintDefaults()\n os.Exit(1)\n }\n\n var num []int\n for i :=0; i< len(numbers); i++ {\n n, err := strconv.Atoi(string(numbers[i]))\n if err != nil {\n fmt.Println(\"Error inserting numbers\")\n flag.PrintDefaults()\n os.Exit(2)\n }\n num = append(num, n)\n\n\n }\n\n code := Code {letters: letters , numbers: num }\n\n keywordlist = getKeywordList(knowntext, keywordlist)\n s:=cipherKeyWords(keywordlist,code)\n\n fmt.Println(getCodeFromCiphers(s, wordlist))\n\n}", "func main() {\n // Parameters begin\n message := []byte(\"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\")\n n := 5\n // Parameters end\n \n fmt.Printf(\"{\\n\")\n // Generate key pairs\n var privates []kyber.Scalar\n var publics []kyber.Point\n for i := 0; i < n; i++ {\n // kp := key.NewKeyPair(testSuite)\n private := testSuite.Scalar().Pick(random.Stream)\n public := testSuite.Point().Mul(expand(private), nil)\n privates = append(privates, private)\n publics = append(publics, public)\n }\n\n fmt.Printf(\" privKeys: [\")\n for i := 0; i < n; i++ {\n if i != 0 {\n fmt.Printf(\", \")\n }\n fmt.Printf(\"BufferUtils.fromHex(\\\"%032x\\\")\", reverse(privates[i].Bytes()))\n }\n fmt.Printf(\"],\\n\")\n\n fmt.Printf(\" pubKeys: [\")\n for i := 0; i < n; i++ {\n if i != 0 {\n fmt.Printf(\", \")\n }\n fmt.Printf(\"BufferUtils.fromHex(\\\"%032s\\\")\", publics[i].String())\n }\n fmt.Printf(\"],\\n\")\n\n // Init masks\n var masks []*cosi.Mask\n var byteMasks [][]byte\n for i := 0; i < n; i++ {\n m, err := cosi.NewMask(testSuite, publics, publics[i])\n if err != nil {\n panic(err.Error())\n }\n masks = append(masks, m)\n byteMasks = append(byteMasks, masks[i].Mask())\n }\n\n // Compute commitments\n var v []kyber.Scalar // random\n var V []kyber.Point // commitment\n for i := 0; i < n; i++ {\n x, X := cosi.Commit(testSuite, nil)\n v = append(v, x)\n V = append(V, X)\n }\n\n fmt.Printf(\" secrets: [\")\n for i := 0; i < n; i++ {\n if i != 0 {\n fmt.Printf(\", \")\n }\n fmt.Printf(\"BufferUtils.fromHex(\\\"%032x\\\")\", reverse(v[i].Bytes()))\n }\n fmt.Printf(\"],\\n\")\n\n fmt.Printf(\" commitments: [\")\n for i := 0; i < n; i++ {\n if i != 0 {\n fmt.Printf(\", \")\n }\n fmt.Printf(\"BufferUtils.fromHex(\\\"%032s\\\")\", V[i].String())\n }\n fmt.Printf(\"],\\n\")\n\n // Aggregate commitments\n aggV, aggMask, err := cosi.AggregateCommitments(testSuite, V, byteMasks)\n if err != nil {\n panic(err.Error())\n }\n fmt.Printf(\" aggCommitment: BufferUtils.fromHex(\\\"%032s\\\"),\\n\", aggV.String())\n\n // Set aggregate mask in nodes\n for i := 0; i < n; i++ {\n masks[i].SetMask(aggMask)\n }\n\n // Compute challenge\n var c []kyber.Scalar\n for i := 0; i < n; i++ {\n ci, err := cosi.Challenge(testSuite, aggV, masks[i].AggregatePublic, message)\n if err != nil {\n panic(err.Error())\n }\n c = append(c, ci)\n }\n fmt.Printf(\" aggPubKey: BufferUtils.fromHex(\\\"%032s\\\"),\\n\", masks[0].AggregatePublic.String())\n\n // Compute responses\n var r []kyber.Scalar\n for i := 0; i < n; i++ {\n ri, _ := cosi.Response(testSuite, expand(privates[i]), v[i], c[i])\n r = append(r, ri)\n }\n\n 
fmt.Printf(\" partialSignatures: [\")\n for i := 0; i < n; i++ {\n if i != 0 {\n fmt.Printf(\", \")\n }\n fmt.Printf(\"BufferUtils.fromHex(\\\"%032x\\\")\", reverse(r[i].Bytes()))\n }\n fmt.Printf(\"],\\n\")\n\n // Aggregate responses\n aggr, err := cosi.AggregateResponses(testSuite, r)\n if err != nil {\n panic(err.Error())\n }\n fmt.Printf(\" aggSignature: BufferUtils.fromHex(\\\"%032x\\\"),\\n\", reverse(aggr.Bytes()))\n\n for i := 0; i < n; i++ {\n // Sign\n sig, err := cosi.Sign(testSuite, aggV, aggr, masks[i])\n if err != nil {\n panic(err.Error())\n }\n // Verify (using default policy)\n if err := cosi.Verify(testSuite, publics, message, sig, nil); err != nil {\n panic(err.Error())\n }\n }\n\n fmt.Printf(\" signature: BufferUtils.fromHex(\\\"%032s%032x\\\"),\\n\", aggV.String(), reverse(aggr.Bytes()))\n fmt.Printf(\" message: BufferUtils.fromAscii(\\\"%032s\\\")\\n\", message)\n fmt.Printf(\"}\\n\")\n}", "func cipherKeyWords (words []string, code Code ) []CipherKeyWord {\n\n result := []CipherKeyWord{}\n var privateKey string\n var d Code\n\n for i:=0; i< len(words); i++ {\n\n privateKey = returnCipherString(words[i])\n d = decryptKey(privateKey, code)\n\n ckw := CipherKeyWord {\n cipherword : words[i],\n cipherstring : privateKey,\n cipherwordlen : len(words[i]),\n decryptedKey : d,\n }\n\n result = append(result, ckw)\n\n }\n\n return result\n\n}", "func verifyAuthorizedKeys(data interface{}, verifiers map[string]signatures.Verifier, sigs map[string]*sigpb.DigitallySigned) error {\n\tfor _, verifier := range verifiers {\n\t\tif sig, ok := sigs[verifier.KeyID()]; ok {\n\t\t\tif err := verifier.Verify(data, sig); err == nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\treturn mutator.ErrInvalidSig\n}", "func Encrypt(key string, text string) (string, error) {\n\n // decode PEM encoding to ANS.1 PKCS1 DER\n block, _ := pem.Decode([]byte(key))\n pub, err := x509.ParsePKIXPublicKey(block.Bytes)\n pubkey, _ := pub.(*rsa.PublicKey) \n\n // create the propertiess\n message := []byte(text)\n label := []byte(\"\")\n hash := sha256.New()\n\n ciphertext, err := rsa.EncryptOAEP(hash, rand.Reader, pubkey, message, label)\n return string(base64.StdEncoding.EncodeToString(ciphertext)), err\n\n}", "func (pk *PublicKey) EncapsulateTo(ct, ss []byte, seed []byte) {\n\tif seed == nil {\n\t\tseed = make([]byte, EncapsulationSeedSize)\n\t\tcryptoRand.Read(seed[:])\n\t} else {\n\t\tif len(seed) != EncapsulationSeedSize {\n\t\t\tpanic(\"seed must be of length EncapsulationSeedSize\")\n\t\t}\n\t}\n\n\tif len(ct) != CiphertextSize {\n\t\tpanic(\"ct must be of length CiphertextSize\")\n\t}\n\n\tif len(ss) != SharedKeySize {\n\t\tpanic(\"ss must be of length SharedKeySize\")\n\t}\n\n\t// m = H(seed)\n\tvar m [32]byte\n\th := sha3.New256()\n\th.Write(seed[:])\n\th.Read(m[:])\n\n\t// (K', r) = G(m ‖ H(pk))\n\tvar kr [64]byte\n\tg := sha3.New512()\n\tg.Write(m[:])\n\tg.Write(pk.hpk[:])\n\tg.Read(kr[:])\n\n\t// c = Kyber.CPAPKE.Enc(pk, m, r)\n\tpk.pk.EncryptTo(ct, m[:], kr[32:])\n\n\t// Compute H(c) and put in second slot of kr, which will be (K', H(c)).\n\th.Reset()\n\th.Write(ct[:CiphertextSize])\n\th.Read(kr[32:])\n\n\t// K = KDF(K' ‖ H(c))\n\tkdf := sha3.NewShake256()\n\tkdf.Write(kr[:])\n\tkdf.Read(ss[:SharedKeySize])\n}", "func keyExpansion(key []byte) []uint32 {\n\tnwords := 4\n\trounds := 10\n\n\texpkeys := make([]uint32, 4*(rounds+1))\n\t// the key occupies the first nwords slots of the expanded key\n\tvar i int\n\tfor i < nwords {\n\t\texpkeys[i] = uint32(key[i*4])<<24 | uint32(key[i*4+1])<<16 | 
uint32(key[i*4+2])<<8 | uint32(key[i*4+3])\n\t\ti++\n\t}\n\n\tfor i < 4*(rounds+1) {\n\t\texpkeys[i] = expkeys[i-1]\n\t\texpkeys[i] = rotWordLeft(expkeys[i], 1)\n\t\texpkeys[i] = subWord(expkeys[i])\n\t\texpkeys[i] ^= rcon(i/nwords - 1)\n\t\texpkeys[i] ^= expkeys[i-nwords]\n\n\t\tfor j := 1; j <= 3; j++ {\n\t\t\texpkeys[i+j] = expkeys[i+j-1] ^ expkeys[i+j-nwords]\n\t\t}\n\n\t\ti += nwords\n\t}\n\tfor j := 0; j < len(expkeys); j += 4 {\n\t\ttranspose(expkeys[j : j+4])\n\t}\n\n\treturn expkeys\n}", "func Attacker(\n\tch <-chan string,\n\tconf *ssh.ClientConfig,\n\tinterpreter string,\n\tscript []byte,\n\ttimeout time.Duration,\n\twg *sync.WaitGroup,\n\tnoQuoteOutput bool,\n) {\n\tdefer wg.Done()\n\n\tofs := \"q\"\n\tif noQuoteOutput {\n\t\tofs = \"s\"\n\t}\n\n\tfor t := range ch {\n\t\to, err := attack(\n\t\t\tt,\n\t\t\tconf,\n\t\t\tinterpreter,\n\t\t\tbytes.NewReader(script),\n\t\t\ttimeout,\n\t\t)\n\t\t/* Output with no error is a good thing */\n\t\tif nil == err {\n\t\t\tm := fmt.Sprintf(\"[%v] SUCCESS\", t)\n\t\t\tif 0 != len(o) {\n\t\t\t\tm += fmt.Sprintf(\": %\"+ofs, string(o))\n\t\t\t}\n\t\t\tlog.Printf(\"%v\", m)\n\t\t\tcontinue\n\t\t}\n\n\t\t/* An error with nil output is a setup error, not a script\n\t\terror */\n\t\tif nil == o {\n\t\t\tlog.Printf(\"[%v] ERROR: %v\", t, err)\n\t\t\tcontinue\n\t\t}\n\n\t\t/* All other errors are execution errors */\n\t\tm := fmt.Sprintf(\"[%v] FAIL (%v)\", t, err)\n\t\tif 0 != len(o) {\n\t\t\tm += fmt.Sprintf(\": %\"+ofs, string(o))\n\t\t}\n\t\tlog.Printf(\"%v\", m)\n\t}\n}", "func RepeatingKeyXOR(msg []byte, key []byte) (res []byte) {\n\tvar keyPos int\n\tkeyLen := len(key)\n\n\tfor _, b := range msg {\n\t\tif keyPos >= keyLen {\n\t\t\tkeyPos = 0\n\t\t}\n\n\t\tresb := b ^ key[keyPos]\n\t\tres = append(res, resb)\n\t\tkeyPos++\n\t}\n\treturn\n}", "func parseKey(index int, keys []*github.Key, pks []ssh.PublicKey, wg *sync.WaitGroup, errChan chan<- error) {\n\tdefer wg.Done()\n\n\tvar err error\n\tif pks[index], _, _, _, err = ssh.ParseAuthorizedKey([]byte(keys[index].GetKey())); err == nil {\n\t\treturn\n\t}\n\n\tselect {\n\tcase errChan <- err:\n\tdefault:\n\t}\n}", "func (k *Key) encrypt(ks *keys, ciphertext, plaintext []byte) (int, error) {\n\tif cap(ciphertext) < len(plaintext)+ivSize+hmacSize {\n\t\treturn 0, ErrBufferTooSmall\n\t}\n\n\t_, err := io.ReadFull(rand.Reader, ciphertext[:ivSize])\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"unable to generate new random iv: %v\", err))\n\t}\n\n\tc, err := aes.NewCipher(ks.Encrypt)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"unable to create cipher: %v\", err))\n\t}\n\n\te := cipher.NewCTR(c, ciphertext[:ivSize])\n\te.XORKeyStream(ciphertext[ivSize:cap(ciphertext)], plaintext)\n\tciphertext = ciphertext[:ivSize+len(plaintext)]\n\n\thm := hmac.New(sha256.New, ks.Sign)\n\n\tn, err := hm.Write(ciphertext)\n\tif err != nil || n != len(ciphertext) {\n\t\tpanic(fmt.Sprintf(\"unable to calculate hmac of ciphertext: %v\", err))\n\t}\n\n\tciphertext = hm.Sum(ciphertext)\n\n\treturn len(ciphertext), nil\n}", "func EciesEncrypt(sender *Account, recipentPub string, plainText []byte, iv []byte) (content string, err error) {\n\n\t// Get the shared-secret\n\t_, secretHash, err := EciesSecret(sender, recipentPub)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\thashAgain := sha512.New()\n\t_, err = hashAgain.Write(secretHash[:])\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tkeys := hashAgain.Sum(nil)\n\tkey := append(keys[:32]) // first half of sha512 hash of secret is used as key\n\tmacKey := append(keys[32:]) // 
second half as hmac key\n\n\t// Generate IV\n\tvar contentBuffer bytes.Buffer\n\tif len(iv) != 16 || bytes.Equal(iv, make([]byte, 16)) {\n\t\tiv = make([]byte, 16)\n\t\t_, err = rand.Read(iv)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\tcontentBuffer.Write(iv)\n\n\t// AES CBC for encryption,\n\tblock, err := aes.NewCipher(key)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tcbc := cipher.NewCBCEncrypter(block, iv)\n\n\t//// create pkcs#7 padding\n\tplainText = append(plainText, func() []byte {\n\t\tpadLen := block.BlockSize() - (len(plainText) % block.BlockSize())\n\t\tpad := make([]byte, padLen)\n\t\tfor i := range pad {\n\t\t\tpad[i] = uint8(padLen)\n\t\t}\n\t\treturn pad\n\t}()...)\n\n\t// encrypt the plaintext\n\tcipherText := make([]byte, len(plainText))\n\tcbc.CryptBlocks(cipherText, plainText)\n\tcontentBuffer.Write(cipherText)\n\n\t// Sign the message using sha256 hmac, *second* half of sha512 hash used as key\n\tsigner := hmac.New(sha256.New, macKey)\n\t_, err = signer.Write(contentBuffer.Bytes())\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tsignature := signer.Sum(nil)\n\tcontentBuffer.Write(signature)\n\n\t// base64 encode the message, and it's ready to be embedded in our FundsReq.Content or RecordSend.Content fields\n\tb64Buffer := bytes.NewBuffer([]byte{})\n\tencoded := base64.NewEncoder(base64.StdEncoding, b64Buffer)\n\t_, err = encoded.Write(contentBuffer.Bytes())\n\t_ = encoded.Close()\n\treturn string(b64Buffer.Bytes()), nil\n}", "func TestChallenge10(test *testing.T) {\n\tkey := []byte(\"YELLOW SUBMARINE\")\n\tciphertext := encodings.Base64ToBytes(getURL(\"https://cryptopals.com/static/challenge-data/10.txt\"))\n\tdecrypted, err := unsafeaes.DecryptCBC(ciphertext, key)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tif !strings.Contains(string(decrypted), \"bell\") {\n\t\ttest.Errorf(\"Expected %s to contain the phrase I'm back and I'm ringin' the bell\", string(decrypted))\n\t}\n}", "func encryptBytes(key []byte, plaintext []byte) ([]byte, error) {\n\t// Prepend 6 empty bytes to the plaintext so that the decryptor can verify\n\t// that decryption happened correctly.\n\tzeroes := make([]byte, numVerificationBytes)\n\tfulltext := append(zeroes, plaintext...)\n\n\t// Create the cipher and aead.\n\ttwofishCipher, err := twofish.NewCipher(key)\n\tif err != nil {\n\t\treturn nil, errors.New(\"unable to create twofish cipher: \" + err.Error())\n\t}\n\taead, err := cipher.NewGCM(twofishCipher)\n\tif err != nil {\n\t\treturn nil, errors.New(\"unable to create AEAD: \" + err.Error())\n\t}\n\n\t// Generate the nonce.\n\tnonce := make([]byte, aead.NonceSize())\n\t_, err = rand.Read(nonce)\n\tif err != nil {\n\t\treturn nil, errors.New(\"failed to generate entropy for nonce: \" + err.Error())\n\t}\n\n\t// Encrypt the data and return.\n\treturn aead.Seal(nonce, nonce, fulltext, nil), nil\n}", "func decrypt(_message string, _rotors [3]int, _ref int, _key [3]int) string {\n\tvar builder strings.Builder\n\n\tfor _, char := range _message {\n\t\t_key = rotorsIncr(_key, _rotors)\n\t\tvar rd = (byte(rotors[_rotors[2]][(byte(char)-65+byte(_key[2])+26)%26]) - 65 + 26 - byte(_key[2])) % 26\n\t\tvar rm = (byte(rotors[_rotors[1]][(rd+byte(_key[1])+26)%26]) - 65 + 26 - byte(_key[1])) % 26\n\t\tvar rg = (byte(rotors[_rotors[0]][(rm+byte(_key[0])+26)%26]) - 65 + 26 - byte(_key[0])) % 26\n\t\tvar r = byte(rotors[_ref][rg] - 65)\n\n\t\tvar rg2 = (byte(rotorsInv[_rotors[0]][(r+byte(_key[0])+26)%26]) - 65 + 26 - byte(_key[0])) % 26\n\t\tvar rm2 = 
(byte(rotorsInv[_rotors[1]][(rg2+byte(_key[1])+26)%26]) - 65 + 26 - byte(_key[1])) % 26\n\t\tvar rd2 = (byte(rotorsInv[_rotors[2]][(rm2+byte(_key[2])+26)%26]) - 65 + 26 - byte(_key[2])) % 26\n\t\tbuilder.WriteRune(rune(rd2 + 65))\n\t}\n\n\treturn builder.String()\n}", "func burstAuth(es *elasticsearch.Client, keys []ApiKey, nconns int) error {\n\tnkeys := len(keys)\n\n\tinCh := make(chan ApiKey, nkeys)\n\toutCh := make(chan SecurityInfo, 1024)\n\n\tdefer close(inCh)\n\n\tfmt.Printf(\"Authenticating %d keys using %d connections\\n\", nkeys, nconns)\n\n\t// create the subroutines\n\tfor i := 0; i < nconns; i++ {\n\n\t\tgo func() {\n\n\t\t\tfor key := range inCh {\n\n\t\t\t\tinfo, err := key.Authenticate(context.Background(), es)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\n\t\t\t\toutCh <- *info\n\t\t\t}\n\n\t\t}()\n\t}\n\n\t// fill the pipe\n\tfor _, key := range keys {\n\t\tinCh <- key\n\t}\n\n\t// wait for results\n\n\tlast := time.Now()\n\tfor i := 1; i <= nkeys; i++ {\n\t\t<-outCh\n\t\tif i%1000 == 0 || time.Since(last) >= (time.Second*10) {\n\t\t\tfmt.Printf(\"%v %v/%v\\n\", time.Now().Format(\"3:04:05PM\"), i, nkeys)\n\t\t\tlast = time.Now()\n\t\t}\n\t}\n\n\treturn nil\n}", "func Authorize(ee engine.Exchange) sknet.HandlerFunc {\n\treturn func(c *sknet.Context) {\n\t\tvar (\n\t\t\treq pp.EncryptReq\n\t\t\trlt *pp.EmptyRes\n\t\t)\n\n\t\tfor {\n\t\t\tif c.BindJSON(&req) == nil {\n\t\t\t\t// validate pubkey.\n\t\t\t\tif err := validatePubkey(req.GetPubkey()); err != nil {\n\t\t\t\t\tlogger.Error(err.Error())\n\t\t\t\t\trlt = pp.MakeErrResWithCode(pp.ErrCode_WrongPubkey)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tpubkey, err := cipher.PubKeyFromHex(req.GetPubkey())\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogger.Error(err.Error())\n\t\t\t\t\trlt = pp.MakeErrResWithCode(pp.ErrCode_WrongPubkey)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tkey := cipher.ECDH(pubkey, ee.GetServPrivKey())\n\t\t\t\tdata, err := cipher.Chacha20Decrypt(req.GetEncryptdata(), key, req.GetNonce())\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogger.Error(err.Error())\n\t\t\t\t\trlt = pp.MakeErrResWithCode(pp.ErrCode_UnAuthorized)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tok, err := regexp.MatchString(`^\\{.*\\}$`, string(data))\n\t\t\t\tif err != nil || !ok {\n\t\t\t\t\tlogger.Error(err.Error())\n\t\t\t\t\trlt = pp.MakeErrResWithCode(pp.ErrCode_UnAuthorized)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tc.Set(\"rawdata\", data)\n\n\t\t\t\tc.Next()\n\n\t\t\t\trsp, exist := c.Get(\"response\")\n\t\t\t\tif exist {\n\t\t\t\t\t// encrypt the response.\n\t\t\t\t\tencData, nonce, err := pp.Encrypt(rsp, pubkey.Hex(), ee.GetServPrivKey().Hex())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tpanic(err)\n\t\t\t\t\t}\n\n\t\t\t\t\t// encryptData, nonce := mustEncryptRes(cliPubkey, ee.GetServPrivKey(), rsp)\n\t\t\t\t\tres := pp.EncryptRes{\n\t\t\t\t\t\tResult: pp.MakeResultWithCode(pp.ErrCode_Success),\n\t\t\t\t\t\tEncryptdata: encData,\n\t\t\t\t\t\tNonce: nonce,\n\t\t\t\t\t}\n\t\t\t\t\tc.JSON(res)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\trlt = pp.MakeErrRes(errors.New(\"bad request\"))\n\t\t\tbreak\n\t\t}\n\t\tc.JSON(rlt)\n\t}\n}", "func RecoverKey(constr *chow.Construction) []byte {\n\tround1, round2 := round{\n\t\tconstruction: constr,\n\t\tround: 1,\n\t}, round{\n\t\tconstruction: constr,\n\t\tround: 2,\n\t}\n\n\t// Decomposition Phase\n\tconstr1 := aspn.DecomposeSPN(round1, cspn.SAS)\n\tconstr2 := aspn.DecomposeSPN(round2, cspn.SAS)\n\n\tvar (\n\t\tleading, middle, trailing sboxLayer\n\t\tleft, right = 
affineLayer(constr1[1].(encoding.BlockAffine)), affineLayer(constr2[1].(encoding.BlockAffine))\n\t)\n\n\tfor pos := 0; pos < 16; pos++ {\n\t\tleading[pos] = constr1[0].(encoding.ConcatenatedBlock)[pos]\n\t\tmiddle[pos] = encoding.ComposedBytes{\n\t\t\tconstr1[2].(encoding.ConcatenatedBlock)[pos],\n\t\t\tconstr2[0].(encoding.ConcatenatedBlock)[common.ShiftRows(pos)],\n\t\t}\n\t\ttrailing[pos] = constr2[2].(encoding.ConcatenatedBlock)[pos]\n\t}\n\n\t// Disambiguation Phase\n\t// Disambiguate the affine layer.\n\tlin, lout := left.clean()\n\trin, rout := right.clean()\n\n\tleading.rightCompose(lin, common.NoShift)\n\tmiddle.leftCompose(lout, common.NoShift).rightCompose(rin, common.ShiftRows)\n\ttrailing.leftCompose(rout, common.NoShift)\n\n\t// The SPN decomposition naturally leaves the affine layers without a constant part.\n\t// We would push it into the S-boxes here if that wasn't the case.\n\n\t// Move the constant off of the input and output of the S-boxes.\n\tmcin, mcout := middle.cleanConstant()\n\tmcin, mcout = left.Decode(mcin), right.Encode(mcout)\n\n\tleading.rightCompose(encoding.DecomposeConcatenatedBlock(encoding.BlockAdditive(mcin)), common.NoShift)\n\ttrailing.leftCompose(encoding.DecomposeConcatenatedBlock(encoding.BlockAdditive(mcout)), common.NoShift)\n\n\t// Move the multiplication off of the input and output of the middle S-boxes.\n\tmlin, mlout := middle.cleanLinear()\n\n\tleading.rightCompose(mlin, common.NoShift)\n\ttrailing.leftCompose(mlout, common.NoShift)\n\n\t// fmt.Println(encoding.ProbablyEquivalentBlocks(\n\t// \tencoding.ComposedBlocks{aspn.Encoding{round1}, ShiftRows{}, aspn.Encoding{round2}},\n\t// \tencoding.ComposedBlocks{leading, left, middle, ShiftRows{}, right, trailing},\n\t// ))\n\t// Output: true\n\n\t// Extract the key from the leading S-boxes.\n\tkey := [16]byte{}\n\n\tfor pos := 0; pos < 16; pos++ {\n\t\tfor guess := 0; guess < 256; guess++ {\n\t\t\tcand := encoding.ComposedBytes{\n\t\t\t\tleading[pos], encoding.ByteAdditive(guess), encoding.InverseByte{sbox{}},\n\t\t\t}\n\n\t\t\tif isAS(cand) {\n\t\t\t\tkey[pos] = byte(guess)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tkey = left.Encode(key)\n\n\treturn backOneRound(backOneRound(key[:], 2), 1)\n}", "func RepeatingKey(plaintext []byte, key []byte) []byte {\n\tciphertext := make([]byte, len(plaintext))\n\tkeyLength := len(key)\n\tfor i, thisByte := range plaintext {\n\t\tciphertext[i] = thisByte ^ key[i%keyLength]\n\t}\n\treturn ciphertext\n}", "func generate_keys(key string, round_keys *([]string)) {\n\t// The PC1 table\n\tpc1 := [56]int{\n\t\t57, 49, 41, 33, 25, 17, 9,\n\t\t1, 58, 50, 42, 34, 26, 18,\n\t\t10, 2, 59, 51, 43, 35, 27,\n\t\t19, 11, 3, 60, 52, 44, 36,\n\t\t63, 55, 47, 39, 31, 23, 15,\n\t\t7, 62, 54, 46, 38, 30, 22,\n\t\t14, 6, 61, 53, 45, 37, 29,\n\t\t21, 13, 5, 28, 20, 12, 4,\n\t}\n\t// The PC2 table\n\tpc2 := [48]int{\n\t\t14, 17, 11, 24, 1, 5,\n\t\t3, 28, 15, 6, 21, 10,\n\t\t23, 19, 12, 4, 26, 8,\n\t\t16, 7, 27, 20, 13, 2,\n\t\t41, 52, 31, 37, 47, 55,\n\t\t30, 40, 51, 45, 33, 48,\n\t\t44, 49, 39, 56, 34, 53,\n\t\t46, 42, 50, 36, 29, 32,\n\t}\n\t// 1. Compressing the key using the PC1 table\n\tperm_key := \"\"\n\tfor i := 0; i < 56; i++ {\n\t\tperm_key += string(key[pc1[i]-1])\n\t}\n\t// 2. Dividing the key into two equal halves\n\t// left := perm_key.substr(0, 28)\n\tleft := perm_key[0:28]\n\tright := perm_key[28:56]\n\tfor i := 0; i < 16; i++ {\n\t\t// 3.1. 
For rounds 1, 2, 9, 16 the key_chunks\n\t\t// are shifted by one.\n\t\tif i == 0 || i == 1 || i == 8 || i == 15 {\n\t\t\tleft = shift_left_once(left)\n\t\t\tright = shift_left_once(right)\n\t\t} else {\n\t\t\t// 3.2. For other rounds, the key_chunks\n\t\t\t// are shifted by two\n\t\t\tleft = shift_left_twice(left)\n\t\t\tright = shift_left_twice(right)\n\t\t}\n\t\t// Combining the two chunks\n\t\tcombined_key := left + right\n\t\tround_key := \"\"\n\t\t// Finally, using the PC2 table to transpose the key bits\n\t\tfor i := 0; i < 48; i++ {\n\t\t\tround_key += string(combined_key[pc2[i]-1])\n\t\t}\n\t\t(*round_keys)[i] = round_key\n\t}\n\n}", "func keyextend(mac mac, data, digest, suffix []byte, maxlen int) (guesshash, msg []byte) {\n\tregs := sha1regs(digest)\n\tfor i := 1; i <= maxlen; i++ {\n\t\tmsg = append(data, sha1glue(i+len(data))...)\n\t\tmsg = append(msg, suffix...)\n\t\tguesshash = sha1sum(suffix, regs, i+len(msg))\n\t\tif mac.valid(guesshash, msg) {\n\t\t\treturn guesshash, msg\n\t\t}\n\t}\n\treturn nil, nil\n}", "func deriveKeys(passphrase, salt []byte, logN, r, p int) (cipherKey, hmacKey []byte) {\n\tkeyLen := keySize + hashFunc.Size()\n\tkey, err := scrypt.Key(passphrase, salt, 1<<uint(logN), r, p, keyLen)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tcipherKey, hmacKey = key[:keySize], key[keySize:]\n\treturn\n}", "func decipher(c string,k []int) string{\n\tvar cipher,decipher []int\n\tfor i := 0; i < len(c); i+=2 {\n\t\tcipher = append(cipher,formSample(c[i:i+2]))\n\t}\n\tfor i := 0; i < len(k); i++ {\n\t\tdecipher = append(decipher,cipher[i]^k[i])\n\t}\n\n\tvar s string\n\tfor i := 0; i < len(decipher); i++ {\n\t\ts+=string(decipher[i])\n\t}\n\treturn s\n\n}", "func RunChallenge11() {\n\tutil.PrintChallengeHeader(2, 11)\n\n\tplaintext := \"DUPLICATEBLOCKS!DUPLICATEBLOCKS!DUPLICATEBLOCKS!\"\n\tfor i := 0; i < 10; i++ {\n\t\tciphertext, _ := aes.EncryptRandom([]byte(plaintext))\n\t\tfmt.Println(hex.EncodeToString(ciphertext))\n\t\taesMode := aes.DetectAesMode(ciphertext, 16)\n\t\tswitch aesMode {\n\t\tcase aes.ECB:\n\t\t\tprintln(\"Probably AES ECB\")\n\t\tcase aes.CBC:\n\t\t\tprintln(\"Probably AES CBC\")\n\t\t}\n\t}\n}", "func (cry *crypt) encrypt(packet []byte) []byte {\n\tif cry.kcr == nil || cry.teo.param.DisallowEncrypt {\n\t\treturn packet\n\t}\n\tbuf := make([]byte, len(packet)+int(C.ksnCryptGetBlockSize(cry.kcr))+C.sizeof_size_t)\n\tvar encryptLen C.size_t\n\tbufPtr := unsafe.Pointer(&buf[0])\n\tpacketPtr := unsafe.Pointer(&packet[0])\n\tC.ksnEncryptPackage(cry.kcr, packetPtr, C.size_t(len(packet)), bufPtr, &encryptLen)\n\tbuf = buf[:encryptLen]\n\treturn buf\n}", "func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {\n\tif len(to) == 0 {\n\t\treturn nil, errors.InvalidArgumentError(\"no encryption recipient provided\")\n\t}\n\n\t// These are the possible ciphers that we'll use for the message.\n\tcandidateCiphers := []uint8{\n\t\tuint8(packet.CipherAES128),\n\t\tuint8(packet.CipherAES256),\n\t\tuint8(packet.CipherCAST5),\n\t}\n\t// These are the possible hash functions that we'll use for the signature.\n\tcandidateHashes := []uint8{\n\t\thashToHashId(crypto.SHA256),\n\t\thashToHashId(crypto.SHA384),\n\t\thashToHashId(crypto.SHA512),\n\t\thashToHashId(crypto.SHA1),\n\t\thashToHashId(crypto.RIPEMD160),\n\t}\n\t// In the event that a recipient doesn't specify any supported ciphers\n\t// or hash functions, these are the ones that we assume that every\n\t// implementation 
supports.\n\tdefaultCiphers := candidateCiphers[len(candidateCiphers)-1:]\n\tdefaultHashes := candidateHashes[len(candidateHashes)-1:]\n\n\tencryptKeys := make([]Key, len(to))\n\tfor i := range to {\n\t\tvar ok bool\n\t\tencryptKeys[i], ok = to[i].encryptionKey(config.Now())\n\t\tif !ok {\n\t\t\treturn nil, errors.InvalidArgumentError(\"cannot encrypt a message to key id \" + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + \" because it has no encryption keys\")\n\t\t}\n\n\t\tsig := to[i].primaryIdentity().SelfSignature\n\n\t\tpreferredSymmetric := sig.PreferredSymmetric\n\t\tif len(preferredSymmetric) == 0 {\n\t\t\tpreferredSymmetric = defaultCiphers\n\t\t}\n\t\tpreferredHashes := sig.PreferredHash\n\t\tif len(preferredHashes) == 0 {\n\t\t\tpreferredHashes = defaultHashes\n\t\t}\n\t\tcandidateCiphers = intersectPreferences(candidateCiphers, preferredSymmetric)\n\t\tcandidateHashes = intersectPreferences(candidateHashes, preferredHashes)\n\t}\n\n\tif len(candidateCiphers) == 0 || len(candidateHashes) == 0 {\n\t\treturn nil, errors.InvalidArgumentError(\"cannot encrypt because recipient set shares no common algorithms\")\n\t}\n\n\tcipher := packet.CipherFunction(candidateCiphers[0])\n\t// If the cipher specified by config is a candidate, we'll use that.\n\tconfiguredCipher := config.Cipher()\n\tfor _, c := range candidateCiphers {\n\t\tcipherFunc := packet.CipherFunction(c)\n\t\tif cipherFunc == configuredCipher {\n\t\t\tcipher = cipherFunc\n\t\t\tbreak\n\t\t}\n\t}\n\n\tsymKey := make([]byte, cipher.KeySize())\n\tif _, err := io.ReadFull(config.Random(), symKey); err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, key := range encryptKeys {\n\t\tif err := packet.SerializeEncryptedKey(ciphertext, key.PublicKey, cipher, symKey, config); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tpayload, err := packet.SerializeSymmetricallyEncrypted(ciphertext, cipher, symKey, config)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn writeAndSign(payload, candidateHashes, signed, hints, config)\n}", "func encrypt(state, expkey []uint32) {\n\tkeyi := 0\n\taddRoundKey(state, expkey[keyi:keyi+4])\n\tkeyi += 4\n\trounds := len(expkey)/4 - 2\n\tfor i := 0; i < rounds; i++ {\n\t\tsubBytes(state)\n\t\tshiftRows(state)\n\t\tmixColumns(state)\n\t\taddRoundKey(state, expkey[keyi:keyi+4])\n\t\tkeyi += 4\n\t}\n\tsubBytes(state)\n\tshiftRows(state)\n\taddRoundKey(state, expkey[keyi:keyi+4])\n}", "func mitmDHGroup(g, p *big.Int) bool {\n\t// This mitm function doesn't change anything, but I wanted to reuse code\n\t// from challenge 34.\n\tmitm := func(msg *dhMsg) dhMsg {\n\t\treturn msg.Copy()\n\t}\n\n\t// Open a channel to a simulated Bob.\n\tbobch := make(chan dhMsg)\n\tgo bob(bobch, mitm)\n\n\t// Perform the key exchange.\n\tkex := dhProtocol(bobch, g, p, mitm)\n\n\t// Encrypt the message.\n\tplaintext := []byte(\"hello\")\n\tciphertext, err := kex.Encrypt(plaintext)\n\tif err != nil {\n\t\tcryptopals.PrintError(err)\n\t\treturn false\n\t}\n\n\t// The man-in-the-middle can forge his own dhKeyExchange since he changed g\n\t// to a value that always produces a known value. When g = 1, g^(xy) is\n\t// equal to 1 and when g = p, g^(xy) is 0 (because we're working mod p). 
So\n\t// we forge a dhKeyExchange struct and set the secret private key x to 1 so\n\t// that we can craft Y to produce the correct session key.\n\tmitmKex := dhKeyExchange{}\n\tmitmKex.Init(g, p)\n\tone := big.NewInt(1)\n\tmitmKex.x = one\n\n\tswitch {\n\tcase g.Cmp(one) == 0:\n\t\tmitmKex.Y = one\n\tcase g.Cmp(p) == 0:\n\t\tmitmKex.Y = big.NewInt(0)\n\tdefault:\n\t\tcryptopals.PrintError(errors.New(\"g is not valid for this attack\"))\n\t\treturn false\n\t}\n\n\t// The man-in-the-middle can now decrypt messages encrypted with the\n\t// diffie-hellman session key.\n\tourtext, err := mitmKex.Decrypt(ciphertext)\n\tif err != nil {\n\t\tcryptopals.PrintError(err)\n\t\treturn false\n\t}\n\n\t// Send the ciphertext to Bob.\n\tbobch <- mitm(&dhMsg{t: dhSendMsg, msg: ciphertext})\n\n\t// Receive Bob's response.\n\tans := <-bobch\n\tif !ans.ok {\n\t\tcryptopals.PrintError(ans.err)\n\t\treturn false\n\t}\n\n\t// The MITM can decrypt Bob's message.\n\tbobtext, err := mitmKex.Decrypt(ans.msg)\n\tif err != nil {\n\t\tcryptopals.PrintError(err)\n\t\treturn false\n\t}\n\n\t// Test if the decryptions are correct.\n\tif bytes.Equal(bobtext, []byte(\"hi\")) && bytes.Equal(ourtext, []byte(\"hello\")) {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func PrepareAESOldmtp(localKey []byte, msgKey []byte) (key []byte, iv []byte) {\n\tdataA := []byte{}\n\tdataA = append(dataA, msgKey...)\n\tdataA = append(dataA, localKey[8:][:32]...)\n\n\tdataB := []byte{}\n\tdataB = append(dataB, localKey[(8 + 32):][:16]...)\n\tdataB = append(dataB, msgKey...)\n\tdataB = append(dataB, localKey[(8 + 32 + 16):][:16]...)\n\n\tdataC := []byte{}\n\tdataC = append(dataC, localKey[(8 + 32 + 16 + 16):][:32]...)\n\tdataC = append(dataC, msgKey...)\n\n\tdataD := []byte{}\n\tdataD = append(dataD, msgKey...)\n\tdataD = append(dataD, localKey[(8 + 32 + 16 + 16 + 32):][:32]...)\n\n\tsha1A := sha1.Sum(dataA)\n\tsha1B := sha1.Sum(dataB)\n\tsha1C := sha1.Sum(dataC)\n\tsha1D := sha1.Sum(dataD)\n\n\tkey = []byte{}\n\tkey = append(key, sha1A[:8]...)\n\tkey = append(key, sha1B[8:20]...)\n\tkey = append(key, sha1C[4:16]...)\n\n\tiv = []byte{}\n\tiv = append(iv, sha1A[8:20]...)\n\tiv = append(iv, sha1B[:8]...)\n\tiv = append(iv, sha1C[16:20]...)\n\tiv = append(iv, sha1D[:8]...)\n\treturn key, iv\n}", "func EncryptRepeatingKeyXor(plaintext []byte, key []byte) []byte {\n\tresult := make([]byte, len(plaintext))\n\tkeyLen := len(key)\n\tfor i, b := range plaintext {\n\t\tresult[i] = b ^ key[i%keyLen]\n\t}\n\treturn result\n}", "func (x *xorCipher) XORKeyStream(dst, src []byte) {\n\t// Panic if dst is smaller than src.\n\tfor i := range src {\n\t\tdst[i] = src[i] ^ x.key[x.pos]\n\t\tx.pos++\n\t\tif x.pos == len(x.key) {\n\t\t\tx.pos = 0\n\t\t}\n\t}\n}", "func Encrypt(msg []byte, key []byte) []byte {\n\n\tpaddedMsg := challenge_9.PadMessage(msg, len(key))\n\tcipherText := make([]byte, len(paddedMsg))\n\n\tcipherText = challenge_11.EcbEncrypt(paddedMsg, key)\n\treturn cipherText\n}", "func blockCipher(data []byte, key []byte) ([]byte, error) {\n\tif len(data)%len(key) != 0 {\n\t\t//TODO don't do this. Should I pad the data? 
Will I use this for only ciphertext, because then no pad, but if used for plaintext also then padding is a good idea.\n\t\t//append null characters to the end for pad.\n\t\t//make will create a []byte of length needed with initial values of 0x00\n\t\tdata = append(data, make([]byte, (len(key)-(len(data)%len(key))))...)\n\t}\n\n\t//xor every byte with the corresponding key byte\n\tencoded := make([]byte, len(data))\n\tfor i, _ := range data {\n\t\t//data is xored with the corresponding key byte\n\t\tencoded[i] = data[i] ^ key[i%len(key)]\n\t}\n\treturn encoded, nil\n}", "func (s *KeyAgentTestSuite) TestLoadKey(c *check.C) {\n\tuserdata := []byte(\"hello, world\")\n\n\t// make a new local agent\n\tlka, err := NewLocalAgent(s.keyDir, s.hostname, s.username)\n\tc.Assert(err, check.IsNil)\n\n\t// unload any keys that might be in the agent for this user\n\terr = lka.UnloadKey()\n\tc.Assert(err, check.IsNil)\n\n\t// get all the keys in the teleport and system agent\n\tteleportAgentKeys, err := lka.Agent.List()\n\tc.Assert(err, check.IsNil)\n\tteleportAgentInitialKeyCount := len(teleportAgentKeys)\n\tsystemAgentKeys, err := lka.sshAgent.List()\n\tc.Assert(err, check.IsNil)\n\tsystemAgentInitialKeyCount := len(systemAgentKeys)\n\n\t// load the key to the twice, this should only\n\t// result in one key for this user in the agent\n\t_, err = lka.LoadKey(*s.key)\n\tc.Assert(err, check.IsNil)\n\t_, err = lka.LoadKey(*s.key)\n\tc.Assert(err, check.IsNil)\n\n\t// get all the keys in the teleport and system agent\n\tteleportAgentKeys, err = lka.Agent.List()\n\tc.Assert(err, check.IsNil)\n\tsystemAgentKeys, err = lka.sshAgent.List()\n\tc.Assert(err, check.IsNil)\n\n\t// check if we have the correct counts\n\tc.Assert(teleportAgentKeys, check.HasLen, teleportAgentInitialKeyCount+2)\n\tc.Assert(systemAgentKeys, check.HasLen, systemAgentInitialKeyCount+2)\n\n\t// now sign data using the teleport agent and system agent\n\tteleportAgentSignature, err := lka.Agent.Sign(teleportAgentKeys[0], userdata)\n\tc.Assert(err, check.IsNil)\n\tsystemAgentSignature, err := lka.sshAgent.Sign(systemAgentKeys[0], userdata)\n\tc.Assert(err, check.IsNil)\n\n\t// parse the pem bytes for the private key, create a signer, and extract the public key\n\tsshPrivateKey, err := ssh.ParseRawPrivateKey(s.key.Priv)\n\tc.Assert(err, check.IsNil)\n\tsshSigner, err := ssh.NewSignerFromKey(sshPrivateKey)\n\tc.Assert(err, check.IsNil)\n\tsshPublicKey := sshSigner.PublicKey()\n\n\t// verify data signed by both the teleport agent and system agent was signed correctly\n\tsshPublicKey.Verify(userdata, teleportAgentSignature)\n\tc.Assert(err, check.IsNil)\n\tsshPublicKey.Verify(userdata, systemAgentSignature)\n\tc.Assert(err, check.IsNil)\n\n\t// unload all keys from the teleport agent and system agent\n\terr = lka.UnloadKey()\n\tc.Assert(err, check.IsNil)\n}", "func (cry *crypt) decrypt(packet []byte, key string) ([]byte, error) {\n\tif cry.kcr == nil {\n\t\treturn packet, errors.New(\"crypt module does not initialized\")\n\t}\n\n\terrCantDecript := func() (err error) {\n\t\terr = fmt.Errorf(\"can't decript %d bytes packet (try to use \"+\n\t\t\t\"without decrypt), channel key: %s\", len(packet), key)\n\t\tteolog.DebugVv(MODULE, err.Error())\n\t\treturn\n\t}\n\n\t// Empty packet\n\tif packet == nil || len(packet) == 0 {\n\t\treturn packet, errCantDecript()\n\t}\n\n\tvar err error\n\tvar decryptLen C.size_t\n\tpacketPtr := unsafe.Pointer(&packet[0])\n\tC.ksnDecryptPackage(cry.kcr, packetPtr, C.size_t(len(packet)), &decryptLen)\n\tif decryptLen > 0 
{\n\t\tpacket = packet[2 : decryptLen+2]\n\t\tteolog.DebugVvf(MODULE, \"decripted to %d bytes packet, channel key: %s\\n\",\n\t\t\tdecryptLen, key)\n\t} else {\n\t\terr = errCantDecript()\n\t}\n\treturn packet, err\n}", "func TestVigenereCipher(t *testing.T) {\n\tvar vigenere Vigenere\n\n\tcases := []struct {\n\t\tcaseString string\n\t\tcaseKey string\n\t\texpected string\n\t\t// Tells if a case is of success or fail\n\t\tsuccess bool\n\t}{\n\t\t{\n\t\t\tcaseString: \"Deus e bom, o tempo todo\",\n\t\t\tcaseKey: \"UnB\",\n\t\t\texpected: \"Xrvm r ciz, p nrnjb uiqp\",\n\t\t\tsuccess: true,\n\t\t},\n\n\t\t{\n\t\t\tcaseString: \"Fim de semestre eh assim\",\n\t\t\tcaseKey: \"hard\",\n\t\t\texpected: \"Mid gl svplskul ey dzszp\",\n\t\t\tsuccess: true,\n\t\t},\n\n\t\t{\n\t\t\tcaseString: \"this year was a tragic year\",\n\t\t\tcaseKey: \"corona\",\n\t\t\texpected: \"vvzg lecf nof a vfruvc asrf\",\n\t\t\tsuccess: true,\n\t\t},\n\n\t\t{\n\t\t\tcaseString: \"die Kunst des Rechnens\",\n\t\t\tcaseKey: \"GOLANG\",\n\t\t\texpected: \"jwp Khtyh oef Xkqsnrty\",\n\t\t\tsuccess: true,\n\t\t},\n\n\t\t{\n\t\t\tcaseString: \"a chave de codificacao eh restrita\",\n\t\t\tcaseKey: \"%\",\n\t\t\texpected: \"? a,?:) () a3(-*-a?a?3 ), 6)786-8?\",\n\t\t\tsuccess: false,\n\t\t},\n\n\t\t{\n\t\t\tcaseString: \"somente caracteres alfabeticos da ascii podem ser utilizados\",\n\t\t\tcaseKey: \"123\",\n\t\t\texpected: \"c@?5?f5 43b25d6d5d 3<7326f94ac 53 1d59: b?57= d7b ff9=;j26?d\",\n\t\t\tsuccess: false,\n\t\t},\n\n\t\t{\n\t\t\tcaseString: \"Porem, tanto faz Usar MaIUsCUlo ou MINUSculo\",\n\t\t\tcaseKey: \"GOisNice\",\n\t\t\texpected: \"Vczwz, bcrzc nsm Cuex AiAHaEYrc wm ZQPYYqcdb\",\n\t\t\tsuccess: true,\n\t\t},\n\t}\n\tfor _, c := range cases {\n\t\tif c.success {\n\t\t\t// Success cases\n\t\t\tt.Logf(\"Vigenere testing: %s <key: %s> -> %s\", c.caseString, c.caseKey, c.expected)\n\t\t\tresult, err := vigenere.Cipher(c.caseString, c.caseKey)\n\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"Vigenere FAILED: %s <key: %s> -> expected: %s; got ERROR: %s\", c.caseString, c.caseKey, c.expected, err)\n\t\t\t}\n\n\t\t\tif result != c.expected {\n\t\t\t\tt.Errorf(\"Vigenere FAILED: %s <key: %s> -> expected: %s; got: %s\", c.caseString, c.caseKey, c.expected, result)\n\t\t\t}\n\t\t} else {\n\t\t\t// Fail cases\n\t\t\tt.Logf(\"Vigenere testing: %s <key: %s> -> expected err\", c.caseString, c.caseKey)\n\t\t\tresult, err := vigenere.Cipher(c.caseString, c.caseKey)\n\n\t\t\tif err == nil {\n\t\t\t\tt.Errorf(\"Vigenere FAILED: %s <key: %s> -> expected error, but got: %s\", c.caseString, c.caseKey, result)\n\t\t\t}\n\t\t}\n\t}\n}", "func KeyExchange(m messages.Base) (messages.Base, error) {\n\tif core.Debug {\n\t\tmessage(\"debug\", \"Entering into agents.KeyExchange function\")\n\t}\n\n\tserverKeyMessage := messages.Base{\n\t\tID: m.ID,\n\t\tVersion: 1.0,\n\t\tType: \"KeyExchange\",\n\t\tPadding: core.RandStringBytesMaskImprSrc(4096),\n\t}\n\n\t// Make sure the agent has previously authenticated\n\tif !isAgent(m.ID) {\n\t\treturn serverKeyMessage, fmt.Errorf(\"the agent does not exist\")\n\t}\n\n\tlogging.Server(fmt.Sprintf(\"Received new agent key exchange from %s\", m.ID))\n\n\tke := m.Payload.(messages.KeyExchange)\n\n\tif core.Debug {\n\t\tmessage(\"debug\", fmt.Sprintf(\"Received new public key from %s:\\r\\n%v\", m.ID, ke.PublicKey))\n\t}\n\n\tserverKeyMessage.ID = Agents[m.ID].ID\n\tAgents[m.ID].PublicKey = ke.PublicKey\n\n\t// Generate key pair\n\tprivateKey, rsaErr := rsa.GenerateKey(rand.Reader, 4096)\n\tif rsaErr != nil 
{\n\t\treturn serverKeyMessage, fmt.Errorf(\"there was an error generating the RSA key pair:\\r\\n%s\", rsaErr.Error())\n\t}\n\n\tAgents[m.ID].RSAKeys = privateKey\n\n\tif core.Debug {\n\t\tmessage(\"debug\", fmt.Sprintf(\"Server's Public Key: %v\", Agents[m.ID].RSAKeys.PublicKey))\n\t}\n\n\tpk := messages.KeyExchange{\n\t\tPublicKey: Agents[m.ID].RSAKeys.PublicKey,\n\t}\n\n\tserverKeyMessage.ID = m.ID\n\tserverKeyMessage.Payload = pk\n\n\tif core.Debug {\n\t\tmessage(\"debug\", \"Leaving agents.KeyExchange returning without error\")\n\t\tmessage(\"debug\", fmt.Sprintf(\"serverKeyMessage: %v\", serverKeyMessage))\n\t}\n\treturn serverKeyMessage, nil\n}", "func AesGCM256EncryptWithChaos(keys []string, raw []byte) ([]byte, error) {\n\tfor _, key := range keys {\n\t\tencKey, nonce := chaos(key)\n\n\t\tblock, err := aes.NewCipher(encKey)\n\t\tif err != nil {\n\t\t\treturn nil, errors.WithStack(err)\n\t\t}\n\n\t\taead, err := cipher.NewGCM(block)\n\t\tif err != nil {\n\t\t\treturn nil, errors.WithStack(err)\n\t\t}\n\n\t\traw = aead.Seal(nil, nonce, raw, nil)\n\t}\n\n\treturn raw, nil\n}", "func (hs *HandshakeState) validateKeys() error {\n\tfor _, line := range hs.hp.MessagePattern {\n\t\tfor _, token := range line[1:] {\n\t\t\tif token == pattern.TokenS && hs.mustWrite(line[0]) {\n\t\t\t\t// s must NOT be empty for writing\n\t\t\t\tif hs.localStatic == nil {\n\t\t\t\t\tif err := hs.handleMissingKeyS(); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func SSHKey(i interface{}, k string) (warnings []string, errors []error) {\n\tv, ok := i.(string)\n\tif !ok {\n\t\treturn nil, []error{fmt.Errorf(\"expected type of %q to be string\", k)}\n\t}\n\n\tif strings.TrimSpace(v) == \"\" {\n\t\treturn nil, []error{fmt.Errorf(\"expected %q to not be an empty string or whitespace\", k)}\n\t}\n\n\tkeyParts := strings.Fields(v)\n\tif len(keyParts) > 1 {\n\t\tbyteStr, err := base64.StdEncoding.DecodeString(keyParts[1])\n\t\tif err != nil {\n\t\t\treturn nil, []error{fmt.Errorf(\"decoding %q for public key data\", k)}\n\t\t}\n\t\tpubKey, err := ssh.ParsePublicKey(byteStr)\n\t\tif err != nil {\n\t\t\treturn nil, []error{fmt.Errorf(\"parsing %q as a public key object\", k)}\n\t\t}\n\n\t\tif pubKey.Type() != ssh.KeyAlgoRSA {\n\t\t\treturn nil, []error{fmt.Errorf(\"- the provided %s SSH key is not supported. Only RSA SSH keys are supported by Azure\", pubKey.Type())}\n\t\t} else {\n\t\t\trsaPubKey, ok := pubKey.(ssh.CryptoPublicKey).CryptoPublicKey().(*rsa.PublicKey)\n\t\t\tif !ok {\n\t\t\t\treturn nil, []error{fmt.Errorf(\"- could not retrieve the RSA public key from the SSH public key\")}\n\t\t\t}\n\t\t\trsaPubKeyBits := rsaPubKey.Size() * 8\n\t\t\tif rsaPubKeyBits < 2048 {\n\t\t\t\treturn nil, []error{fmt.Errorf(\"- the provided RSA SSH key has %d bits. 
Only ssh-rsa keys with 2048 bits or higher are supported by Azure\", rsaPubKeyBits)}\n\t\t\t}\n\t\t}\n\t} else {\n\t\treturn nil, []error{fmt.Errorf(\"%q is not a complete SSH2 Public Key\", k)}\n\t}\n\n\treturn warnings, errors\n}", "func encryptDecryptXor(input string, key byte) (output string) {\n for i := 0; i < len(input); i++ {\n output += string(input[i] ^ key)\n }\n return output\n}", "func processRecipientKeys(recipients []string) ([][]byte, [][]byte, [][]byte, [][]byte, [][]byte, [][]byte, error) {\n\tvar (\n\t\tgpgRecipients [][]byte\n\t\tpubkeys [][]byte\n\t\tx509s [][]byte\n\t\tpkcs11Pubkeys [][]byte\n\t\tpkcs11Yamls [][]byte\n\t\tkeyProviders [][]byte\n\t)\n\n\tfor _, recipient := range recipients {\n\n\t\tidx := strings.Index(recipient, \":\")\n\t\tif idx < 0 {\n\t\t\treturn nil, nil, nil, nil, nil, nil, errors.New(\"Invalid recipient format\")\n\t\t}\n\n\t\tprotocol := recipient[:idx]\n\t\tvalue := recipient[idx+1:]\n\n\t\tswitch protocol {\n\t\tcase \"pgp\":\n\t\t\tgpgRecipients = append(gpgRecipients, []byte(value))\n\n\t\tcase \"jwe\":\n\t\t\ttmp, err := os.ReadFile(value)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, nil, nil, nil, nil, fmt.Errorf(\"Unable to read file: %w\", err)\n\t\t\t}\n\t\t\tif !encutils.IsPublicKey(tmp) {\n\t\t\t\treturn nil, nil, nil, nil, nil, nil, errors.New(\"File provided is not a public key\")\n\t\t\t}\n\t\t\tpubkeys = append(pubkeys, tmp)\n\n\t\tcase \"pkcs7\":\n\t\t\ttmp, err := os.ReadFile(value)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, nil, nil, nil, nil, fmt.Errorf(\"Unable to read file: %w\", err)\n\t\t\t}\n\t\t\tif !encutils.IsCertificate(tmp) {\n\t\t\t\treturn nil, nil, nil, nil, nil, nil, errors.New(\"File provided is not an x509 cert\")\n\t\t\t}\n\t\t\tx509s = append(x509s, tmp)\n\n\t\tcase \"pkcs11\":\n\t\t\ttmp, err := os.ReadFile(value)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, nil, nil, nil, nil, fmt.Errorf(\"Unable to read file: %w\", err)\n\t\t\t}\n\t\t\tif encutils.IsPkcs11PublicKey(tmp) {\n\t\t\t\tpkcs11Yamls = append(pkcs11Yamls, tmp)\n\t\t\t} else if encutils.IsPublicKey(tmp) {\n\t\t\t\tpkcs11Pubkeys = append(pkcs11Pubkeys, tmp)\n\t\t\t} else {\n\t\t\t\treturn nil, nil, nil, nil, nil, nil, errors.New(\"Provided file is not a public key\")\n\t\t\t}\n\n\t\tcase \"provider\":\n\t\t\tkeyProviders = append(keyProviders, []byte(value))\n\n\t\tdefault:\n\t\t\treturn nil, nil, nil, nil, nil, nil, errors.New(\"Provided protocol not recognized\")\n\t\t}\n\t}\n\treturn gpgRecipients, pubkeys, x509s, pkcs11Pubkeys, pkcs11Yamls, keyProviders, nil\n}", "func (item *Item) cipherKey(skey []byte) []byte {\n\tif item.Password == \"\" {\n\t\treturn skey\n\t}\n\tn := len(skey)\n\tk := make([]byte, n)\n\tp := []byte(item.Password)\n\t// key = (byte of password) + (bytes of default key)\n\tfor i := range k {\n\t\tif i < len(p) {\n\t\t\tk[i] = p[i]\n\t\t} else {\n\t\t\tk[i] = skey[i]\n\t\t}\n\t}\n\treturn k\n}", "func (c *cipher256) Encrypt(dst, src []byte) {\n\t// Load the input\n\tin := new([numWords256]uint64)\n\tin[0] = loadWord(src[0:8])\n\tin[1] = loadWord(src[8:16])\n\tin[2] = loadWord(src[16:24])\n\tin[3] = loadWord(src[24:32])\n\n\t// Perform encryption rounds\n\tfor d := 0; d < numRounds256; d += 8 {\n\t\t// Add round key\n\t\tin[0] += c.ks[d/4][0]\n\t\tin[1] += c.ks[d/4][1]\n\t\tin[2] += c.ks[d/4][2]\n\t\tin[3] += c.ks[d/4][3]\n\n\t\t// Four rounds of mix and permute\n\t\tin[0] += in[1]\n\t\tin[1] = ((in[1] << 14) | (in[1] >> (64 - 14))) ^ in[0]\n\t\tin[2] += in[3]\n\t\tin[3] = ((in[3] << 16) | (in[3] >> (64 - 16))) 
^ in[2]\n\t\tin[1], in[3] = in[3], in[1]\n\n\t\tin[0] += in[1]\n\t\tin[1] = ((in[1] << 52) | (in[1] >> (64 - 52))) ^ in[0]\n\t\tin[2] += in[3]\n\t\tin[3] = ((in[3] << 57) | (in[3] >> (64 - 57))) ^ in[2]\n\t\tin[1], in[3] = in[3], in[1]\n\n\t\tin[0] += in[1]\n\t\tin[1] = ((in[1] << 23) | (in[1] >> (64 - 23))) ^ in[0]\n\t\tin[2] += in[3]\n\t\tin[3] = ((in[3] << 40) | (in[3] >> (64 - 40))) ^ in[2]\n\t\tin[1], in[3] = in[3], in[1]\n\n\t\tin[0] += in[1]\n\t\tin[1] = ((in[1] << 5) | (in[1] >> (64 - 5))) ^ in[0]\n\t\tin[2] += in[3]\n\t\tin[3] = ((in[3] << 37) | (in[3] >> (64 - 37))) ^ in[2]\n\t\tin[1], in[3] = in[3], in[1]\n\n\t\t// Add round key\n\t\tin[0] += c.ks[(d/4)+1][0]\n\t\tin[1] += c.ks[(d/4)+1][1]\n\t\tin[2] += c.ks[(d/4)+1][2]\n\t\tin[3] += c.ks[(d/4)+1][3]\n\n\t\t// Four rounds of mix and permute\n\t\tin[0] += in[1]\n\t\tin[1] = ((in[1] << 25) | (in[1] >> (64 - 25))) ^ in[0]\n\t\tin[2] += in[3]\n\t\tin[3] = ((in[3] << 33) | (in[3] >> (64 - 33))) ^ in[2]\n\t\tin[1], in[3] = in[3], in[1]\n\n\t\tin[0] += in[1]\n\t\tin[1] = ((in[1] << 46) | (in[1] >> (64 - 46))) ^ in[0]\n\t\tin[2] += in[3]\n\t\tin[3] = ((in[3] << 12) | (in[3] >> (64 - 12))) ^ in[2]\n\t\tin[1], in[3] = in[3], in[1]\n\n\t\tin[0] += in[1]\n\t\tin[1] = ((in[1] << 58) | (in[1] >> (64 - 58))) ^ in[0]\n\t\tin[2] += in[3]\n\t\tin[3] = ((in[3] << 22) | (in[3] >> (64 - 22))) ^ in[2]\n\t\tin[1], in[3] = in[3], in[1]\n\n\t\tin[0] += in[1]\n\t\tin[1] = ((in[1] << 32) | (in[1] >> (64 - 32))) ^ in[0]\n\t\tin[2] += in[3]\n\t\tin[3] = ((in[3] << 32) | (in[3] >> (64 - 32))) ^ in[2]\n\t\tin[1], in[3] = in[3], in[1]\n\t}\n\n\t// Add the final round key\n\tin[0] += c.ks[numRounds256/4][0]\n\tin[1] += c.ks[numRounds256/4][1]\n\tin[2] += c.ks[numRounds256/4][2]\n\tin[3] += c.ks[numRounds256/4][3]\n\n\t// Store ciphertext in destination\n\tstoreWord(dst[0:8], in[0])\n\tstoreWord(dst[8:16], in[1])\n\tstoreWord(dst[16:24], in[2])\n\tstoreWord(dst[24:32], in[3])\n}", "func implementRepeatingKeyXOR(s string, k string) string {\n\t// convert src and key to byte slices\n\tsrc := []byte(s)\n\tkey := []byte(k)\n\n\t// generate a repeating key\n\trep := getRepeatingKey(key, src)\n\n\t// XOR against src\n\tout := []byte{}\n\tfor i, letter := range src {\n\t\tout = append(out, xor(letter, rep[i]))\n\t}\n\n\t// convert back to hex\n\tresult := hex.EncodeToString(out)\n\n\treturn result\n}", "func verifyKeys(prevEntry *tpb.Entry, data interface{}, update *tpb.SignedKV, entry *tpb.Entry) error {\n\tvar verifiers map[string]signatures.Verifier\n\tvar err error\n\tif prevEntry == nil {\n\t\tverifiers, err = verifiersFromKeys(entry.GetAuthorizedKeys())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tverifiers, err = verifiersFromKeys(prevEntry.GetAuthorizedKeys())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := verifyAuthorizedKeys(data, verifiers, update.GetSignatures()); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (e *ECDH1PUAEADCompositeEncrypt) Encrypt(plaintext, aad []byte) ([]byte, error) {\n\tif len(e.recPublicKeys) == 0 {\n\t\treturn nil, fmt.Errorf(\"ECDH1PUAEADCompositeEncrypt: missing recipients public keys for key wrapping\")\n\t}\n\n\tvar eAlg, eTyp, kwAlg string\n\n\t// TODO add chacha alg support too, https://github.com/hyperledger/aries-framework-go/issues/1684\n\tswitch e.keyType {\n\tcase commonpb.KeyType_EC:\n\t\teAlg = composite.A256GCM\n\t\tkwAlg = A256KWAlg\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"ECDH1PUAEADCompositeEncrypt: bad key type: '%s'\", e.keyType)\n\t}\n\n\teTyp = 
composite.DIDCommEncType\n\n\tkeySize := e.encHelper.GetSymmetricKeySize()\n\tcek := random.GetRandomBytes(uint32(keySize))\n\n\tvar recipientsWK []*composite.RecipientWrappedKey\n\n\tvar singleRecipientAAD []byte\n\n\tfor _, rec := range e.recPublicKeys {\n\t\tsenderKW := &ECDH1PUConcatKDFSenderKW{\n\t\t\tsenderPrivateKey: e.senderPrivKey,\n\t\t\trecipientPublicKey: rec,\n\t\t\tcek: cek,\n\t\t}\n\n\t\t// TODO: add support for 25519 key wrapping https://github.com/hyperledger/aries-framework-go/issues/1637\n\t\tkek, err := senderKW.wrapKey(kwAlg, keySize)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\trecipientsWK = append(recipientsWK, kek)\n\n\t\tif len(e.recPublicKeys) == 1 {\n\t\t\tsingleRecipientAAD, err = e.encHelper.MergeSingleRecipientHeaders(kek, aad)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\taad = singleRecipientAAD\n\t\t}\n\t}\n\n\taead, err := e.encHelper.GetAEAD(cek)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tct, err := aead.Encrypt(plaintext, aad)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn e.encHelper.BuildEncData(eAlg, eTyp, recipientsWK, ct, singleRecipientAAD)\n}", "func mitmDHGroupX(g, p *big.Int) bool {\n\t// This mitm function doesn't change anything, but I wanted to reuse code\n\t// from challenge 34.\n\tmitm := func(msg *dhMsg) dhMsg {\n\t\treturn msg.Copy()\n\t}\n\n\t// Open a channel to a simulated Bob.\n\tbobch := make(chan dhMsg)\n\tgo bob(bobch, mitm)\n\n\t// Perform the key exchange.\n\tkex := dhProtocol(bobch, g, p, mitm)\n\n\t// Encrypt the message.\n\tplaintext := []byte(\"hello\")\n\tciphertext, err := kex.Encrypt(plaintext)\n\tif err != nil {\n\t\tcryptopals.PrintError(err)\n\t\treturn false\n\t}\n\n\t// Because g = p-1, we know the subgroup it generates is {1, p-1}. Therefore\n\t// kex.X and kex.Y must be 1 or p-1 and the resulting secret g^(xy) must be\n\t// 1 or p-1. Since a man-in-the-middle can view X and Y on the wire, they\n\t// have enough information to determine which of those two values the key\n\t// actually is. 
So we \"forge\" a dhKeyExchange struct and set the secret\n\t// private key x to 1 so that we can craft Y to produce the correct session\n\t// key.\n\tmitmKex := dhKeyExchange{}\n\tmitmKex.Init(g, p)\n\tone := big.NewInt(1)\n\tmitmKex.x = one\n\tpp := big.NewInt(0).Add(p, big.NewInt(-1)) // pp = p-1\n\n\tswitch {\n\tcase kex.X.Cmp(one)+kex.Y.Cmp(one) < 2:\n\t\t// If either X or Y is 1, then the session key is 1.\n\t\tmitmKex.Y = one\n\tcase kex.X.Cmp(pp)+kex.Y.Cmp(pp) == 0:\n\t\t// If both X and Y are p-1, then the session key is p-1.\n\t\tmitmKex.Y = pp\n\tdefault:\n\t\tcryptopals.PrintError(errors.New(\"g is not valid for this attack\"))\n\t\treturn false\n\t}\n\n\t// The man-in-the-middle can now decrypt messages encrypted with the\n\t// diffie-hellman session key.\n\tourtext, err := mitmKex.Decrypt(ciphertext)\n\tif err != nil {\n\t\tcryptopals.PrintError(err)\n\t\treturn false\n\t}\n\n\t// Send the ciphertext to Bob.\n\tbobch <- mitm(&dhMsg{t: dhSendMsg, msg: ciphertext})\n\n\t// Receive Bob's response.\n\tans := <-bobch\n\tif !ans.ok {\n\t\tcryptopals.PrintError(ans.err)\n\t\treturn false\n\t}\n\n\t// The MITM can decrypt Bob's messages, too.\n\tbobtext, err := mitmKex.Decrypt(ans.msg)\n\tif err != nil {\n\t\tcryptopals.PrintError(err)\n\t\treturn false\n\t}\n\n\t// Test if the decryptions are correct.\n\tif bytes.Equal(bobtext, []byte(\"hi\")) && bytes.Equal(ourtext, []byte(\"hello\")) {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (key PrivateKey) Encrypt(peersPublicKey PublicKey, data []byte) []byte {\n\tvar priv [KeySize]byte\n\tcopy(priv[:], key[:KeySize])\n\n\tvar pub [KeySize]byte\n\tcopy(pub[:], peersPublicKey[:])\n\n\tnonce := generateNonce()\n\tsealed := box.Seal(nil, data, &nonce, &pub, &priv)\n\n\tresult := make([]byte, 0, len(pub)+len(nonce)+len(sealed))\n\tresult = append(result, key[KeySize:]...)\n\tresult = append(result, nonce[:]...)\n\tresult = append(result, sealed...)\n\treturn result\n}", "func KeyGenerate_ed(msgprex string,ch chan interface{},id int,cointype string) bool {\n if id < 0 || id >= RpcMaxWorker || id >= len(workers) {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:no find worker id\",Err:GetRetErr(ErrGetWorkerIdError)}\n\tch <- res\n\treturn false\n }\n\n w := workers[id]\n GroupId := w.groupid \n fmt.Println(\"========KeyGenerate_ed============\",\"GroupId\",GroupId)\n if GroupId == \"\" {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"get group id fail\",Err:fmt.Errorf(\"get group id fail.\")}\n\tch <- res\n\treturn false\n }\n \n ns,_ := GetGroup(GroupId)\n if ns != NodeCnt {\n\tlogs.Debug(\"KeyGenerate_ed,get nodes info error.\")\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:the group is not ready\",Err:GetRetErr(ErrGroupNotReady)}\n\tch <- res\n\treturn false \n }\n\t\t\n rand := cryptorand.Reader\n var seed [32]byte\n\n if _, err := io.ReadFull(rand, seed[:]); err != nil {\n\t fmt.Println(\"Error: io.ReadFull(rand, seed)\")\n }\n\n // 1.2 privateKey' = SHA512(seed)\n var sk [64]byte\n var pk [32]byte\n\n seedDigest := sha512.Sum512(seed[:])\n\n seedDigest[0] &= 248\n seedDigest[31] &= 127\n seedDigest[31] |= 64\n\n copy(sk[:], seedDigest[:])\n\n // 1.3 publicKey\n var temSk [32]byte\n copy(temSk[:], sk[:32])\n\n var A ed.ExtendedGroupElement\n ed.GeScalarMultBase(&A, &temSk)\n\n A.ToBytes(&pk)\n\n CPk, DPk := ed.Commit(pk)\n zkPk := ed.Prove(temSk)\n \n ids := GetIds(cointype,GroupId)\n \n mp := []string{msgprex,cur_enode}\n enode := strings.Join(mp,\"-\")\n s0 := \"EDC11\"\n s1 := string(CPk[:])\n ss := enode + 
common.Sep + s0 + common.Sep + s1\n logs.Debug(\"================kg ed round one,send msg,code is EDC11==================\")\n SendMsgToDcrmGroup(ss,GroupId)\n \n _,tip,cherr := GetChannelValue(ch_t,w.bedc11)\n if cherr != nil {\n\tlogs.Debug(\"get w.bedc11 timeout.\")\n\tres := RpcDcrmRes{Ret:\"\",Tip:tip,Err:fmt.Errorf(\"get ed c11 timeout.\")}\n\tch <- res\n\treturn false \n }\n\n if w.msg_edc11.Len() != (NodeCnt-1) {\n\tlogs.Debug(\"get w.msg_edc11 fail.\")\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get all msg_edc11 fail\",Err:fmt.Errorf(\"get all ed c11 fail.\")}\n\tch <- res\n\treturn false\n }\n var cpks = make(map[string][32]byte)\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\tif IsCurNode(enodes,cur_enode) {\n\t cpks[cur_enode] = CPk \n\t continue\n\t}\n\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\t\n\titer := w.msg_edc11.Front()\n\tfor iter != nil {\n\t data := iter.Value.(string)\n\t m := strings.Split(data,common.Sep)\n\t ps := strings.Split(m[0],\"-\")\n\t if strings.EqualFold(ps[1],en[0]) {\n\t\tvar t [32]byte\n\t\tva := []byte(m[2])\n\t\tcopy(t[:], va[:32])\n\t\tcpks[en[0]] = t\n\t\tbreak\n\t }\n\t iter = iter.Next()\n\t}\n }\n\n s0 = \"EDZK\"\n s1 = string(zkPk[:])\n ss = enode + common.Sep + s0 + common.Sep + s1\n logs.Debug(\"================kg ed round one,send msg,code is EDZK==================\")\n SendMsgToDcrmGroup(ss,GroupId)\n \n _,tip,cherr = GetChannelValue(ch_t,w.bedzk)\n if cherr != nil {\n\tlogs.Debug(\"get w.bedzk timeout.\")\n\tres := RpcDcrmRes{Ret:\"\",Tip:tip,Err:fmt.Errorf(\"get ed zk timeout.\")}\n\tch <- res\n\treturn false \n }\n\n if w.msg_edzk.Len() != (NodeCnt-1) {\n\tlogs.Debug(\"get w.msg_edzk fail.\")\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get w.msg_edzk fail\",Err:fmt.Errorf(\"get all ed zk fail.\")}\n\tch <- res\n\treturn false\n }\n\n var zks = make(map[string][64]byte)\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\tif IsCurNode(enodes,cur_enode) {\n\t zks[cur_enode] = zkPk\n\t continue\n\t}\n\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\t\n\titer := w.msg_edzk.Front()\n\tfor iter != nil {\n\t data := iter.Value.(string)\n\t m := strings.Split(data,common.Sep)\n\t ps := strings.Split(m[0],\"-\")\n\t if strings.EqualFold(ps[1],en[0]) {\n\t\tvar t [64]byte\n\t\tva := []byte(m[2])\n\t\tcopy(t[:], va[:64])\n\t\tzks[en[0]] = t\n\t\tbreak\n\t }\n\t iter = iter.Next()\n\t}\n }\n\n s0 = \"EDD11\"\n s1 = string(DPk[:])\n ss = enode + common.Sep + s0 + common.Sep + s1\n logs.Debug(\"================kg ed round one,send msg,code is EDD11==================\")\n SendMsgToDcrmGroup(ss,GroupId)\n \n _,tip,cherr = GetChannelValue(ch_t,w.bedd11)\n if cherr != nil {\n\tlogs.Debug(\"get w.bedd11 timeout.\")\n\tres := RpcDcrmRes{Ret:\"\",Tip:tip,Err:fmt.Errorf(\"get ed d11 timeout.\")}\n\tch <- res\n\treturn false \n }\n\n if w.msg_edd11.Len() != (NodeCnt-1) {\n\tlogs.Debug(\"get w.msg_edd11 fail.\")\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get msg_edd11 fail\",Err:fmt.Errorf(\"get all ed d11 fail.\")}\n\tch <- res\n\treturn false\n }\n var dpks = make(map[string][64]byte)\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\tif IsCurNode(enodes,cur_enode) {\n\t dpks[cur_enode] = DPk\n\t continue\n\t}\n\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\t\n\titer := w.msg_edd11.Front()\n\tfor iter != nil {\n\t data := iter.Value.(string)\n\t m := strings.Split(data,common.Sep)\n\t ps := 
strings.Split(m[0],\"-\")\n\t if strings.EqualFold(ps[1],en[0]) {\n\t\tvar t [64]byte\n\t\tva := []byte(m[2])\n\t\tcopy(t[:], va[:64])\n\t\tdpks[en[0]] = t\n\t\tbreak\n\t }\n\t iter = iter.Next()\n\t}\n }\n\n //1.4\n //fixid := []string{\"36550725515126069209815254769857063254012795400127087205878074620099758462980\",\"86773132036836319561089192108022254523765345393585629030875522375234841566222\",\"80065533669343563706948463591465947300529465448793304408098904839998265250318\"}\n var uids = make(map[string][32]byte)\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\t//num,_ := new(big.Int).SetString(fixid[k],10)\n\tvar t [32]byte\n\t//copy(t[:], num.Bytes())\n\tcopy(t[:], id.Bytes())\n\tif len(id.Bytes()) < 32 {\n\t l := len(id.Bytes())\n\t for j:= l;j<32;j++ {\n\t\tt[j] = byte(0x00)\n\t }\n\t}\n\tuids[en[0]] = t\n }\n\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tCPkFlag := ed.Verify(cpks[en[0]],dpks[en[0]])\n\tif !CPkFlag {\n\t fmt.Println(\"Error: Commitment(PK) Not Pass at User: %s\",en[0])\n\t res := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:commitment check fail in req ed pubkey\",Err:fmt.Errorf(\"Commitment(PK) Not Pass at User.\")}\n\t ch <- res\n\t return false\n\t}\n }\n\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tdpk := dpks[en[0]]\n\tvar t [32]byte\n\tcopy(t[:], dpk[32:])\n\tzkPkFlag := ed.Verify_zk(zks[en[0]], t)\n\tif !zkPkFlag {\n\t\tfmt.Println(\"Error: ZeroKnowledge Proof (Pk) Not Pass at User: %s\", en[0])\n\t\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:zeroknowledge check fail\",Err:fmt.Errorf(\"ZeroKnowledge Proof (Pk) Not Pass.\")}\n\t\tch <- res\n\t\treturn false\n\t}\n }\n\n // 2.5 calculate a = SHA256(PkU1, {PkU2, PkU3})\n var a [32]byte\n var aDigest [64]byte\n var PkSet []byte\n\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tdpk := dpks[en[0]]\n\tPkSet = append(PkSet[:], (dpk[32:])...)\n }\n h := sha512.New()\n dpk := dpks[cur_enode]\n h.Write(dpk[32:])\n h.Write(PkSet)\n h.Sum(aDigest[:0])\n ed.ScReduce(&a, &aDigest)\n\n // 2.6 calculate ask\n var ask [32]byte\n var temSk2 [32]byte\n copy(temSk2[:], sk[:32])\n ed.ScMul(&ask, &a, &temSk2)\n \n // 2.7 calculate vss\n /*var inputid [][32]byte\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tid := []byte(uids[en[0]])\n\tinputid = append(inputid,id[:])\n }*/\n\n _, cfsBBytes, shares := ed.Vss2(ask,ThresHold, NodeCnt,uids)\n\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\n\tif enodes == \"\" {\n\t logs.Debug(\"=========KeyGenerate_ed,don't find proper enodes========\")\n\t res := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get enode by uid fail\",Err:GetRetErr(ErrGetEnodeByUIdFail)}\n\t ch <- res\n\t return false\n\t}\n\t\n\tif IsCurNode(enodes,cur_enode) {\n\t continue\n\t}\n\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tfor k,v := range shares {\n\t if strings.EqualFold(k,en[0]) {\n\t\ts0 := \"EDSHARE1\"\n\t\ts1 := string(v[:])\n\t\tss := enode + common.Sep + s0 + common.Sep + s1\n\t\tlogs.Debug(\"================kg ed round two,send msg,code is EDSHARE1==================\")\n\t\tSendMsgToPeer(enodes,ss)\n\t\tbreak\n\t }\n\t}\n }\n\n _,tip,cherr = 
GetChannelValue(ch_t,w.bedshare1)\n if cherr != nil {\n\tlogs.Debug(\"get w.bedshare1 timeout in keygenerate.\")\n\tres := RpcDcrmRes{Ret:\"\",Tip:tip,Err:fmt.Errorf(\"get ed share1 fail.\")}\n\tch <- res\n\treturn false \n }\n logs.Debug(\"================kg ed round two,receiv msg,code is EDSHARE1.==================\")\n\n if w.msg_edshare1.Len() != (NodeCnt-1) {\n\tlogs.Debug(\"get w.msg_edshare1 fail.\")\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get all msg_edshare1 fail\",Err:fmt.Errorf(\"get all ed share1 fail.\")}\n\tch <- res\n\treturn false\n }\n\n var edshares = make(map[string][32]byte)\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\tif IsCurNode(enodes,cur_enode) {\n\t edshares[cur_enode] = shares[cur_enode]\n\t continue\n\t}\n\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\t\n\titer := w.msg_edshare1.Front()\n\tfor iter != nil {\n\t data := iter.Value.(string)\n\t m := strings.Split(data,common.Sep)\n\t ps := strings.Split(m[0],\"-\")\n\t if strings.EqualFold(ps[1],en[0]) {\n\t\tvar t [32]byte\n\t\tva := []byte(m[2]) \n\t\tcopy(t[:], va[:32])\n\t\tedshares[en[0]] = t\n\t\tbreak\n\t }\n\t iter = iter.Next()\n\t}\n }\n\n s0 = \"EDCFSB\"\n ss = enode + common.Sep + s0 + common.Sep\n for _,v := range cfsBBytes {\n\tvv := string(v[:])\n\tss = ss + vv + common.Sep\n }\n ss = ss + \"NULL\"\n\n logs.Debug(\"================kg ed round two,send msg,code is EDCFSB==================\")\n SendMsgToDcrmGroup(ss,GroupId)\n\n _,tip,cherr = GetChannelValue(ch_t,w.bedcfsb)\n if cherr != nil {\n\tlogs.Debug(\"get w.bedcfsb timeout.\")\n\tres := RpcDcrmRes{Ret:\"\",Tip:tip,Err:fmt.Errorf(\"get ed cfsb timeout.\")}\n\tch <- res\n\treturn false \n }\n\n if w.msg_edcfsb.Len() != (NodeCnt-1) {\n\tlogs.Debug(\"get w.msg_edcfsb fail.\")\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get all msg_edcfsb fail\",Err:fmt.Errorf(\"get all ed cfsb fail.\")}\n\tch <- res\n\treturn false\n }\n var cfsbs = make(map[string][][32]byte)\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\tif IsCurNode(enodes,cur_enode) {\n\t cfsbs[cur_enode] = cfsBBytes\n\t continue\n\t}\n\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\t\n\titer := w.msg_edcfsb.Front()\n\tfor iter != nil {\n\t data := iter.Value.(string)\n\t m := strings.Split(data,common.Sep)\n\t ps := strings.Split(m[0],\"-\")\n\t if strings.EqualFold(ps[1],en[0]) {\n\t\tmm := m[2:]\n\t\tvar cfs [][32]byte\n\t\tfor _,tmp := range mm {\n\t\t if tmp == \"NULL\" {\n\t\t\tbreak\n\t\t }\n\t\t var t [32]byte\n\t\t va := []byte(tmp)\n\t\t copy(t[:], va[:32])\n\t\t cfs = append(cfs,t)\n\t\t}\n\t\tcfsbs[en[0]] = cfs\n\t\tbreak\n\t }\n\t iter = iter.Next()\n\t}\n }\n\n // 3.1 verify share\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\t\n\tshareUFlag := ed.Verify_vss(edshares[en[0]],uids[cur_enode],cfsbs[en[0]])\n\n\tif !shareUFlag {\n\t\tfmt.Println(\"Error: VSS Share Verification Not Pass at User: %s\",en[0])\n\t\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:VSS Share verification fail\",Err:fmt.Errorf(\"VSS Share Verification Not Pass.\")}\n\t\tch <- res\n\t\treturn false\n\t}\n }\n\n // 3.2 verify share2\n var a2 [32]byte\n var aDigest2 [64]byte\n\n var PkSet2 []byte\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tvar temPk [32]byte\n\tt := dpks[en[0]]\n\tcopy(temPk[:], t[32:])\n\tPkSet2 = 
append(PkSet2[:], (temPk[:])...)\n }\n \n h = sha512.New()\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tvar temPk [32]byte\n\tt := dpks[en[0]]\n\tcopy(temPk[:], t[32:])\n\n\th.Reset()\n\th.Write(temPk[:])\n\th.Write(PkSet2)\n\th.Sum(aDigest2[:0])\n\ted.ScReduce(&a2, &aDigest2)\n\n\tvar askB, A ed.ExtendedGroupElement\n\tA.FromBytes(&temPk)\n\ted.GeScalarMult(&askB, &a2, &A)\n\n\tvar askBBytes [32]byte\n\taskB.ToBytes(&askBBytes)\n\n\tt2 := cfsbs[en[0]]\n\ttt := t2[0]\n\tif !bytes.Equal(askBBytes[:], tt[:]) {\n\t\tfmt.Println(\"Error: VSS Coefficient Verification Not Pass at User: %s\",en[0])\n\t\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:VSS Coefficient verification fail\",Err:fmt.Errorf(\"VSS Coefficient Verification Not Pass.\")}\n\t\tch <- res\n\t\treturn false\n\t}\n }\n\n // 3.3 calculate tSk\n var tSk [32]byte\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tt := edshares[en[0]]\n\ted.ScAdd(&tSk, &tSk, &t)\n }\n\n // 3.4 calculate pk\n var finalPk ed.ExtendedGroupElement\n var finalPkBytes [32]byte\n\n i := 0\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tvar temPk [32]byte\n\tt := dpks[en[0]]\n\tcopy(temPk[:], t[32:])\n\n\th.Reset()\n\th.Write(temPk[:])\n\th.Write(PkSet2)\n\th.Sum(aDigest2[:0])\n\ted.ScReduce(&a2, &aDigest2)\n\n\tvar askB, A ed.ExtendedGroupElement\n\tA.FromBytes(&temPk)\n\ted.GeScalarMult(&askB, &a2, &A)\n\n\tif i == 0 {\n\t\tfinalPk = askB\n\t} else {\n\t\ted.GeAdd(&finalPk, &finalPk, &askB)\n\t}\n\n\ti++\n }\n \n finalPk.ToBytes(&finalPkBytes)\n\n //save the local db\n //sk:pk:tsk:pkfinal\n save := string(sk[:]) + common.Sep11 + string(pk[:]) + common.Sep11 + string(tSk[:]) + common.Sep11 + string(finalPkBytes[:])\n \n w.edsave.PushBack(save)\n w.edpk.PushBack(string(finalPkBytes[:]))\n\n return true\n}", "func KeyGenerate_ec2(msgprex string,ch chan interface{},id int,cointype string) bool {\n if id < 0 || id >= RpcMaxWorker || id >= len(workers) {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get worker id fail\",Err:GetRetErr(ErrGetWorkerIdError)}\n\tch <- res\n\treturn false\n }\n\n w := workers[id]\n GroupId := w.groupid \n fmt.Println(\"========KeyGenerate_ec2============\",\"GroupId\",GroupId)\n if GroupId == \"\" {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"get group id fail in req ec2 pubkey\",Err:fmt.Errorf(\"get group id fail.\")}\n\tch <- res\n\treturn false\n }\n \n ns,_ := GetGroup(GroupId)\n if ns != NodeCnt {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:the group is not ready\",Err:GetRetErr(ErrGroupNotReady)}\n\tch <- res\n\treturn false \n }\n\n //1. generate their own \"partial\" private key secretly\n u1 := GetRandomIntFromZn(secp256k1.S256().N)\n\n // 2. calculate \"partial\" public key, make \"pritial\" public key commiment to get (C,D)\n u1Gx, u1Gy := secp256k1.S256().ScalarBaseMult(u1.Bytes())\n commitU1G := new(ec2.Commitment).Commit(u1Gx, u1Gy)\n\n // 3. generate their own paillier public key and private key\n u1PaillierPk, u1PaillierSk := ec2.GenerateKeyPair(PaillierKeyLength)\n\n // 4. 
Broadcast\n // commitU1G.C, commitU2G.C, commitU3G.C, commitU4G.C, commitU5G.C\n // u1PaillierPk, u2PaillierPk, u3PaillierPk, u4PaillierPk, u5PaillierPk\n mp := []string{msgprex,cur_enode}\n enode := strings.Join(mp,\"-\")\n s0 := \"C1\"\n s1 := string(commitU1G.C.Bytes())\n s2 := u1PaillierPk.Length\n s3 := string(u1PaillierPk.N.Bytes()) \n s4 := string(u1PaillierPk.G.Bytes()) \n s5 := string(u1PaillierPk.N2.Bytes()) \n ss := enode + Sep + s0 + Sep + s1 + Sep + s2 + Sep + s3 + Sep + s4 + Sep + s5\n SendMsgToDcrmGroup(ss,GroupId)\n\n // 1. Receive Broadcast\n // commitU1G.C, commitU2G.C, commitU3G.C, commitU4G.C, commitU5G.C\n // u1PaillierPk, u2PaillierPk, u3PaillierPk, u4PaillierPk, u5PaillierPk\n _,tip,cherr := GetChannelValue(ch_t,w.bc1)\n if cherr != nil {\n\tres := RpcDcrmRes{Ret:\"\",Tip:tip,Err:GetRetErr(ErrGetC1Timeout)}\n\tch <- res\n\treturn false \n }\n\n // 2. generate their vss to get shares which is a set\n // [notes]\n // all nodes has their own id, in practival, we can take it as double hash of public key of fusion\n\n ids := GetIds(cointype,GroupId)\n\n u1PolyG, _, u1Shares, err := ec2.Vss(u1, ids, ThresHold, NodeCnt)\n if err != nil {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:generate vss fail\",Err:err}\n\tch <- res\n\treturn false \n }\n\n // 3. send the the proper share to proper node \n //example for u1:\n // Send u1Shares[0] to u1\n // Send u1Shares[1] to u2\n // Send u1Shares[2] to u3\n // Send u1Shares[3] to u4\n // Send u1Shares[4] to u5\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\n\tif enodes == \"\" {\n\t res := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get enode by uid fail\",Err:GetRetErr(ErrGetEnodeByUIdFail)}\n\t ch <- res\n\t return false\n\t}\n\t\n\tif IsCurNode(enodes,cur_enode) {\n\t continue\n\t}\n\n\tfor _,v := range u1Shares {\n\t uid := ec2.GetSharesId(v)\n\t if uid.Cmp(id) == 0 {\n\t\tmp := []string{msgprex,cur_enode}\n\t\tenode := strings.Join(mp,\"-\")\n\t\ts0 := \"SHARE1\"\n\t\ts1 := strconv.Itoa(v.T) \n\t\ts2 := string(v.Id.Bytes()) \n\t\ts3 := string(v.Share.Bytes()) \n\t\tss := enode + Sep + s0 + Sep + s1 + Sep + s2 + Sep + s3\n\t\tSendMsgToPeer(enodes,ss)\n\t\tbreak\n\t }\n\t}\n }\n\n // 4. Broadcast\n // commitU1G.D, commitU2G.D, commitU3G.D, commitU4G.D, commitU5G.D\n // u1PolyG, u2PolyG, u3PolyG, u4PolyG, u5PolyG\n mp = []string{msgprex,cur_enode}\n enode = strings.Join(mp,\"-\")\n s0 = \"D1\"\n dlen := len(commitU1G.D)\n s1 = strconv.Itoa(dlen)\n\n ss = enode + Sep + s0 + Sep + s1 + Sep\n for _,d := range commitU1G.D {\n\tss += string(d.Bytes())\n\tss += Sep\n }\n\n s2 = strconv.Itoa(u1PolyG.T)\n s3 = strconv.Itoa(u1PolyG.N)\n ss = ss + s2 + Sep + s3 + Sep\n\n pglen := 2*(len(u1PolyG.PolyG))\n s4 = strconv.Itoa(pglen)\n\n ss = ss + s4 + Sep\n\n for _,p := range u1PolyG.PolyG {\n\tfor _,d := range p {\n\t ss += string(d.Bytes())\n\t ss += Sep\n\t}\n }\n ss = ss + \"NULL\"\n SendMsgToDcrmGroup(ss,GroupId)\n\n // 1. Receive Broadcast\n // commitU1G.D, commitU2G.D, commitU3G.D, commitU4G.D, commitU5G.D\n // u1PolyG, u2PolyG, u3PolyG, u4PolyG, u5PolyG\n _,tip,cherr = GetChannelValue(ch_t,w.bd1_1)\n if cherr != nil {\n\tres := RpcDcrmRes{Ret:\"\",Tip:tip,Err:GetRetErr(ErrGetD1Timeout)}\n\tch <- res\n\treturn false \n }\n\n // 2. 
Receive Personal Data\n _,tip,cherr = GetChannelValue(ch_t,w.bshare1)\n if cherr != nil {\n\tres := RpcDcrmRes{Ret:\"\",Tip:tip,Err:GetRetErr(ErrGetSHARE1Timeout)}\n\tch <- res\n\treturn false \n }\n\t \n shares := make([]string,NodeCnt-1)\n if w.msg_share1.Len() != (NodeCnt-1) {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get all msg_share1 fail\",Err:GetRetErr(ErrGetAllSHARE1Fail)}\n\tch <- res\n\treturn false\n }\n itmp := 0\n iter := w.msg_share1.Front()\n for iter != nil {\n\tmdss := iter.Value.(string)\n\tshares[itmp] = mdss \n\titer = iter.Next()\n\titmp++\n }\n \n //var sstruct = make(map[string]*vss.ShareStruct)\n var sstruct = make(map[string]*ec2.ShareStruct)\n for _,v := range shares {\n\tmm := strings.Split(v, Sep)\n\t//bug\n\tif len(mm) < 5 {\n\t fmt.Println(\"===================!!! KeyGenerate_ec2,fill lib.ShareStruct map error. !!!==================\")\n\t res := RpcDcrmRes{Ret:\"\",Err:fmt.Errorf(\"fill lib.ShareStruct map error.\")}\n\t ch <- res\n\t return false\n\t}\n\t//\n\tt,_ := strconv.Atoi(mm[2])\n\tushare := &ec2.ShareStruct{T:t,Id:new(big.Int).SetBytes([]byte(mm[3])),Share:new(big.Int).SetBytes([]byte(mm[4]))}\n\tprex := mm[0]\n\tprexs := strings.Split(prex,\"-\")\n\tsstruct[prexs[len(prexs)-1]] = ushare\n }\n for _,v := range u1Shares {\n\tuid := ec2.GetSharesId(v)\n\tenodes := GetEnodesByUid(uid,cointype,GroupId)\n\tif IsCurNode(enodes,cur_enode) {\n\t sstruct[cur_enode] = v \n\t break\n\t}\n }\n\n ds := make([]string,NodeCnt-1)\n if w.msg_d1_1.Len() != (NodeCnt-1) {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get all msg_d1_1 fail\",Err:GetRetErr(ErrGetAllD1Fail)}\n\tch <- res\n\treturn false\n }\n itmp = 0\n iter = w.msg_d1_1.Front()\n for iter != nil {\n\tmdss := iter.Value.(string)\n\tds[itmp] = mdss \n\titer = iter.Next()\n\titmp++\n }\n\n var upg = make(map[string]*ec2.PolyGStruct)\n for _,v := range ds {\n\tmm := strings.Split(v, Sep)\n\tdlen,_ := strconv.Atoi(mm[2])\n\tpglen,_ := strconv.Atoi(mm[3+dlen+2])\n\tpglen = (pglen/2)\n\tvar pgss = make([][]*big.Int, 0)\n\tl := 0\n\tfor j:=0;j<pglen;j++ {\n\t l++\n\t var gg = make([]*big.Int,0)\n\t gg = append(gg,new(big.Int).SetBytes([]byte(mm[5+dlen+l])))\n\t l++\n\t gg = append(gg,new(big.Int).SetBytes([]byte(mm[5+dlen+l])))\n\t pgss = append(pgss,gg)\n\t}\n\n\tt,_ := strconv.Atoi(mm[3+dlen])\n\tn,_ := strconv.Atoi(mm[4+dlen])\n\tps := &ec2.PolyGStruct{T:t,N:n,PolyG:pgss}\n\tprex := mm[0]\n\tprexs := strings.Split(prex,\"-\")\n\tupg[prexs[len(prexs)-1]] = ps\n }\n upg[cur_enode] = u1PolyG\n\n // 3. 
verify the share\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tif sstruct[en[0]].Verify(upg[en[0]]) == false {\n\t res := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:verification share1 fail\",Err:GetRetErr(ErrVerifySHARE1Fail)}\n\t ch <- res\n\t return false\n\t}\n }\n\n // 4.verify and de-commitment to get uG\n // for all nodes, construct the commitment by the receiving C and D\n cs := make([]string,NodeCnt-1)\n if w.msg_c1.Len() != (NodeCnt-1) {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get all msg_c1 fail\",Err:GetRetErr(ErrGetAllC1Fail)}\n\tch <- res\n\treturn false\n }\n itmp = 0\n iter = w.msg_c1.Front()\n for iter != nil {\n\tmdss := iter.Value.(string)\n\tcs[itmp] = mdss \n\titer = iter.Next()\n\titmp++\n }\n\n var udecom = make(map[string]*ec2.Commitment)\n for _,v := range cs {\n\tmm := strings.Split(v, Sep)\n\tprex := mm[0]\n\tprexs := strings.Split(prex,\"-\")\n\tfor _,vv := range ds {\n\t mmm := strings.Split(vv, Sep)\n\t prex2 := mmm[0]\n\t prexs2 := strings.Split(prex2,\"-\")\n\t if prexs[len(prexs)-1] == prexs2[len(prexs2)-1] {\n\t\tdlen,_ := strconv.Atoi(mmm[2])\n\t\tvar gg = make([]*big.Int,0)\n\t\tl := 0\n\t\tfor j:=0;j<dlen;j++ {\n\t\t l++\n\t\t gg = append(gg,new(big.Int).SetBytes([]byte(mmm[2+l])))\n\t\t}\n\t\tdeCommit := &ec2.Commitment{C:new(big.Int).SetBytes([]byte(mm[2])), D:gg}\n\t\tudecom[prexs[len(prexs)-1]] = deCommit\n\t\tbreak\n\t }\n\t}\n }\n deCommit_commitU1G := &ec2.Commitment{C: commitU1G.C, D: commitU1G.D}\n udecom[cur_enode] = deCommit_commitU1G\n\n // for all nodes, verify the commitment\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tif udecom[en[0]].Verify() == false {\n\t res := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:verification commitment fail\",Err:GetRetErr(ErrKeyGenVerifyCommitFail)}\n\t ch <- res\n\t return false\n\t}\n }\n\n // for all nodes, de-commitment\n var ug = make(map[string][]*big.Int)\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\t_, u1G := udecom[en[0]].DeCommit()\n\tug[en[0]] = u1G\n }\n\n // for all nodes, calculate the public key\n var pkx *big.Int\n var pky *big.Int\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tpkx = (ug[en[0]])[0]\n\tpky = (ug[en[0]])[1]\n\tbreak\n }\n\n for k,id := range ids {\n\tif k == 0 {\n\t continue\n\t}\n\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tpkx, pky = secp256k1.S256().Add(pkx, pky, (ug[en[0]])[0],(ug[en[0]])[1])\n }\n w.pkx.PushBack(string(pkx.Bytes()))\n w.pky.PushBack(string(pky.Bytes()))\n\n // 5. 
calculate the share of private key\n var skU1 *big.Int\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tskU1 = sstruct[en[0]].Share\n\tbreak\n }\n\n for k,id := range ids {\n\tif k == 0 {\n\t continue\n\t}\n\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tskU1 = new(big.Int).Add(skU1,sstruct[en[0]].Share)\n }\n skU1 = new(big.Int).Mod(skU1, secp256k1.S256().N)\n\n //save skU1/u1PaillierSk/u1PaillierPk/...\n ss = string(skU1.Bytes())\n ss = ss + SepSave\n s1 = u1PaillierSk.Length\n s2 = string(u1PaillierSk.L.Bytes()) \n s3 = string(u1PaillierSk.U.Bytes())\n ss = ss + s1 + SepSave + s2 + SepSave + s3 + SepSave\n\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tif IsCurNode(enodes,cur_enode) {\n\t s1 = u1PaillierPk.Length\n\t s2 = string(u1PaillierPk.N.Bytes()) \n\t s3 = string(u1PaillierPk.G.Bytes()) \n\t s4 = string(u1PaillierPk.N2.Bytes()) \n\t ss = ss + s1 + SepSave + s2 + SepSave + s3 + SepSave + s4 + SepSave\n\t continue\n\t}\n\tfor _,v := range cs {\n\t mm := strings.Split(v, Sep)\n\t prex := mm[0]\n\t prexs := strings.Split(prex,\"-\")\n\t if prexs[len(prexs)-1] == en[0] {\n\t\ts1 = mm[3] \n\t\ts2 = mm[4] \n\t\ts3 = mm[5] \n\t\ts4 = mm[6] \n\t\tss = ss + s1 + SepSave + s2 + SepSave + s3 + SepSave + s4 + SepSave\n\t\tbreak\n\t }\n\t}\n }\n\n sstmp := ss //////\n tmp := ss\n\n ss = ss + \"NULL\"\n\n // 6. calculate the zk\n // ## add content: zk of paillier key, zk of u\n \n // zk of paillier key\n u1zkFactProof := u1PaillierSk.ZkFactProve()\n // zk of u\n //u1zkUProof := schnorrZK.ZkUProve(u1)\n u1zkUProof := ec2.ZkUProve(u1)\n\n // 7. Broadcast zk\n // u1zkFactProof, u2zkFactProof, u3zkFactProof, u4zkFactProof, u5zkFactProof\n mp = []string{msgprex,cur_enode}\n enode = strings.Join(mp,\"-\")\n s0 = \"ZKFACTPROOF\"\n s1 = string(u1zkFactProof.H1.Bytes())\n s2 = string(u1zkFactProof.H2.Bytes())\n s3 = string(u1zkFactProof.Y.Bytes())\n s4 = string(u1zkFactProof.E.Bytes())\n s5 = string(u1zkFactProof.N.Bytes())\n ss = enode + Sep + s0 + Sep + s1 + Sep + s2 + Sep + s3 + Sep + s4 + Sep + s5\n SendMsgToDcrmGroup(ss,GroupId)\n\n // 1. Receive Broadcast zk\n // u1zkFactProof, u2zkFactProof, u3zkFactProof, u4zkFactProof, u5zkFactProof\n _,tip,cherr = GetChannelValue(ch_t,w.bzkfact)\n if cherr != nil {\n//\tlogs.Debug(\"get w.bzkfact timeout in keygenerate.\")\n\tres := RpcDcrmRes{Ret:\"\",Tip:tip,Err:GetRetErr(ErrGetZKFACTPROOFTimeout)}\n\tch <- res\n\treturn false \n }\n\n sstmp2 := s1 + SepSave + s2 + SepSave + s3 + SepSave + s4 + SepSave + s5\n\n // 8. Broadcast zk\n // u1zkUProof, u2zkUProof, u3zkUProof, u4zkUProof, u5zkUProof\n mp = []string{msgprex,cur_enode}\n enode = strings.Join(mp,\"-\")\n s0 = \"ZKUPROOF\"\n s1 = string(u1zkUProof.E.Bytes())\n s2 = string(u1zkUProof.S.Bytes())\n ss = enode + Sep + s0 + Sep + s1 + Sep + s2\n SendMsgToDcrmGroup(ss,GroupId)\n\n // 9. Receive Broadcast zk\n // u1zkUProof, u2zkUProof, u3zkUProof, u4zkUProof, u5zkUProof\n _,tip,cherr = GetChannelValue(ch_t,w.bzku)\n if cherr != nil {\n//\tlogs.Info(\"get w.bzku timeout in keygenerate.\")\n\tres := RpcDcrmRes{Ret:\"\",Tip:tip,Err:GetRetErr(ErrGetZKUPROOFTimeout)}\n\tch <- res\n\treturn false \n }\n \n // 1. 
verify the zk\n // ## add content: verify zk of paillier key, zk of u\n\t\n // for all nodes, verify zk of paillier key\n zkfacts := make([]string,NodeCnt-1)\n if w.msg_zkfact.Len() != (NodeCnt-1) {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get msg_zkface fail\",Err:GetRetErr(ErrGetAllZKFACTPROOFFail)}\n\tch <- res\n\treturn false\n }\n itmp = 0\n iter = w.msg_zkfact.Front()\n for iter != nil {\n\tmdss := iter.Value.(string)\n\tzkfacts[itmp] = mdss \n\titer = iter.Next()\n\titmp++\n }\n\n for k,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tif IsCurNode(enodes,cur_enode) { /////bug for save zkfact\n\t sstmp = sstmp + sstmp2 + SepSave\n\t continue\n\t}\n\n\tu1PaillierPk2 := GetPaillierPk(tmp,k)\n\tfor _,v := range zkfacts {\n\t mm := strings.Split(v, Sep)\n\t prex := mm[0]\n\t prexs := strings.Split(prex,\"-\")\n\t if prexs[len(prexs)-1] == en[0] {\n\t\th1 := new(big.Int).SetBytes([]byte(mm[2]))\n\t\th2 := new(big.Int).SetBytes([]byte(mm[3]))\n\t\ty := new(big.Int).SetBytes([]byte(mm[4]))\n\t\te := new(big.Int).SetBytes([]byte(mm[5]))\n\t\tn := new(big.Int).SetBytes([]byte(mm[6]))\n\t\tzkFactProof := &ec2.ZkFactProof{H1: h1, H2: h2, Y: y, E: e,N:n}\n\t\t///////\n\t\tsstmp = sstmp + mm[2] + SepSave + mm[3] + SepSave + mm[4] + SepSave + mm[5] + SepSave + mm[6] + SepSave ///for save zkfact\n\t\t//////\n\n\t\tif !u1PaillierPk2.ZkFactVerify(zkFactProof) {\n\t\t res := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:zkfact verification fail\",Err:GetRetErr(ErrVerifyZKFACTPROOFFail)}\n\t\t ch <- res\n\t \n\t\t return false \n\t\t}\n\n\t\tbreak\n\t }\n\t}\n }\n\n fmt.Println(\"========AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA KeyGenerate_ec2, AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA ============\",\"GroupId\",GroupId)\n\n // for all nodes, verify zk of u\n zku := make([]string,NodeCnt-1)\n if w.msg_zku.Len() != (NodeCnt-1) {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get all msg_zku fail\",Err:GetRetErr(ErrGetAllZKUPROOFFail)}\n\tch <- res\n\treturn false\n }\n itmp = 0\n iter = w.msg_zku.Front()\n for iter != nil {\n\tmdss := iter.Value.(string)\n\tzku[itmp] = mdss \n\titer = iter.Next()\n\titmp++\n }\n\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tfor _,v := range zku {\n\t mm := strings.Split(v, Sep)\n\t prex := mm[0]\n\t prexs := strings.Split(prex,\"-\")\n\t if prexs[len(prexs)-1] == en[0] {\n\t\te := new(big.Int).SetBytes([]byte(mm[2]))\n\t\ts := new(big.Int).SetBytes([]byte(mm[3]))\n\t\tzkUProof := &ec2.ZkUProof{E: e, S: s}\n\t\tif !ec2.ZkUVerify(ug[en[0]],zkUProof) {\n\t\t res := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:zkuproof verification fail\",Err:GetRetErr(ErrVerifyZKUPROOFFail)}\n\t\t ch <- res\n\t\t return false \n\t\t}\n\n\t\tbreak\n\t }\n\t}\n } \n \n sstmp = sstmp + \"NULL\"\n //w.save <- sstmp\n //w.save: sku1:UiSK:U1PK:U2PK:U3PK:....:UnPK:U1H1:U1H2:U1Y:U1E:U1N:U2H1:U2H2:U2Y:U2E:U2N:U3H1:U3H2:U3Y:U3E:U3N:......:NULL\n w.save.PushBack(sstmp)\n return true\n}", "func decryptKey(key string, code Code) Code {\n\n var let string\n var num []int\n\n for i := 0; i < len(code.letters); i++ {\n\n pos := strings.Index(key, string(code.letters[i]))\n let = let + string(ALPHABET[pos])\n num = append(num ,int(code.numbers[i]))\n\n }\n\n return Code {\n letters : let,\n numbers : num,\n }\n\n}", "func RSAEncryptText(target string, text string) string {\n\ttextBytes := []byte(text)\n\thash := 
utils.HASH_ALGO.New()\n\tpubKeyBytes, e := hex.DecodeString(target)\n\tutils.HandleError(e)\n\tpubKey, e := x509.ParsePKCS1PublicKey(pubKeyBytes)\n\tutils.HandleError(e)\n\tctext := \"\"\n\n\tchunkSize, e := utils.GetMaxEncodedChunkLength(pubKey)\n\tutils.HandleError(e)\n\n\tfor i := 0; i < len(textBytes); {\n\t\tj := int(math.Min(float64(len(textBytes)), float64(i+chunkSize)))\n\t\tel, e := rsa.EncryptOAEP(hash, rand.Reader, pubKey, textBytes[i:j], nil)\n\t\tutils.HandleError(e)\n\t\tctext += string(el)\n\t\ti = j\n\t}\n\treturn ctext\n}", "func (c *cipher256) Decrypt(dst, src []byte) {\n\t// Load the ciphertext\n\tct := new([numWords256]uint64)\n\tct[0] = loadWord(src[0:8])\n\tct[1] = loadWord(src[8:16])\n\tct[2] = loadWord(src[16:24])\n\tct[3] = loadWord(src[24:32])\n\n\t// Subtract the final round key\n\tct[0] -= c.ks[numRounds256/4][0]\n\tct[1] -= c.ks[numRounds256/4][1]\n\tct[2] -= c.ks[numRounds256/4][2]\n\tct[3] -= c.ks[numRounds256/4][3]\n\n\t// Perform decryption rounds\n\tfor d := numRounds256 - 1; d >= 0; d -= 8 {\n\t\t// Four rounds of permute and unmix\n\t\tct[1], ct[3] = ct[3], ct[1]\n\t\tct[3] = ((ct[3] ^ ct[2]) << (64 - 32)) | ((ct[3] ^ ct[2]) >> 32)\n\t\tct[2] -= ct[3]\n\t\tct[1] = ((ct[1] ^ ct[0]) << (64 - 32)) | ((ct[1] ^ ct[0]) >> 32)\n\t\tct[0] -= ct[1]\n\n\t\tct[1], ct[3] = ct[3], ct[1]\n\t\tct[3] = ((ct[3] ^ ct[2]) << (64 - 22)) | ((ct[3] ^ ct[2]) >> 22)\n\t\tct[2] -= ct[3]\n\t\tct[1] = ((ct[1] ^ ct[0]) << (64 - 58)) | ((ct[1] ^ ct[0]) >> 58)\n\t\tct[0] -= ct[1]\n\n\t\tct[1], ct[3] = ct[3], ct[1]\n\t\tct[3] = ((ct[3] ^ ct[2]) << (64 - 12)) | ((ct[3] ^ ct[2]) >> 12)\n\t\tct[2] -= ct[3]\n\t\tct[1] = ((ct[1] ^ ct[0]) << (64 - 46)) | ((ct[1] ^ ct[0]) >> 46)\n\t\tct[0] -= ct[1]\n\n\t\tct[1], ct[3] = ct[3], ct[1]\n\t\tct[3] = ((ct[3] ^ ct[2]) << (64 - 33)) | ((ct[3] ^ ct[2]) >> 33)\n\t\tct[2] -= ct[3]\n\t\tct[1] = ((ct[1] ^ ct[0]) << (64 - 25)) | ((ct[1] ^ ct[0]) >> 25)\n\t\tct[0] -= ct[1]\n\n\t\t// Subtract round key\n\t\tct[0] -= c.ks[d/4][0]\n\t\tct[1] -= c.ks[d/4][1]\n\t\tct[2] -= c.ks[d/4][2]\n\t\tct[3] -= c.ks[d/4][3]\n\n\t\t// Four rounds of permute and unmix\n\t\tct[1], ct[3] = ct[3], ct[1]\n\t\tct[3] = ((ct[3] ^ ct[2]) << (64 - 37)) | ((ct[3] ^ ct[2]) >> 37)\n\t\tct[2] -= ct[3]\n\t\tct[1] = ((ct[1] ^ ct[0]) << (64 - 5)) | ((ct[1] ^ ct[0]) >> 5)\n\t\tct[0] -= ct[1]\n\n\t\tct[1], ct[3] = ct[3], ct[1]\n\t\tct[3] = ((ct[3] ^ ct[2]) << (64 - 40)) | ((ct[3] ^ ct[2]) >> 40)\n\t\tct[2] -= ct[3]\n\t\tct[1] = ((ct[1] ^ ct[0]) << (64 - 23)) | ((ct[1] ^ ct[0]) >> 23)\n\t\tct[0] -= ct[1]\n\n\t\tct[1], ct[3] = ct[3], ct[1]\n\t\tct[3] = ((ct[3] ^ ct[2]) << (64 - 57)) | ((ct[3] ^ ct[2]) >> 57)\n\t\tct[2] -= ct[3]\n\t\tct[1] = ((ct[1] ^ ct[0]) << (64 - 52)) | ((ct[1] ^ ct[0]) >> 52)\n\t\tct[0] -= ct[1]\n\n\t\tct[1], ct[3] = ct[3], ct[1]\n\t\tct[3] = ((ct[3] ^ ct[2]) << (64 - 16)) | ((ct[3] ^ ct[2]) >> 16)\n\t\tct[2] -= ct[3]\n\t\tct[1] = ((ct[1] ^ ct[0]) << (64 - 14)) | ((ct[1] ^ ct[0]) >> 14)\n\t\tct[0] -= ct[1]\n\n\t\t// Subtract round key\n\t\tct[0] -= c.ks[(d/4)-1][0]\n\t\tct[1] -= c.ks[(d/4)-1][1]\n\t\tct[2] -= c.ks[(d/4)-1][2]\n\t\tct[3] -= c.ks[(d/4)-1][3]\n\t}\n\n\t// Store decrypted value in destination\n\tstoreWord(dst[0:8], ct[0])\n\tstoreWord(dst[8:16], ct[1])\n\tstoreWord(dst[16:24], ct[2])\n\tstoreWord(dst[24:32], ct[3])\n}", "func keySavedInSSHAgent(publicKey []byte, identities []*agent.Key) (result bool, index int) {\n\tfor i, identity := range identities {\n\t\t//In my tests, string(publicKey) has 1 extra character added at the end which would make the comparison 
to identity.String() fail if not removed\n\t\tpublicKeyString := string(publicKey)[:len(string(publicKey))-1]\n\t\tif publicKeyString == identity.String() {\n\t\t\treturn true, i\n\t\t}\n\t}\n\treturn false, -1\n}", "func GenerateWeakKeyPair() ([]byte, []byte, []byte, error) {\n prv, pub, ssh, err := generateKeyPairs(rsaWeakKeySize)\n return pub, prv, ssh, err\n}", "func main() {\n\tpassword := \"I am Groot. I am Groot. I am Groot. I am Groot. I am Groot. I am Groot. I am Groot. I am Groot. \"\n\toriginData := []byte(password)\n\n\t//your can\n\tpubkey := publicKey\n\tprikey := privateKey\n\n\tciphertext, e := core.EncryptRsa(originData, pubkey)\n\tif e != nil {\n\t\tfmt.Println(e)\n\t\treturn\n\t}\n\tdata, e := core.DecryptRsa(ciphertext, prikey)\n\tif e != nil {\n\t\tfmt.Println(e)\n\t\treturn\n\t}\n\n\tfmt.Println(string(data) == string(originData))\n}", "func (pv4 *ProtoV4Local) encrypt(\n\tkey SymmetricKey,\n\tmessage []byte,\n\toptionalFooter []byte,\n\tassertion []byte) (string, error) {\n\n\t// step 1\n\tconst header = headerV4Local\n\n\t// step 2\n\tnonce := make([]byte, nonceLenV4)\n\t_, err := rand.Read(nonce)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"rand.Read problem: %w\", err)\n\t}\n\n\t// this is supplementary and not exposed as a public API (for testing purposes only)\n\t// it is about replacing random bytes with specified in advance value if we called this from test\n\tif pv4.testNonce != nil {\n\t\tnonce = pv4.testNonce\n\t}\n\n\t// step 3\n\tencKey, authKey, nonce2, err := splitV4(key, nonce)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"splitV4 problem: %w\", err)\n\t}\n\n\t// step 4\n\n\tciph, err := chacha20.NewUnauthenticatedCipher(encKey, nonce2)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to create cipher: %w\", err)\n\t}\n\n\tc := make([]byte, len(message))\n\tciph.XORKeyStream(c, message)\n\n\t// step 5\n\tpreAuth := preAuthenticationEncoding([]byte(header), nonce, c, optionalFooter, assertion)\n\n\t// step 6\n\thash, err := blake2b.New(32, authKey)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"blake2b.New hash problem: %w\", err)\n\t}\n\n\tif _, err := hash.Write(preAuth); err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to hash payload: %w\", err)\n\t}\n\n\tt := hash.Sum(nil)\n\n\t// step 7\n\n\toffset := 0\n\tb64Content := make([]byte, len(nonce)+len(c)+len(t))\n\toffset += copy(b64Content[offset:], nonce)\n\toffset += copy(b64Content[offset:], c)\n\tcopy(b64Content[offset:], t)\n\tb64C := b64(b64Content)\n\n\temptyFooter := len(optionalFooter) == 0\n\tvar b64Footer string\n\tif !emptyFooter {\n\t\tb64Footer = b64(optionalFooter)\n\t}\n\n\tvar token string\n\tif emptyFooter {\n\t\ttoken = strings.Join([]string{headerV4Version, headerV4PurposeLocal, b64C}, \".\")\n\t} else {\n\t\ttoken = strings.Join([]string{headerV4Version, headerV4PurposeLocal, b64C, b64Footer}, \".\")\n\t}\n\n\treturn token, nil\n\n}", "func (t *Crypto) Encrypt(msg, aad []byte, kh interface{}) ([]byte, []byte, error) {\n\tkeyHandle, ok := kh.(*keyset.Handle)\n\tif !ok {\n\t\treturn nil, nil, errBadKeyHandleFormat\n\t}\n\n\tps, err := keyHandle.Primitives()\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"get primitives: %w\", err)\n\t}\n\n\ta, err := aead.New(keyHandle)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"create new aead: %w\", err)\n\t}\n\n\tct, err := a.Encrypt(msg, aad)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"encrypt msg: %w\", err)\n\t}\n\n\t// Tink appends a key prefix + nonce to ciphertext, let's remove them to get 
the raw ciphertext\n\tivSize := nonceSize(ps)\n\tprefixLength := len(ps.Primary.Prefix)\n\tcipherText := ct[prefixLength+ivSize:]\n\tnonce := ct[prefixLength : prefixLength+ivSize]\n\n\treturn cipherText, nonce, nil\n}", "func (round *presign1) prepare() error {\n\ti := round.PartyID().Index\n\n\txi := round.key.Xi\n\tks := round.key.Ks\n\tBigXs := round.key.BigXj\n\n\t// adding the key derivation delta to the xi's\n\t// Suppose x has shamir shares x_0, x_1, ..., x_n\n\t// So x + D has shamir shares x_0 + D, x_1 + D, ..., x_n + D\n\tmod := common.ModInt(round.Params().EC().Params().N)\n\txi = mod.Add(round.temp.keyDerivationDelta, xi)\n\tround.key.Xi = xi\n\n\tif round.Threshold()+1 > len(ks) {\n\t\treturn fmt.Errorf(\"t+1=%d is not satisfied by the key count of %d\", round.Threshold()+1, len(ks))\n\t}\n\tif wi, BigWs, err := PrepareForSigning(round.Params().EC(), i, len(ks), xi, ks, BigXs); err != nil {\n\t\treturn err\n\t} else {\n\t\tround.temp.w = wi\n\t\tround.temp.BigWs = BigWs\n\t}\n\n\treturn nil\n}", "func encrypt(data []byte, key []byte) []byte {\r\n\t// generate a new aes cipher using our 32 byte long key\r\n c, err := aes.NewCipher(key)\r\n if err != nil {\r\n fmt.Println(\"aes.NewCipher failed:\", err)\r\n\t}\r\n\r\n\t// Galois/Counter Mode\r\n\tgcm, err := cipher.NewGCM(c)\r\n if err != nil {\r\n fmt.Println(\"cipher.NewGCM failed:\", err)\r\n }\r\n\r\n\tnonce := make([]byte, gcm.NonceSize())\r\n\r\n\tif _, err = io.ReadFull(rand.Reader, nonce); err != nil {\r\n fmt.Println(err)\r\n\t}\r\n\r\n\tcode := gcm.Seal(nonce, nonce, data, nil)\r\n\r\n\treturn code\r\n}", "func signBlindlySchnorr (conn net.Conn, suite abstract.Suite, kv crypto.SchnorrKeyset, sharedinfo []byte) {\n \n defer conn.Close()\n\n fmt.Println(\"SERVER\", \"Sending initial parameters\")\n\n signerParams, err := crypto.NewPrivateParams(suite, sharedinfo)\n if err != nil {\n fmt.Println(\"SERVER\", \"Error creating new private parameters\", err.Error())\n return\n }\n\n // \"send\" these to the user.\n userPublicParams := signerParams.DerivePubParams()\n buffer := bytes.Buffer{} \n abstract.Write(&buffer, &userPublicParams, suite)\n conn.Write(buffer.Bytes())\n\n // now we need to wait for the client to send us \"e\"\n ch := make(chan []byte)\n errorCh := make(chan error)\n\n // this neat little routine for wrapping read connections\n // in a class unashamedly stolen from stackoverflow:\n // http://stackoverflow.com/a/9764191\n go func(ch chan []byte, eCh chan error) {\n for {\n // try to read the data\n fmt.Println(\"SERVER\", \"Read goroutine off and going\")\n buffer := make([]byte, 1026)\n _,err := conn.Read(buffer)\n if err != nil {\n // send an error if it's encountered\n errorCh <- err\n return\n }\n // send data if we read some.\n ch <- buffer\n }\n }(ch, errorCh)\n\n for {\n select {\n case data := <-ch:\n fmt.Println(\"SERVER\", \"Received Message\")\n\n var challenge crypto.WISchnorrChallengeMessage\n buffer := bytes.NewBuffer(data)\n err = abstract.Read(buffer, &challenge, suite)\n if err != nil {\n fmt.Println(\"SERVER\", \"Error\", err.Error())\n return\n }\n\n response := crypto.ServerGenerateResponse(suite, challenge, signerParams, kv)\n respbuffer := bytes.Buffer{} \n abstract.Write(&respbuffer, &response, suite)\n conn.Write(respbuffer.Bytes())\n\n fmt.Println(\"SERVER\", \"We're done\")\n return\n\n case err := <- errorCh:\n if err == io.EOF {\n return\n }\n // we should, really, log instead.\n fmt.Println(\"Encountered error serving client\")\n fmt.Println(err.Error())\n break\n } \n }\n\n}", 
"func RsaAesPublicKeyEncryptAndSign(plainText string, recipientPublicKeyHexOrPem string, senderPublicKeyHexOrPem string, senderPrivateKeyHexOrPem string) (encryptedData string, err error) {\n\t// validate inputs\n\tif util.LenTrim(plainText) == 0 {\n\t\treturn \"\", errors.New(\"Data To Encrypt is Required\")\n\t}\n\n\tif util.LenTrim(recipientPublicKeyHexOrPem) == 0 {\n\t\treturn \"\", errors.New(\"Recipient Public Key is Required\")\n\t}\n\n\tif util.LenTrim(senderPublicKeyHexOrPem) == 0 {\n\t\treturn \"\", errors.New(\"Sender Public Key is Required\")\n\t}\n\n\tif util.LenTrim(senderPrivateKeyHexOrPem) == 0 {\n\t\treturn \"\", errors.New(\"Sender Private Key is Required\")\n\t}\n\n\t//\n\t// generate random aes key for data encryption during this session\n\t//\n\taesKey, err1 := Generate32ByteRandomKey(Sha256(util.NewUUID(), util.CurrentDateTime()))\n\n\tif err1 != nil {\n\t\treturn \"\", errors.New(\"Dynamic AES New Key Error: \" + err1.Error())\n\t}\n\n\t//\n\t// rsa sender private key sign plain text data\n\t//\n\tsignature, err2 := RsaPrivateKeySign(plainText, senderPrivateKeyHexOrPem)\n\n\tif err2 != nil {\n\t\treturn \"\", errors.New(\"Dynamic AES Siganture Error: \" + err2.Error())\n\t}\n\n\t//\n\t// encrypt plain text data using aes key with aes gcm,\n\t// note: payload format = plainText<VT>senderPublicKeyHex<VT>plainTextSignature\n\t//\n\taesEncryptedData, err3 := AesGcmEncrypt(plainText+ascii.AsciiToString(ascii.VT)+senderPublicKeyHexOrPem+ascii.AsciiToString(ascii.VT)+signature, aesKey)\n\n\tif err3 != nil {\n\t\treturn \"\", errors.New(\"Dynamic AES Data Encrypt Error: \" + err3.Error())\n\t}\n\n\t//\n\t// now protect the aesKey with recipient's public key using rsa encrypt\n\t//\n\taesEncryptedKey, err4 := RsaPublicKeyEncrypt(aesKey, recipientPublicKeyHexOrPem)\n\n\tif err4 != nil {\n\t\treturn \"\", errors.New(\"Dynamic AES Key Encrypt Error: \" + err4.Error())\n\t}\n\n\t//\n\t// compose output encrypted payload\n\t// note: aesEncryptedKey_512_Bytes_Always + aesEncryptedData_Variable_Bytes + 64BytesRecipientPublicKeyHashWithSalt of 'TPK@2019' (TPK@2019 doesn't represent anything, just for backward compatibility in prior encrypted data)\n\t// parse first 512 bytes of payload = aes encrypted key (use recipient rsa private key to decrypt)\n\t// parse rest other than first 512 bytes of payload = aes encrypted data, using aes key to decrypt, contains plaintext<VT>senderPublicKey<VT>siganture\n\t//\n\tencryptedPayload := ascii.AsciiToString(ascii.STX) + aesEncryptedKey + aesEncryptedData + Sha256(recipientPublicKeyHexOrPem, \"TPK@2019\") + ascii.AsciiToString(ascii.ETX) // wrap in STX and ETX envelop to denote start and end of the encrypted payload\n\n\t//\n\t// send encrypted payload data to caller\n\t//\n\treturn encryptedPayload, nil\n}", "func (c *Client) seal(input []byte, round uint64) []byte {\n\tmsg := input\n\trnd := make([]byte, 24)\n\tbinary.PutUvarint(rnd, round)\n\tnonce := [24]byte{}\n\tcopy(nonce[:], rnd[:])\n\tfor i := range c.servers {\n\t\tidx := len(c.servers) - i - 1\n\t\tkey := [32]byte{}\n\t\tcopy(key[:], c.keys[idx][:])\n\t\tmsg = secretbox.Seal(nil, msg, &nonce, &key)\n\t}\n\treturn msg\n}", "func AttemptDecrypt(block Block, key *rsa.PrivateKey) (message string, err error) {\n\t// First off, we confirm the integrity of the block data\n\t// If the blockID doesn't match the hash of the blockdata, then it has been modified\n\t// If that occurs, report an error\n\tfmt.Println(\"Verifying hashes...\")\n\tdataString := StringifyBlockData(block.Data)\n\thasher 
:= sha3.New512()\n\tif _, err := hasher.Write([]byte(dataString)); err != nil {\n\t\treturn \"\", err\n\t}\n\ttest := hasher.Sum(nil)\n\tif !bytes.Equal(test, block.ID[:]) {\n\t\t// The blockdata has been modified!\n\t\t// Error out\n\t\treturn \"\", errors.New(\"Blockdata hash mismatch: ID \" + string(block.ID[:64]) +\n\t\t\t\" is not equal to hash of data \" + string(test[:64]))\n\t}\n\tfmt.Println(\"Hash of data matches set ID.\\n\")\n\tfmt.Println(\"\")\n\t// No data tampering has occurred if we get here...\n\t// Or if it has, it caused a collision in SHA3-512, which is insanely unlikely\n\n\t// Controls how long we wait for decryption to complete\n\t// Go doesn't perform encryptions in constant-time...\n\t// So to prevent timing attacks, we wait after decryption\n\t// The time is taken before running the decryption, and then after encrypt\n\t// we wait until that much time has elapsed\n\t// Thus, we get pseudo-constant time behavior\n\t// This time needs to be long enough that decryption of the key and of the\n\t// message will be complete, each in one period, for any (reasonable) message.\n\tconstantDelayFactor := 250 * time.Millisecond\n\n\t// First, attempt to decrypt the encryptedKey\n\t// First, get our encrypted key as a byte array\n\tfmt.Println(\"Performing RSA decryption of encrypted AES key...\")\n\tencryptedKeyBytes, er := base64.URLEncoding.DecodeString(block.Data.EncryptedKey)\n\tif er != nil {\n\t\treturn \"\", er\n\t}\n\t// Then, get current time and add constant delay factor\n\tendpoint := time.Now().Add(constantDelayFactor)\n\t// Then actually attempt decryption\n\tAESkey, e := rsa.DecryptOAEP(sha3.New512(), rand.Reader, key, encryptedKeyBytes, block.Data.Parent[:])\n\t// Now wait until endpoint\n\ttime.Sleep(time.Until(endpoint))\n\t// Return on error\n\tif e != nil {\n\t\treturn \"\", e\n\t}\n\tfmt.Println(\"Done.\\n\")\n\tfmt.Println(\"\")\n\n\t// Now, attempt to use that key to decrypt the encryptedMessage\n\tfmt.Println(\"Performing AES256-GCM decryption of message ciphertext...\")\n\tAESCipher, err := aes.NewCipher(AESkey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tmsg, error := base64.URLEncoding.DecodeString(block.Data.EncryptedMessage)\n\tif error != nil {\n\t\treturn \"\", error\n\t}\n\tauth, er := cipher.NewGCM(AESCipher)\n\tif er != nil {\n\t\treturn \"\", er\n\t}\n\t// First, get current time and add constant delay factor\n\tendpoint = time.Now().Add(constantDelayFactor)\n\t// Now actually attempt decryption\n\tmsg, error = auth.Open(nil, block.Data.Nonce, msg, block.Data.Salt[:])\n\t// Now wait until endpoint\n\ttime.Sleep(time.Until(endpoint))\n\tif error != nil {\n\t\treturn \"\", error\n\t}\n\tfmt.Println(\"Done.\\n\")\n\tfmt.Println(\"\")\n\t// And return\n\treturn string(msg), nil\n}", "func unmarshalTunnelKey(data []byte, info *TunnelKey) error {\n\tad, err := netlink.NewAttributeDecoder(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar multiError error\n\tfor ad.Next() {\n\t\tswitch ad.Type() {\n\t\tcase tcaTunnelKeyTm:\n\t\t\ttm := &Tcft{}\n\t\t\terr = unmarshalStruct(ad.Bytes(), tm)\n\t\t\tmultiError = concatError(multiError, err)\n\t\t\tinfo.Tm = tm\n\t\tcase tcaTunnelKeyParms:\n\t\t\tparms := &TunnelParms{}\n\t\t\terr = unmarshalStruct(ad.Bytes(), parms)\n\t\t\tmultiError = concatError(multiError, err)\n\t\t\tinfo.Parms = parms\n\t\tcase tcaTunnelKeyEncIPv4Src:\n\t\t\ttmp := uint32ToIP(ad.Uint32())\n\t\t\tinfo.KeyEncSrc = &tmp\n\t\tcase tcaTunnelKeyEncIPv4Dst:\n\t\t\ttmp := uint32ToIP(ad.Uint32())\n\t\t\tinfo.KeyEncDst = 
&tmp\n\t\tcase tcaTunnelKeyEncIPv6Src:\n\t\t\ttmp, err := bytesToIP(ad.Bytes())\n\t\t\tmultiError = concatError(multiError, err)\n\t\t\tinfo.KeyEncSrc = &tmp\n\t\tcase tcaTunnelKeyEncIPv6Dst:\n\t\t\ttmp, err := bytesToIP(ad.Bytes())\n\t\t\tmultiError = concatError(multiError, err)\n\t\t\tinfo.KeyEncDst = &tmp\n\t\tcase tcaTunnelKeyEncKeyID:\n\t\t\ttmp := ad.Uint32()\n\t\t\tinfo.KeyEncKeyID = &tmp\n\t\tcase tcaTunnelKeyEncDstPort:\n\t\t\ttmp := ad.Uint16()\n\t\t\tinfo.KeyEncDstPort = &tmp\n\t\tcase tcaTunnelKeyNoCSUM:\n\t\t\ttmp := ad.Uint8()\n\t\t\tinfo.KeyNoCSUM = &tmp\n\t\tcase tcaTunnelKeyEncTOS:\n\t\t\ttmp := ad.Uint8()\n\t\t\tinfo.KeyEncTOS = &tmp\n\t\tcase tcaTunnelKeyEncTTL:\n\t\t\ttmp := ad.Uint8()\n\t\t\tinfo.KeyEncTTL = &tmp\n\t\tcase tcaTunnelKeyPad:\n\t\t\t// padding does not contain data, we just skip it\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unmarshalTunnelKey()\\t%d\\n\\t%v\", ad.Type(), ad.Bytes())\n\t\t}\n\t}\n\treturn concatError(multiError, ad.Err())\n}", "func (s *SSHNegotiator) populateAuthorizedKeys() {\n\tfileList := []string{}\n\t// TODO: Path should not be hardcoded\n\terr := filepath.Walk(\"honeybadger/authorized\", func(path string, f os.FileInfo, err error) error {\n\t\tif !f.IsDir() {\n\t\t\tfileList = append(fileList, path)\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\t// TODO: This shouldn't be a fatal error, we can recover gracefully from this\n\t\tsimplelog.Fatal.Println(err)\n\t}\n\n\tfor _, file := range fileList {\n\t\tb, err := ioutil.ReadFile(file)\n\t\tname := filepath.Base(file)\n\n\t\t// TODO: We should allow the possibility of more than one key per file\n\t\tpk, _, _, _, err := ssh.ParseAuthorizedKey(b)\n\t\tif err != nil {\n\t\t\tsimplelog.Error.Printf(\"Error parsing authorized keys: %+v\", err)\n\t\t\tcontinue\n\t\t}\n\t\ts.AuthKeys[name] = []ssh.PublicKey{pk}\n\t}\n}", "func (s *Scanner) AttackCredentials(targets []Stream) []Stream {\n\tresChan := make(chan Stream)\n\tdefer close(resChan)\n\n\tfor i := range targets {\n\t\tgo s.attackCameraCredentials(targets[i], resChan)\n\t}\n\n\tfor range targets {\n\t\tattackResult := <-resChan\n\t\tif attackResult.CredentialsFound {\n\t\t\ttargets = replace(targets, attackResult)\n\t\t}\n\t}\n\n\treturn targets\n}", "func processPrivateKeyFiles(keyFilesAndPwds []string) ([][]byte, [][]byte, [][]byte, [][]byte, [][]byte, [][]byte, error) {\n\tvar (\n\t\tgpgSecretKeyRingFiles [][]byte\n\t\tgpgSecretKeyPasswords [][]byte\n\t\tprivkeys [][]byte\n\t\tprivkeysPasswords [][]byte\n\t\tpkcs11Yamls [][]byte\n\t\tkeyProviders [][]byte\n\t\terr error\n\t)\n\t// keys needed for decryption in case of adding a recipient\n\tfor _, keyfileAndPwd := range keyFilesAndPwds {\n\t\tvar password []byte\n\n\t\t// treat \"provider\" protocol separately\n\t\tif strings.HasPrefix(keyfileAndPwd, \"provider:\") {\n\t\t\tkeyProviders = append(keyProviders, []byte(keyfileAndPwd[len(\"provider:\"):]))\n\t\t\tcontinue\n\t\t}\n\t\tparts := strings.Split(keyfileAndPwd, \":\")\n\t\tif len(parts) == 2 {\n\t\t\tpassword, err = processPwdString(parts[1])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, nil, nil, nil, nil, err\n\t\t\t}\n\t\t}\n\n\t\tkeyfile := parts[0]\n\t\ttmp, err := os.ReadFile(keyfile)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, nil, nil, nil, err\n\t\t}\n\t\tisPrivKey, err := encutils.IsPrivateKey(tmp, password)\n\t\tif encutils.IsPasswordError(err) {\n\t\t\treturn nil, nil, nil, nil, nil, nil, err\n\t\t}\n\n\t\tif encutils.IsPkcs11PrivateKey(tmp) {\n\t\t\tpkcs11Yamls = append(pkcs11Yamls, tmp)\n\t\t} else if 
isPrivKey {\n\t\t\tprivkeys = append(privkeys, tmp)\n\t\t\tprivkeysPasswords = append(privkeysPasswords, password)\n\t\t} else if encutils.IsGPGPrivateKeyRing(tmp) {\n\t\t\tgpgSecretKeyRingFiles = append(gpgSecretKeyRingFiles, tmp)\n\t\t\tgpgSecretKeyPasswords = append(gpgSecretKeyPasswords, password)\n\t\t} else {\n\t\t\t// ignore if file is not recognized, so as not to error if additional\n\t\t\t// metadata/cert files exists\n\t\t\tcontinue\n\t\t}\n\t}\n\treturn gpgSecretKeyRingFiles, gpgSecretKeyPasswords, privkeys, privkeysPasswords, pkcs11Yamls, keyProviders, nil\n}", "func encryptUnsafe(plaintext []byte, key *[32]byte, nonce []byte) (ciphertext []byte, err error) {\n\tif key == nil {\n\t\treturn nil, fmt.Errorf(\"no key provided\")\n\t}\n\tblock, err := aes.NewCipher(key[:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgcm, err := cipher.NewGCM(block)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(nonce) != gcm.NonceSize() {\n\t\treturn nil, fmt.Errorf(\"Invalid nonce size, want: %d got %d\", gcm.NonceSize(), len(nonce))\n\t}\n\n\treturn gcm.Seal(nonce, nonce, plaintext, nil), nil\n}" ]
[ "0.53970337", "0.5308709", "0.52447593", "0.5208022", "0.5136759", "0.5136451", "0.5077559", "0.5055551", "0.50317246", "0.49933606", "0.4976835", "0.49750414", "0.49339533", "0.4925139", "0.49096018", "0.48946983", "0.48878032", "0.4884183", "0.48840725", "0.4876813", "0.4866543", "0.48523647", "0.48517436", "0.48455", "0.48274332", "0.48173645", "0.48086038", "0.48084906", "0.47798067", "0.47776145", "0.47667193", "0.4760681", "0.4752552", "0.47400486", "0.47398522", "0.47367588", "0.47162527", "0.47053218", "0.47047472", "0.46718714", "0.46631676", "0.4656715", "0.4653044", "0.46519214", "0.46467224", "0.46467167", "0.46403018", "0.46203536", "0.46194598", "0.46184748", "0.4616146", "0.46049014", "0.45992085", "0.45940915", "0.45895317", "0.45894438", "0.45837048", "0.45802486", "0.4576083", "0.4571255", "0.45653883", "0.45480436", "0.45422125", "0.45314202", "0.45255223", "0.45228726", "0.4510369", "0.45039532", "0.45037037", "0.45027438", "0.45012432", "0.44949782", "0.44949153", "0.44945407", "0.44886753", "0.44799587", "0.4478049", "0.44768792", "0.44742265", "0.4471319", "0.44692618", "0.4469172", "0.44676003", "0.44663534", "0.44559178", "0.44547606", "0.4453481", "0.44526914", "0.44489923", "0.4444656", "0.4440518", "0.44376746", "0.44367537", "0.4435609", "0.44346777", "0.4431574", "0.4431206", "0.44282234", "0.4416848", "0.44113636" ]
0.67120165
0
NewConfig returns an empty ServerConfigBuilder.
func NewConfig() ServerConfigBuilder { return &serverConfig{} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func newConfigServer() *ConfigServer {\n\treturn &ConfigServer{}\n}", "func newConfig() *Config {\n\treturn &Config{\n\t\tgeneral{\n\t\t\tVerbose: false,\n\t\t},\n\t\tserver{\n\t\t\tType: \"http\",\n\t\t\tHost: \"0.0.0.0\",\n\t\t},\n\t\tmongo{\n\t\t\tHost: \"0.0.0.0:27017\",\n\t\t\tDatabase: \"etlog\",\n\t\t\tCollection: \"logs\",\n\t\t},\n\t}\n}", "func New() *Config {\n\t// Keep only one instance of server config\n\tif serverConfig != nil {\n\t\treturn serverConfig\n\t}\n\n\t// Get the current environment\n\tenvironment := getEnv(\"_APP_ENV\", \"development\")\n\n\t// If not running on stating nor production, fallback to local configs\n\tif environment != \"staging\" && environment != \"production\" {\n\t\tserverConfig = &Config{\n\t\t\tEnvironment: \"development\",\n\t\t\tDelay: 5,\n\t\t\tMonitoringQuantity: 5,\n\t\t}\n\t\treturn serverConfig\n\t}\n\n\t// For prod and staging\n\tserverConfig = &Config{\n\t\tEnvironment: environment,\n\t}\n\n\treturn serverConfig\n}", "func newConfig() Config {\n\treturn Config{\n\t\tDefaultContainerConfig: newDefaultContainerConfig(),\n\t\tContainersConfig: map[string]ContainerConfig{},\n\t\tExclude: []string{},\n\t}\n}", "func newConfig(envParams envParams) error {\n\t// Initialize server config.\n\tsrvCfg := newServerConfigV14()\n\n\t// If env is set for a fresh start, save them to config file.\n\tif globalIsEnvCreds {\n\t\tsrvCfg.SetCredential(envParams.creds)\n\t}\n\n\tif globalIsEnvBrowser {\n\t\tsrvCfg.SetBrowser(envParams.browser)\n\t}\n\n\t// Create config path.\n\tif err := createConfigDir(); err != nil {\n\t\treturn err\n\t}\n\n\t// hold the mutex lock before a new config is assigned.\n\t// Save the new config globally.\n\t// unlock the mutex.\n\tserverConfigMu.Lock()\n\tserverConfig = srvCfg\n\tserverConfigMu.Unlock()\n\n\t// Save config into file.\n\treturn serverConfig.Save()\n}", "func NewServerConfig(cfg *Config) *server.Config {\n\treturn &cfg.serverConfig\n}", "func newConfig() *Config {\n\n\tc := &Config{}\n\tvar logLevel, bServers, dServers string\n\n\tflag.StringVar(&c.ControlAddress, \"controlAddress\", \"localhost:4000\",\n\t\t\"Control process IP address, default localhost:4000\")\n\n\tflag.BoolVar(&c.Broadcast, \"broadcast\", true,\n\t\t\"Set to false to squash actual broadcast.\")\n\n\tflag.IntVar(&c.Bclients, \"bClients\", 1,\n\t\t\"The number of broadcast clients; Default 1\")\n\n\tflag.IntVar(&c.Dclients, \"dClients\", 1,\n\t\t\"The number of deliver clients; Default 1\")\n\n\tflag.IntVar(&c.Channels, \"channels\", 1,\n\t\t\"The number of channels; Default 1\")\n\n\tflag.StringVar(&bServers, \"bServers\", \"\",\n\t\t\"A comma-separated list of IP:PORT of broadcast servers to target; Required\")\n\n\tflag.StringVar(&dServers, \"dServers\", \"\",\n\t\t\"A comma-separated list of IP:PORT of deliver servers to target; Defaults to broadcast szervers\")\n\n\tflag.IntVar(&c.Transactions, \"transactions\", 1,\n\t\t\"The number of transactions broadcast to each client's servers; Default 1\")\n\n\tflag.IntVar(&c.Payload, \"payload\", TxHeaderSize,\n\t\t\"Payload size in bytes; Minimum/default is the performance header size (56 bytes)\")\n\n\tflag.IntVar(&c.Burst, \"burst\", 1,\n\t\t\"The number of transactions burst to each server during broadcast; Dafault 1\")\n\n\tflag.DurationVar(&c.Delay, \"delay\", 0,\n\t\t\"The delay between bursts, in the form required by time.ParseDuration(); Default is no delay\")\n\n\tflag.IntVar(&c.Window, \"window\", 100,\n\t\t\"The number of blocks allowed to be delivered without an ACK; Default 
100\")\n\n\tflag.IntVar(&c.AckEvery, \"ackEvery\", 70,\n\t\t\"The deliver client will ACK every (this many) blocks; Default 70\")\n\n\tflag.DurationVar(&c.Timeout, \"timeout\", 30*time.Second,\n\t\t\"The initialization timeout, in the form required by time.ParseDuration(); Default 30s\")\n\n\tflag.BoolVar(&c.LatencyAll, \"latencyAll\", false,\n\t\t\"By default, only block latencies are reported. Set -latencyAll=true to report all transaction latencies\")\n\n\tflag.StringVar(&c.LatencyDir, \"latencyDir\", \"\",\n\t\t\"The directory to contain latency files; These files are only created if -latencyDir is specified\")\n\n\tflag.StringVar(&c.LatencyPrefix, \"latencyPrefix\", \"client\",\n\t\t\"Prefix for latency file names\")\n\n\tflag.StringVar(&logLevel, \"logLevel\", \"info\",\n\t\t\"The global logging level; Default 'info'\")\n\n\tflag.StringVar(&c.ControlLogging, \"controlLogging\", \"\",\n\t\t\"Override logging level for the 'control' process\")\n\n\tflag.StringVar(&c.BroadcastLogging, \"broadcastLogging\", \"\",\n\t\t\"Override logging level for the 'broadcast' processes\")\n\n\tflag.StringVar(&c.DeliverLogging, \"deliverLogging\", \"\",\n\t\t\"Override logging level for the 'deliver' processes\")\n\n\tflag.Parse()\n\n\tif c.ControlLogging == \"\" {\n\t\tc.ControlLogging = logLevel\n\t}\n\tif c.BroadcastLogging == \"\" {\n\t\tc.BroadcastLogging = logLevel\n\t}\n\tif c.DeliverLogging == \"\" {\n\t\tc.DeliverLogging = logLevel\n\t}\n\n\tinitLogging(c.ControlLogging)\n\n\trequireUint16(\"bclients\", c.Bclients)\n\trequireUint16(\"dclients\", c.Dclients)\n\trequireUint16(\"channels\", c.Channels)\n\trequireNonEmpty(\"bServers\", bServers)\n\tif dServers == \"\" {\n\t\tdServers = bServers\n\t}\n\trequireUint32(\"transactions\", c.Transactions)\n\trequirePosInt(\"payload\", c.Payload)\n\tif c.Payload < TxHeaderSize {\n\t\tlogger.Infof(\"Payload size will be set to the default (%d bytes)\\n\",\n\t\t\tTxHeaderSize)\n\t\tc.Payload = TxHeaderSize\n\t}\n\trequirePosInt(\"burst\", c.Burst)\n\trequirePosDuration(\"delay\", c.Delay)\n\trequirePosInt(\"window\", c.Window)\n\trequirePosInt(\"ackevery\", c.AckEvery)\n\trequireLE(\"ackevery\", \"window\", c.AckEvery, c.Window)\n\trequirePosDuration(\"timeout\", c.Timeout)\n\n\tc.Bservers = strings.Split(bServers, \",\")\n\tc.NumBservers = len(c.Bservers)\n\n\tc.Dservers = strings.Split(dServers, \",\")\n\tc.NumDservers = len(c.Dservers)\n\n\tlogger.Infof(\"Configuration\")\n\tlogger.Infof(\" Broadcast Servers: %d: %v\", c.NumBservers, c.Bservers)\n\tlogger.Infof(\" Broadcast Clients: %d\", c.Bclients)\n\tlogger.Infof(\" Deliver Servers : %d: %v\", c.NumDservers, c.Dservers)\n\tlogger.Infof(\" Deliver Clients : %d\", c.Dclients)\n\tlogger.Infof(\" Channels : %d\", c.Channels)\n\tlogger.Infof(\" Transactions : %d\", c.Transactions)\n\tlogger.Infof(\" Payload : %d\", c.Payload)\n\tlogger.Infof(\" Burst : %d\", c.Burst)\n\tlogger.Infof(\" Delay : %s\", c.Delay.String())\n\tlogger.Infof(\" Window : %d\", c.Window)\n\tlogger.Infof(\" AckEvery : %d\", c.AckEvery)\n\tlogger.Infof(\" Broadcast? 
: %v\", c.Broadcast)\n\n\tc.TotalBroadcastClients =\n\t\tuint64(c.NumBservers) * uint64(c.Channels) * uint64(c.Bclients)\n\tc.TxBroadcastPerClient = uint64(c.Transactions)\n\tc.BytesBroadcastPerClient = c.TxBroadcastPerClient * uint64(c.Payload)\n\tc.TotalTxBroadcast = uint64(c.TotalBroadcastClients) * c.TxBroadcastPerClient\n\tc.TotalBytesBroadcast = c.TotalTxBroadcast * uint64(c.Payload)\n\n\tc.TotalDeliverClients =\n\t\tuint64(c.NumDservers) * uint64(c.Channels) * uint64(c.Dclients)\n\tc.TxDeliveredPerClient =\n\t\tuint64(c.NumBservers) * uint64(c.Bclients) * uint64(c.Transactions)\n\tc.BytesDeliveredPerClient = c.TxDeliveredPerClient * uint64(c.Payload)\n\tc.TotalTxDelivered = c.TxDeliveredPerClient * c.TotalDeliverClients\n\tc.TotalBytesDelivered = c.TotalTxDelivered * uint64(c.Payload)\n\n\treturn c\n}", "func NewConfig(c *v2.ServerConfig) *Config {\n\treturn &Config{\n\t\tServerName: c.ServerName,\n\t\tLogPath: c.DefaultLogPath,\n\t\tLogLevel: configmanager.ParseLogLevel(c.DefaultLogLevel),\n\t\tLogRoller: c.GlobalLogRoller,\n\t\tGracefulTimeout: c.GracefulTimeout.Duration,\n\t\tUseNetpollMode: c.UseNetpollMode,\n\t}\n}", "func NewConfig() *Config {\n\treturn &Config{\n\t\tHosts: []string{\"localhost:10101\"},\n\t\tGenerate: true,\n\t\tVerify: \"update\",\n\t\tPrefix: \"imaginary-\",\n\t\tThreadCount: 0, // if unchanged, uses workloadspec.threadcount\n\t\t// if workloadspec.threadcount is also unset, defaults to 1\n\t}\n}", "func NewServerConfig(hbInfo *HeartbeatInfo, opc, dpc, aspID, tmt, nwApr, corrID uint32, rtCtxs []uint32, si, ni, mp, sls uint8) *Config {\n\treturn &Config{\n\t\tHeartbeatInfo: hbInfo,\n\t\tAspIdentifier: params.NewAspIdentifier(aspID),\n\t\tTrafficModeType: params.NewTrafficModeType(tmt),\n\t\tNetworkAppearance: params.NewNetworkAppearance(nwApr),\n\t\tRoutingContexts: params.NewRoutingContext(rtCtxs...),\n\t\tCorrelationID: params.NewCorrelationID(corrID),\n\t\tOriginatingPointCode: opc,\n\t\tDestinationPointCode: dpc,\n\t\tServiceIndicator: si,\n\t\tNetworkIndicator: ni,\n\t\tMessagePriority: mp,\n\t\tSignalingLinkSelection: sls,\n\t}\n}", "func newConfig() (*rest.Config, error) {\n // try in cluster config first, it should fail quickly on lack of env vars\n cfg, err := inClusterConfig()\n if err != nil {\n cfg, err = clientcmd.BuildConfigFromFlags(\"\", clientcmd.RecommendedHomeFile)\n if err != nil {\n return nil, errors.Wrap(err, \"failed to get InClusterConfig and Config from kube_config\")\n }\n }\n return cfg, nil\n}", "func (c CompletedConfig) New() (*GenericAPIServer, error) {\n\ts := &GenericAPIServer{\n\t\t//SecureServingInfo: c.SecureServing,\n\t\t//InsecureServingInfo: c.InsecureServing,\n\t\tmode: c.Mode,\n\t\thealthz: c.Healthz,\n\t\t//enableMetrics: c.EnableMetrics,\n\t\t//enableProfiling: c.EnableProfiling,\n\t\tmiddlewares: c.Middlewares,\n\t\tEngine: gin.New(),\n\t}\n\n\tinitGenericAPIServer(s)\n\n\treturn s, nil\n}", "func newConfig() *config {\n\treturn &config{\n\t\tAddr: \":80\",\n\t\tCacheSize: 1000,\n\t\tLogLevel: \"info\",\n\t\tRequestTimeout: 3000,\n\t\tTargetAddr: \"https://places.aviasales.ru\",\n\t}\n}", "func newServerConfig(fname, id, name, passWord, serverKey string) (err error) {\n\tconfig := Config{\n\t\tid,\n\t\tname,\n\t\t\"server\",\n\t\tpassWord,\n\t\tserverKey,\n\t\tDEFAULT_SERVER_URL,\n\t\tDEFAULT_PROCESS_USER,\n\t\tDEFAULT_PROCESS_LOCK,\n\t\tDEFAULT_PROCESS_LOG,\n\t\tDEFAULT_BASE_DIR,\n\t\tDEFAULT_DATA_DIR,\n\t\tDEFAULT_HTTP_LISTEN,\n\t\tfname,\n\t}\n\n\treturn SaveConfig(config)\n}", "func NewConfig(newServices 
[]services.ServiceConfig, newGroups []services.ServiceGroupConfig) Config {\n\tlog.Printf(\"Creating new config with %d services and %d groups.\\n\", len(newServices), len(newGroups))\n\n\t// Find Env settings common to all services\n\tvar allEnvSlices [][]string\n\tfor _, s := range newServices {\n\t\tallEnvSlices = append(allEnvSlices, s.Env)\n\t}\n\tenv := stringSliceIntersect(allEnvSlices)\n\n\t// Remove common settings from services\n\tvar svcs []services.ServiceConfig\n\tfor _, s := range newServices {\n\t\ts.Env = stringSliceRemoveCommon(env, s.Env)\n\t\tsvcs = append(svcs, s)\n\t}\n\n\tcfg := Config{\n\t\tEnv: env,\n\t\tServices: svcs,\n\t\tGroups: []GroupDef{},\n\t}\n\n\tcfg.AddGroups(newGroups)\n\n\tlog.Printf(\"Config created: %v\", cfg)\n\n\treturn cfg\n}", "func NewConfig() Config {\n\treturn Config{\n\t\tConnections: 5,\n\t}\n}", "func NewConfig() *Config {\n\treturn &Config{\n\t\tPort: \"2000\",\n\t\tDomain: \".dev\",\n\t\tURLPattern: `.*\\.dev$`,\n\t\tServices: nil,\n\t}\n}", "func New() (*Config, error) {\n\tflags := pflag.NewFlagSet(os.Args[0], pflag.ExitOnError)\n\tcfg := &Config{\n\t\tFlags: flags,\n\t\tHTTPAddr: flags.StringP(\"http-addr\", \"l\", \":8080\", \"http listen address\"),\n\t\tHTTPReadHeaderTimeout: flags.DurationP(\"http-timeout\", \"h\", 1*time.Second, \"http timeout for reading request headers\"),\n\t\tCallTimeout: flags.DurationP(\"call-timeout\", \"t\", 0*time.Second, \"function call timeout\"),\n\t\tReadLimit: flags.Int64(\"read-limit\", -1, \"limit the amount of data which can be contained in a requests body\"),\n\t\tFramer: flags.StringP(\"framer\", \"f\", \"\", \"afterburn framer to use: line, json or http\"),\n\t\tBuffer: flags.BoolP(\"buffer\", \"b\", false, \"buffer output before writing\"),\n\t}\n\tif err := cfg.parseCommandline(); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := cfg.parseEnvironment(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn cfg, nil\n}", "func NewConfig(cfg map[string]interface{}) Config {\n\treturn Config{Data: cfg}\n}", "func New() (*Config, error) {\n\n\tvar c Config\n\n\tif err := viper.Unmarshal(&c); err != nil {\n\t\treturn nil, err\n\t}\n\n\tserver := &ServerConfig{\n\t\tPort: c.Server.Port,\n\t\tReadTimeOut: c.Server.ReadTimeOut * time.Second,\n\t\tWriteTimeOut: c.Server.WriteTimeOut * time.Second,\n\t}\n\n\tdb := &DatabaseConfig{\n\t\tPort: c.Database.Port,\n\t\tUser: c.Database.User,\n\t\tPassword: c.Database.Password,\n\t\tClient: c.Database.Client,\n\t\tHost: c.Database.Host,\n\t\tDb: c.Database.Db,\n\t}\n\n\treturn &Config{\n\t\tServer: server,\n\t\tDatabase: db,\n\t}, nil\n}", "func NewConfig() *Config {\n\treturn &Config{\n\t\tMode: \t\t gin.ReleaseMode,\n\t\tMiddlewares: []string{},\n\t\tHealthz: \t true,\n\t}\n}", "func NewConfig(prefix string) *Config {\n\tc := &Config{Port: \"8000\", MaxRequestPerSec: 1}\n\tif err := envconfig.Process(prefix, c); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn c\n}", "func NewServerConf() *ServerConfig {\n return &ServerConfig{\n HTTPConf: &HTTPConfig{\n RunMode:\tviper.GetString(\"runmode\"),\n Addr:\t\tviper.GetString(\"addr\"),\n Name:\t\tviper.GetString(\"name\"),\n PingMax:\tviper.GetInt(\"PingMax\"),\n },\n RedisConf: &RedisConfig{\n Proto: viper.GetString(\"redis.Proto\"),\n Addr: viper.GetString(\"redis.Addr\"),\n Auth: viper.GetString(\"redis.Auth\"),\n MaxIdle: viper.GetInt(\"redis.MaxIdle\"),\n MaxActive: viper.GetInt(\"redis.MaxActive\"),\n IdleTimeout: time.Duration(viper.GetInt(\"redis.IdleTimeout\")),\n },\n }\n}", "func newConfig(appName string, 
pathToKeybase string, log Log, ignoreSnooze bool) (*config, error) {\n\tcfg := newDefaultConfig(appName, pathToKeybase, log, ignoreSnooze)\n\terr := cfg.load()\n\treturn &cfg, err\n}", "func NewConfig() *Config {\n\treturn &Config{\n\t\tBindAddr: \"127.0.0.1:8080\",\n\t\tNetwork: \"udp4\",\n\t}\n}", "func NewConfig() Config {\n\treturn Config{\n\t\tBindAddress: DefaultBindAddress,\n\t\tDatabase: DefaultDatabase,\n\t\tProtocol: DefaultProtocol,\n\t\tNamePosition: DefaultNamePosition,\n\t\tNameSeparator: DefaultNameSeparator,\n\t\tConsistencyLevel: DefaultConsistencyLevel,\n\t}\n}", "func newConfig() *Config {\n\t// TODO: use config as default, allow setting some values per-job\n\t// and prevent config changes affecting already-running tasks\n\treturn &Config{\n\t\tPath: DefaultPath,\n\t\tDatastorePrefix: \"MP_\",\n\t\tDefaultQueue: \"\",\n\t\tShards: 8,\n\t\tOversampling: 32,\n\t\tLeaseDuration: time.Duration(30) * time.Second,\n\t\tLeaseTimeout: time.Duration(10)*time.Minute + time.Duration(30)*time.Second,\n\t\tTaskTimeout: time.Duration(10)*time.Minute - time.Duration(30)*time.Second,\n\t\tCursorTimeout: time.Duration(50) * time.Second,\n\t\tRetries: 31,\n\t\tLogVerbose: false,\n\t\tHost: \"\",\n\t}\n}", "func New(cfg *Config) *Server {\n\tdefaultConfig(cfg)\n\tlog.Printf(\"%+v\\n\", cfg)\n\treturn &Server{\n\t\tcfg: cfg,\n\t\thandlers: make([]connectionHandler, cfg.Count),\n\t\tevents: make(chan eventWithData, cfg.Count),\n\t}\n}", "func NewConfig() Config {\n\treturn Config{\n\t\tType: TypeNone,\n\t\tJaeger: NewJaegerConfig(),\n\t\tNone: struct{}{},\n\t}\n}", "func (sm *ShardMaster) CreateNewConfig() *Config{\n\t// get current config (the last config in config list)\n\tsz := len(sm.configs)\n\tcurr_config := &sm.configs[sz - 1]\n\n\t// create a new config\n\tnew_config := Config{Num: curr_config.Num + 1}\n\tnew_config.Groups = make(map[int64][]string)\n\n\t// copy the shards from curr_config\n\tfor s, gid := range curr_config.Shards{\n\t\tnew_config.Shards[s] = gid\n\t}\n\n\t// copy the group from curr_config\n\tfor gid, server := range curr_config.Groups{\n\t\tnew_config.Groups[gid] = server\n\t}\n\treturn &new_config\n}", "func (c CompletedConfig) New(name string) (*GenericServer, error) {\n\thandlerChainBuilder := func(handler http.Handler) http.Handler {\n\t\treturn c.BuildHandlerChainFunc(handler, c.Config)\n\t}\n\thandler := NewServerHandler(name, handlerChainBuilder, nil)\n\ts := &GenericServer{\n\t\tHandlerChainWaitGroup: c.HandlerChainWaitGroup,\n\n\t\tSecureServingInfo: c.SecureServingInfo,\n\t\tExternalAddress: c.ExternalAddress,\n\t\tHandler: handler,\n\n\t\tpostStartHooks: map[string]postStartHookEntry{},\n\t\tpreShutdownHooks: map[string]preShutdownHookEntry{},\n\n\t\thealthzChecks: c.HealthzChecks,\n\t}\n\n\tinstallAPI(s, c.Config)\n\n\treturn s, nil\n}", "func NewConfig() *Config {\n\treturn &Config{\n\t\tEnable: true,\n\t\tAddress: \"0.0.0.0:30003\",\n\t}\n}", "func NewServerConfig() *domain.ServerConfig {\n\tlog.Println(\"load config\")\n\treturn &domain.ServerConfig{\n\t\tListenHTTP: \":8080\",\n\t\tListenHTTPS: \":8443\",\n\t\tDebug: true,\n\t\tCookieAge: 8736, // hours (1 year)\n\t\tTokenAge: 5,\n\t\tHSTS: false,\n\t\tWebIDTLS: true,\n\t\tMetaSuffix: \".meta\",\n\t\tACLSuffix: \".acl\",\n\t\tDataApp: \"tabulator\",\n\t\tDirIndex: []string{\"index.html\", \"index.htm\"},\n\t\tDirApp: \"http://linkeddata.github.io/warp/#list/\",\n\t\tSignUpApp: \"https://solid.github.io/solid-signup/?domain=\",\n\t\tDiskLimit: 100000000, // 100MB\n\t\tDataRoot: 
serverDefaultRoot(),\n\t\tBoltPath: filepath.Join(os.TempDir(), \"bolt.db\"),\n\t\tProxyLocal: true,\n\t}\n}", "func NewConfig() *Config {\n\treturn &Config{\n\t\tOptions: true,\n\t\tOptionsHandler: DefaultOptionsHandler,\n\t\tTrace: false,\n\t\tTraceHandler: DefaultTraceHandler,\n\t\tAddHeadOnGet: true,\n\t}\n}", "func ConfigNew() *Config {\n\tc := Config{\n\t\tHosts: map[string]*ConfigHost{},\n\t}\n\treturn &c\n}", "func NewServerConfig() *ServerConfig {\n\treturn &ServerConfig{\n\t\tCookieAge: 24,\n\t\tTokenAge: 5,\n\t\tMetaSuffix: \".meta\",\n\t\tACLSuffix: \".acl\",\n\t\tDataSkin: \"tabulator\",\n\t\tDirIndex: []string{\"index.html\", \"index.htm\"},\n\t\tDirSkin: \"http://linkeddata.github.io/warp/#list/\",\n\t\tSignUpSkin: \"http://linkeddata.github.io/signup/?tab=signup&endpointUrl=\",\n\t\tDiskLimit: 100000000, // 100MB\n\t\tDataRoot: serverDefaultRoot(),\n\t}\n}", "func NewConfig() {\n\t appConfig = &AppConfig{}\n}", "func NewConfig() Config {\n\treturn make(map[string]interface{})\n}", "func (client *HTTPClient) NewConfig(config *Config) {\n\tclient.sendRequest(\"POST\", config, nil, &HTTPClientMetrics{NewConfig: true})\n}", "func NewConfig() *Config {\n\treturn &Config{v: make(map[string]string)}\n}", "func NewConfig() *Config {\n\treturn &Config{\n\t\tNumberSuperNodes: defaultNumberSuperNodes,\n\t}\n}", "func NewConfig(cfg map[string]interface{}) *Config {\n\tif cfg == nil {\n\t\tcfg = make(map[string]interface{})\n\t}\n\treturn &Config{\n\t\tm: cfg,\n\t}\n}", "func New() *Config {\n\treturn new(Config)\n}", "func NewConfig() *Config {\n\treturn &Config{\n\t\tBindAddr: \":8080\",\n\t\tLogLevel: \"debug\",\n\t}\n}", "func NewConfig() *Config {\n\treturn &Config{\n\t\tBindAddr: \":8080\",\n\t\tLogLevel: \"debug\",\n\t}\n}", "func New() *Config {\n\treturn &Config{}\n}", "func New() *Config {\n\treturn &Config{}\n}", "func New() *Config {\n\treturn &Config{\n\t\tOptions: AppConfig{\n\t\t\tHoldtime: 12 * time.Hour, // do not retry a successful device backup before this holdtime\n\t\t\tScanInterval: 10 * time.Minute, // interval for scanning device table\n\t\t\tMaxConcurrency: 20, // limit for concurrent backup jobs\n\t\t\tMaxConfigFiles: 120, // limit for per-device saved files\n\t\t\tMaxConfigLoadSize: 10000000, // 10M limit max config file size for loading to memory\n\t\t},\n\t\tDevices: []DevConfig{},\n\t}\n}", "func NewServerConfig(host string, port int) *ServerConfig {\n\treturn &ServerConfig{Port: port, Host: host}\n}", "func NewConfig() Config {\n\treturn Config{}\n}", "func NewConfig() Config {\n\treturn Config{}\n}", "func New() Config {\n\treturn Config{\n\t\tDatabase: \"messaging\",\n\t\tThreadColl: \"thread\",\n\t\tMessageColl: \"message\",\n\t}\n}", "func NewConfig() Config {\n\treturn Config{\n\t\tBindAddress: DefaultBindAddress,\n\t\tDatabase: DefaultDatabase,\n\t\tRetentionPolicy: DefaultRetentionPolicy,\n\t\tBatchSize: DefaultBatchSize,\n\t\tBatchPending: DefaultBatchPending,\n\t\tBatchDuration: DefaultBatchDuration,\n\t\tTypesDB: DefaultTypesDB,\n\t}\n}", "func New() *Config {\n\tcfg := &Config{}\n\tcfg.SetDefaults()\n\tcfg.ChangedCH = make(chan bool)\n\treturn cfg\n}", "func New() *Config {\n\treturn &Config{\n\t\tEncryptor: &encryption.KMSEncryptor{\n\t\t\tKMS: kms.New(session.New(), &aws.Config{Region: aws.String(os.Getenv(\"EC2_REGION\"))}),\n\t\t},\n\t\tdata: (unsafe.Pointer)(&configData{\n\t\t\tbody: new(sjson.Json),\n\t\t\tdecrypted: make(map[uint64]*sjson.Json),\n\t\t}),\n\t\tobservers: make([]chan bool, 0),\n\t}\n}", "func NewConfig() (app.Config, error) 
{\n\tconfigurator := nest.NewConfigurator()\n\tconfigurator.SetName(app.FriendlyServiceName)\n\n\tvar config app.Config\n\n\terr := configurator.Load(&config)\n\n\treturn config, err\n}", "func New() *Config {\n\tc := &Config{\n\t\tTargets: make([]string, 0),\n\t}\n\tsetDefaultValues(c)\n\n\treturn c\n}", "func newConfig() *bqConfig {\n\treturn &bqConfig{\n\t\tarenaSize: cDefaultArenaSize,\n\t\tmaxInMemArenas: cMinMaxInMemArenas,\n\t}\n}", "func (c *Config) Clone() *Config {\n\tglobal := *c.Global\n\tnewconf := &Config{\n\t\tGlobal: &global,\n\t\tServers: make(map[string]*Server, len(c.Servers)),\n\t\tErrors: make([]error, 0),\n\t\tfilename: c.filename,\n\t}\n\tfor name, srv := range c.Servers {\n\t\tnewsrv := *srv\n\t\tnewsrv.parent = newconf\n\t\tnewconf.Servers[name] = &newsrv\n\t}\n\treturn newconf\n}", "func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget) (*AppsServer, error) {\n\t// completion is done in Complete, no need for a second time\n\tgenericServer, err := c.AppsConfig.GenericConfig.SkipComplete().New(\"apps.openshift.io-apiserver\", delegationTarget)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts := &AppsServer{\n\t\tGenericAPIServer: genericServer,\n\t}\n\n\tv1Storage, err := c.V1RESTStorage()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tparameterCodec := runtime.NewParameterCodec(c.Scheme)\n\tapiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(appsapiv1.GroupName, c.Registry, c.Scheme, parameterCodec, c.Codecs)\n\tapiGroupInfo.GroupMeta.GroupVersion = appsapiv1.SchemeGroupVersion\n\tapiGroupInfo.VersionedResourcesStorageMap[appsapiv1.SchemeGroupVersion.Version] = v1Storage\n\tif err := s.GenericAPIServer.InstallAPIGroup(&apiGroupInfo); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s, nil\n}", "func NewServerConfig() *Server {\n\tport := \"5000\"\n\tmode := \"debug\"\n\tfileServiceEndPoint := \"http://localhost:5000/files\"\n\taccountServiceEndPoint := \"http://localhost:5000/accounts\"\n\tprofileServiceEndPoint := \"http://localhost:5000/profiles\"\n\n\tif env := os.Getenv(\"PORT\"); env != \"\" {\n\t\tport = env\n\t}\n\tif env := os.Getenv(\"MODE\"); env != \"\" {\n\t\tmode = env\n\t}\n\tif env := os.Getenv(\"FILE_API_ADDRESS\"); env != \"\" {\n\t\tfileServiceEndPoint = env\n\t}\n\tif env := os.Getenv(\"ACCOUNT_API_ADDRESS\"); env != \"\" {\n\t\taccountServiceEndPoint = env\n\t}\n\tif env := os.Getenv(\"PROFILE_API_ADDRESS\"); env != \"\" {\n\t\tprofileServiceEndPoint = env\n\t}\n\tserver := &Server{\n\t\tport: port,\n\t\tmode: mode,\n\t\tfileServiceEndPoint: fileServiceEndPoint,\n\t\taccountServiceEndPoint: accountServiceEndPoint,\n\t\tprofileServiceEndPoint: profileServiceEndPoint,\n\t}\n\tif server.mode != \"release\" && server.mode != \"debug\" {\n\t\tpanic(\"Unavailable gin mode\")\n\t}\n\treturn server\n}", "func New() *Config {\n\treturn &Config{\n\t\tdevices: make([]Device, 0),\n\t}\n}", "func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget) (*NetworkAPIServer, error) {\n\tgenericServer, err := c.GenericConfig.New(\"network.openshift.io-apiserver\", delegationTarget)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts := &NetworkAPIServer{\n\t\tGenericAPIServer: genericServer,\n\t}\n\n\tv1Storage, err := c.V1RESTStorage()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tapiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(networkapiv1.GroupName, c.ExtraConfig.Scheme, metav1.ParameterCodec, 
c.ExtraConfig.Codecs)\n\tapiGroupInfo.VersionedResourcesStorageMap[networkapiv1.SchemeGroupVersion.Version] = v1Storage\n\tif err := s.GenericAPIServer.InstallAPIGroup(&apiGroupInfo); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s, nil\n}", "func NewServerConfig(configTmpl string, target *model.K3OSNode) (*[]byte, error) {\n\ttmpl := configTmpl\n\tif tmpl == \"\" {\n\t\ttmpl = ServerConfigTmpl\n\t}\n\treturn generateConfig(tmpl, target)\n}", "func NewConfig() *Config {\n\treturn &Config{NodeAddresses: make(map[string]string)}\n}", "func newConfig(initiator bool) noise.Config {\n\treturn noise.Config{\n\t\tCipherSuite: cipherSuite,\n\t\tPattern: noise.HandshakeNK,\n\t\tInitiator: initiator,\n\t\tPrologue: []byte(\"dnstt 2020-04-13\"),\n\t}\n}", "func NewConfig() Config {\n\treturn Config{\"1.0.0\"}\n}", "func New() *Config {\n\tcfg := new(Config)\n\treturn cfg\n}", "func newConfig(opts ...Option) config {\n\tc := config{\n\t\tMeterProvider: otel.GetMeterProvider(),\n\t}\n\tfor _, opt := range opts {\n\t\topt.apply(&c)\n\t}\n\treturn c\n}", "func NewConfig(configFile string) (*Config, error) {\n\n\tcfg := &Config{\n\t\tHost: \"0.0.0.0\",\n\t\tPort: 8080,\n\t\tAllowEmptyClientSecret: false,\n\t\tScopes: []string{\"openid\", \"profile\", \"email\", \"offline_access\"},\n\t\tUsernameClaim: \"nickname\",\n\t\tEmailClaim: \"\",\n\t\tServeTLS: false,\n\t\tCertFile: \"/etc/gangway/tls/tls.crt\",\n\t\tKeyFile: \"/etc/gangway/tls/tls.key\",\n\t\tClusterCAPath: \"/var/run/secrets/kubernetes.io/serviceaccount/ca.crt\",\n\t\tHTTPPath: \"\",\n\t}\n\n\tif configFile != \"\" {\n\t\tdata, err := ioutil.ReadFile(configFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\terr = yaml.Unmarshal([]byte(data), cfg)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\terr := envconfig.Process(\"gangway\", cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = cfg.Validate()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Check for trailing slash on HTTPPath and remove\n\tcfg.HTTPPath = strings.TrimRight(cfg.HTTPPath, \"/\")\n\n\treturn cfg, nil\n}", "func NewConfig(path string) meta.Config {\n\treturn meta.Config{\n\t\tDir: path,\n\t\tHostname: \"localhost\",\n\t\tBindAddress: \"127.0.0.1:0\",\n\t\tHeartbeatTimeout: toml.Duration(500 * time.Millisecond),\n\t\tElectionTimeout: toml.Duration(500 * time.Millisecond),\n\t\tLeaderLeaseTimeout: toml.Duration(500 * time.Millisecond),\n\t\tCommitTimeout: toml.Duration(5 * time.Millisecond),\n\t}\n}", "func NewConfig() *Config {\n return &Config {\n UseTLS: true,\n Server: \"pop.gmail.com\",\n Port: 995,\n }\n}", "func NewConfig() Config {\n\tc := Config{}\n\tc.Forwards = map[string][]PortMappings{}\n\treturn c\n}", "func NewConfig() *config {\n\treturn &config{}\n}", "func (b BackendType) NewConfig() interface{} {\n\tswitch b {\n\tcase EtcdV2:\n\t\treturn &etcd.EtcdConfig{}\n\tdefault:\n\t\tlog.Errorf(\"Unknown backend type: %v\", b)\n\t\treturn nil\n\t}\n}", "func NewConfig(base string) (config *Config) {\n\tconfig = &Config{}\n\tconfig.Base = base\n\tconfig.Entries = make(map[string]ConfigEntry)\n\treturn\n}", "func NewConfig() *Config {\n\treturn &Config{\n\t\tNumberSuperNodes: defaultNumberSuperNodes,\n\n\t\tconnectToNextNodeDelay: connectToNextNodeDelay,\n\t\tacceptNodesTimeout: acceptNodesTimeout,\n\t\tconnectTimeout: connectTimeout,\n\t}\n}", "func NewConfig() Config {\n\treturn Config{\n\t\tserver: endpoint.NewServerAddr(SERVER_NAME),\n\t\ttweetDB: endpoint.NewPGConfig(TWEET_DB_NAME),\n\t\tappDB: 
endpoint.NewPGConfig(APP_DB_NAME),\n\t\taccessToken: getAccessToken(),\n\t\ttokenValidTo: getTokenValidTo(),\n\t}\n}", "func New() *Config {\n\treturn &Config{ Modules: []*Module{} }\n}", "func New() *Config {\n\tvar conf Config\n\tappName := \"live-config\"\n\n\terr := envconfig.Process(appName, &conf)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn &conf\n}", "func NewConfig() Config {\n\treturn Config{\n\t\tData: tsdb.NewConfig(),\n\t\tRetentionService: retention.NewConfig(),\n\t\tPrecreatorConfig: precreator.NewConfig(),\n\t}\n}", "func NewConfig() *Config {\n\tcfg := &Config{}\n\tcfg.flagSet = flag.NewFlagSet(\"heartbeat-bench\", flag.ContinueOnError)\n\tfs := cfg.flagSet\n\tfs.ParseErrorsWhitelist.UnknownFlags = true\n\tfs.StringVar(&cfg.configFile, \"config\", \"\", \"config file\")\n\tfs.StringVar(&cfg.PDAddr, \"pd\", \"http://127.0.0.1:2379\", \"pd address\")\n\tfs.StringVar(&cfg.StatusAddr, \"status-addr\", \"http://127.0.0.1:20180\", \"status address\")\n\n\treturn cfg\n}", "func New(config Config) *Server {\n\treturn &Server{\n\t\tconfig: config,\n\t\tregistrars: make([]Registration, 0, 1),\n\t}\n}", "func NewConfig() Config {\n\treturn Config{\n\t\tLabel: \"\",\n\t\tType: \"stdout\",\n\t\tAMQP09: NewAMQPConfig(),\n\t\tAMQP1: NewAMQP1Config(),\n\t\tAWSDynamoDB: NewDynamoDBConfig(),\n\t\tAWSKinesis: NewKinesisConfig(),\n\t\tAWSKinesisFirehose: NewKinesisFirehoseConfig(),\n\t\tAWSS3: NewAmazonS3Config(),\n\t\tAWSSNS: NewSNSConfig(),\n\t\tAWSSQS: NewAmazonSQSConfig(),\n\t\tAzureBlobStorage: NewAzureBlobStorageConfig(),\n\t\tAzureQueueStorage: NewAzureQueueStorageConfig(),\n\t\tAzureTableStorage: NewAzureTableStorageConfig(),\n\t\tBroker: NewBrokerConfig(),\n\t\tCache: NewCacheConfig(),\n\t\tCassandra: NewCassandraConfig(),\n\t\tDrop: NewDropConfig(),\n\t\tDropOn: NewDropOnConfig(),\n\t\tDynamic: NewDynamicConfig(),\n\t\tElasticsearch: NewElasticsearchConfig(),\n\t\tFallback: NewTryConfig(),\n\t\tFile: NewFileConfig(),\n\t\tGCPCloudStorage: NewGCPCloudStorageConfig(),\n\t\tGCPPubSub: NewGCPPubSubConfig(),\n\t\tHDFS: NewHDFSConfig(),\n\t\tHTTPClient: NewHTTPClientConfig(),\n\t\tHTTPServer: NewHTTPServerConfig(),\n\t\tInproc: \"\",\n\t\tKafka: NewKafkaConfig(),\n\t\tMQTT: NewMQTTConfig(),\n\t\tMongoDB: NewMongoDBConfig(),\n\t\tNanomsg: NewNanomsgConfig(),\n\t\tNATS: NewNATSConfig(),\n\t\tNATSStream: NewNATSStreamConfig(),\n\t\tNSQ: NewNSQConfig(),\n\t\tPlugin: nil,\n\t\tRedisHash: NewRedisHashConfig(),\n\t\tRedisList: NewRedisListConfig(),\n\t\tRedisPubSub: NewRedisPubSubConfig(),\n\t\tRedisStreams: NewRedisStreamsConfig(),\n\t\tReject: \"\",\n\t\tResource: \"\",\n\t\tRetry: NewRetryConfig(),\n\t\tSFTP: NewSFTPConfig(),\n\t\tSTDOUT: NewSTDOUTConfig(),\n\t\tSubprocess: NewSubprocessConfig(),\n\t\tSwitch: NewSwitchConfig(),\n\t\tSyncResponse: struct{}{},\n\t\tSocket: NewSocketConfig(),\n\t\tWebsocket: NewWebsocketConfig(),\n\t\tProcessors: []processor.Config{},\n\t}\n}", "func NewConfig() Config {\n\treturn Config{\n\t\tLabel: \"\",\n\t\tType: \"stdout\",\n\t\tAMQP09: NewAMQPConfig(),\n\t\tAMQP1: NewAMQP1Config(),\n\t\tAWSDynamoDB: NewDynamoDBConfig(),\n\t\tAWSKinesis: NewKinesisConfig(),\n\t\tAWSKinesisFirehose: NewKinesisFirehoseConfig(),\n\t\tAWSS3: NewAmazonS3Config(),\n\t\tAWSSNS: NewSNSConfig(),\n\t\tAWSSQS: NewAmazonSQSConfig(),\n\t\tAzureBlobStorage: NewAzureBlobStorageConfig(),\n\t\tAzureQueueStorage: NewAzureQueueStorageConfig(),\n\t\tAzureTableStorage: NewAzureTableStorageConfig(),\n\t\tBroker: NewBrokerConfig(),\n\t\tCache: NewCacheConfig(),\n\t\tCassandra: 
NewCassandraConfig(),\n\t\tDrop: NewDropConfig(),\n\t\tDropOn: NewDropOnConfig(),\n\t\tDynamic: NewDynamicConfig(),\n\t\tElasticsearch: NewElasticsearchConfig(),\n\t\tFallback: NewTryConfig(),\n\t\tFile: NewFileConfig(),\n\t\tGCPCloudStorage: NewGCPCloudStorageConfig(),\n\t\tGCPPubSub: NewGCPPubSubConfig(),\n\t\tHDFS: NewHDFSConfig(),\n\t\tHTTPClient: NewHTTPClientConfig(),\n\t\tHTTPServer: NewHTTPServerConfig(),\n\t\tInproc: \"\",\n\t\tKafka: NewKafkaConfig(),\n\t\tMQTT: NewMQTTConfig(),\n\t\tMongoDB: NewMongoDBConfig(),\n\t\tNanomsg: NewNanomsgConfig(),\n\t\tNATS: NewNATSConfig(),\n\t\tNATSStream: NewNATSStreamConfig(),\n\t\tNSQ: NewNSQConfig(),\n\t\tPlugin: nil,\n\t\tRedisHash: NewRedisHashConfig(),\n\t\tRedisList: NewRedisListConfig(),\n\t\tRedisPubSub: NewRedisPubSubConfig(),\n\t\tRedisStreams: NewRedisStreamsConfig(),\n\t\tReject: \"\",\n\t\tResource: \"\",\n\t\tRetry: NewRetryConfig(),\n\t\tSFTP: NewSFTPConfig(),\n\t\tSTDOUT: NewSTDOUTConfig(),\n\t\tSubprocess: NewSubprocessConfig(),\n\t\tSwitch: NewSwitchConfig(),\n\t\tSyncResponse: struct{}{},\n\t\tSocket: NewSocketConfig(),\n\t\tWebsocket: NewWebsocketConfig(),\n\t\tProcessors: []processor.Config{},\n\t}\n}", "func NewConfig() Config {\n\treturn Config{\n\t\tBindAddress: DefaultBindAddress,\n\t\tDatabase: DefaultDatabase,\n\t\tRetentionPolicy: DefaultRetentionPolicy,\n\t\tConsistencyLevel: DefaultConsistencyLevel,\n\t\tTLSEnabled: false,\n\t\tCertificate: DefaultCertificate,\n\t\tBatchSize: DefaultBatchSize,\n\t\tBatchPending: DefaultBatchPending,\n\t\tBatchTimeout: toml.Duration(DefaultBatchTimeout),\n\t\tLogPointErrors: true,\n\t}\n}", "func NewConfig() Config {\n\treturn Config{\n\t\tBindAddress: DefaultBindAddress,\n\t\tHTTPSEnabled: false,\n\t\tHTTPSCertificate: \"/etc/ssl/influxdb.pem\",\n\t}\n}", "func NewConfig(cmd *cobra.Command, prefix string) *Config {\n\tloadingRules := clientcmd.NewDefaultClientConfigLoadingRules()\n\toverrides := &clientcmd.ConfigOverrides{}\n\tcfg := &Config{\n\t\tloadingRules: loadingRules,\n\t\toverrides: overrides,\n\t}\n\tcmd.PersistentFlags().StringVar(&loadingRules.ExplicitPath, prefix+\"kubeconfig\", \"\", \"Path to a kubeconfig file. Alternative to env var $KUBECONFIG.\")\n\tcmd.PersistentFlags().IntVar(&cfg.qps, prefix+\"client-qps\", 0, \"QPS to use for K8s client, 0 for default\")\n\tcmd.PersistentFlags().IntVar(&cfg.burst, prefix+\"client-burst\", 0, \"Burst to use for K8s client, 0 for default\")\n\tcmd.PersistentFlags().Int64Var(&cfg.ListPageSize, prefix+\"list-page-size\", 1000, \"Maximum number of responses per page to return for a list call. 0 for no limit\")\n\tclientcmd.BindOverrideFlags(overrides, cmd.PersistentFlags(), clientcmd.ConfigOverrideFlags{\n\t\tAuthOverrideFlags: clientcmd.RecommendedAuthOverrideFlags(prefix),\n\t\tTimeout: clientcmd.FlagInfo{\n\t\t\tLongName: prefix + clientcmd.FlagTimeout,\n\t\t\tDefault: \"0\",\n\t\t\tDescription: \"The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). 
A value of zero means don't timeout requests.\"},\n\t})\n\treturn cfg\n}", "func NewConfig() *Config {\n\treturn &Config{\n\t\tEnabled: DefaultEnabled,\n\t\tBindAddress: DefaultBindAddress,\n\t}\n}", "func Config() ServerConfig {\n\treturn defaultServerConfig\n}", "func NewConfig() *Config {\n\treturn &Config{\n\t\tBindAddr: \":9999\",\n\t\tDatabaseURL: \"mongodb://test:33333\",\n\t\tLogLevel: \"debug\",\n\t\tSecretKey: \"simple_secret\",\n\t}\n}", "func NewConfig() Config {\n\treturn Config{\n\t\t0.0, 0.0,\n\t\t4.0, 4.0,\n\t\t1000, 1000,\n\t\t512,\n\t\t\"ramp.json\",\n\t\t\"default.gob\",\n\t\t\"output.jpg\",\n\t\t\"000000\",\n\t\t0.0, 0.0}\n}", "func New() *Config {\n\treturn &Config{\n\t\tMode: ModeDevelopment,\n\t\tconfigs: make([]map[string]string, 3),\n\t}\n}", "func New() *Config {\n\tc := &Config{\n\t\tAgent: &AgentConfig{\n\t\t\tEventReceiverCount: 5,\n\t\t\tEventQueueLimit: 50,\n\t\t\tHealthCheckPort: 10240,\n\t\t\tLogLevel: \"info\",\n\t\t},\n\t\tPlugins: make([]*pluginrunner.PluginRunner, 0),\n\t\tEventKinds: make(map[events.EventKind]bool),\n\t}\n\treturn c\n}", "func New() *Config {\n\treturn &Config{\n\t\tWebsite: Website{\n\t\t\tURL: \"localhost\",\n\t\t\tHTTPPort: \":80\",\n\t\t\tHTTPSPort: \":443\",\n\t\t\tCert: \"cert.pem\",\n\t\t\tKey: \"key.pem\",\n\t\t\tDirectory: \"app/public\",\n\t\t},\n\t}\n}", "func NewConfig(name string, client kubernetes.Interface) *Config {\n\treturn &Config{\n\t\tName: name,\n\t\tClient: client,\n\t}\n}", "func NewConfig(listen, raftDir, raftPort, nodeID string, join string) *Config {\n\treturn &Config{\n\t\tListenAddress: listen,\n\t\tRaftDir: raftDir,\n\t\tRaftPort: raftPort,\n\t\tJoin: join,\n\t\tNodeID: nodeID,\n\t}\n}", "func NewConfig(appName string) *Config {\n\tref := uuid.New()\n\n\treturn &Config{\n\t\treference: ref.String(),\n\t\tlevel: INFO,\n\t\tfilePathSize: SHORT,\n\t\tappName: appName,\n\t}\n}", "func NewConfig(config CConfig) *Config {\n\treturn &Config{config: config, nsPath: make([]byte, 1)}\n}", "func NewConfig(config CConfig) *Config {\n\treturn &Config{config: config, nsPath: make([]byte, 1)}\n}" ]
[ "0.714636", "0.6788078", "0.672954", "0.671221", "0.64953154", "0.6477215", "0.63846946", "0.6384149", "0.6354265", "0.6305003", "0.6282221", "0.6247137", "0.6234308", "0.622695", "0.61695105", "0.61501217", "0.61187845", "0.6103186", "0.60942477", "0.60919195", "0.60896236", "0.6078594", "0.60761386", "0.606631", "0.6040402", "0.6036451", "0.60340255", "0.6033639", "0.6032754", "0.6030515", "0.6009645", "0.6004144", "0.6001237", "0.59950656", "0.5991804", "0.59789324", "0.5978804", "0.59745854", "0.59668", "0.59428036", "0.59403765", "0.5939231", "0.5896968", "0.5883934", "0.5883934", "0.5878915", "0.5878915", "0.5875375", "0.58710706", "0.5869476", "0.5869476", "0.5865614", "0.58646166", "0.5861209", "0.58516765", "0.58507895", "0.58442956", "0.584127", "0.5840217", "0.58398837", "0.58367145", "0.58346736", "0.5828175", "0.58260715", "0.58189964", "0.5813579", "0.5801252", "0.5786383", "0.57817996", "0.5780594", "0.5778709", "0.57767135", "0.57742625", "0.5772214", "0.5765183", "0.5765056", "0.5764749", "0.57550275", "0.57475454", "0.57388926", "0.5735557", "0.573144", "0.57269466", "0.5721914", "0.5721914", "0.572101", "0.5703873", "0.5701076", "0.57000864", "0.56995106", "0.5695812", "0.5695323", "0.56924343", "0.56880087", "0.5684519", "0.5683569", "0.56671095", "0.56655794", "0.56610155", "0.56610155" ]
0.7364699
0
SetHost accepts a string in the form of and sets this as URL in serverConfig.
func (co *serverConfig) SetURL(URL string) ServerConfigBuilder { co.URL = URL; return co }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (_this *URL) SetHost(value string) {\n\tinput := value\n\t_this.Value_JS.Set(\"host\", input)\n}", "func SetHost(v string) {\n\traw.Host = v\n}", "func (b *Binary) SetHost(host string) {\n\tif host == \"\" {\n\t\treturn\n\t}\n\tu, err := url.Parse(host)\n\tif err != nil {\n\t\treturn\n\t}\n\tu.Path = b.url.Path\n\tb.url = u\n}", "func SetServerHost(s string) func(*Server) error {\n\treturn func(c *Server) error {\n\t\tc.host = s\n\t\treturn nil\n\t}\n}", "func (sn *SelfNode) SetHostURL(address string, port int) {\n\tif address == \"\" {\n\t\taddress = \"localhost\"\n\t}\n\tsn.URL = fmt.Sprintf(\"http://%v:%v\", address, port)\n}", "func SetHost(s string) func(*Manager) error {\n\treturn func(c *Manager) error {\n\t\tc.samhost = s\n\t\treturn nil\n\t}\n}", "func Host(host string) func(*Config) error {\n\treturn func(c *Config) error {\n\t\tc.Host = host\n\t\treturn nil\n\t}\n}", "func (e *Executor) SetHost(v string) {\n\t// return if Executor type is nil\n\tif e == nil {\n\t\treturn\n\t}\n\n\te.Host = &v\n}", "func (h *Host) SetAdress(a string) {\n}", "func (c *Config) Host(host string) *Config {\n\tif c.context != nil {\n\t\tc.context.Host = host\n\t}\n\treturn c\n}", "func (h *RequestHeader) SetHost(host string) {\n\th.host = append(h.host[:0], host...)\n}", "func Host(host string) func(*Proxy) {\n\treturn func(r *Proxy) {\n\t\tr.host = host\n\t}\n}", "func SetHost(host string) ConfigOption {\n\treturn func(opt *ConfigOptions) error {\n\t\topt.host = host\n\t\treturn nil\n\t}\n}", "func (c *Client) SetServerHost(host string, port int) {\n\tc.serverAddr = net.JoinHostPort(host, strconv.Itoa(port))\n}", "func WithHost(host string) Option {\n\treturn func(c *Client) error {\n\t\tc.transport.URL = transport.DefaultURL\n\t\tu, err := url.Parse(host)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif u.Scheme != \"\" {\n\t\t\tc.transport.URL.Scheme = u.Scheme\n\t\t}\n\t\tc.transport.URL.Host = u.Host\n\t\tif !strings.Contains(c.transport.URL.Host, \":\") {\n\t\t\tc.transport.URL.Host += \":\" + string(transport.DefaultPort)\n\t\t}\n\t\treturn nil\n\t}\n}", "func Host(h string) {\n\thost = h\n}", "func (this *HostAddress) SetHost(host string) {\n\tthis.host = host\n}", "func Host(pathWithPort string) ConfigOption {\n\treturn func(c *Config) error {\n\t\th := strings.Split(pathWithPort, \":\")\n\t\tif len(h) != 2 {\n\t\t\treturn fmt.Errorf(\"unknown host, use host:port\")\n\t\t}\n\t\tc.host = h[0]\n\t\tc.port = h[1]\n\t\treturn nil\n\t}\n}", "func (u SysDBUpdater) SetHost(host string) SysDBUpdater {\n\tu.fields[string(SysDBDBSchema.Host)] = host\n\treturn u\n}", "func WithHost(p string) Option {\n\treturn func(o *options) {\n\t\to.host = p\n\t}\n}", "func (c *TLSConn) SetHost(host string) {\n\tc.host = host\n}", "func SetSocksHost(s string) func(*Manager) error {\n\treturn func(c *Manager) error {\n\t\tc.host = s\n\t\treturn nil\n\t}\n}", "func Host(host string) Option {\n\treturn func(o *Options) error {\n\t\to.Host = host\n\t\treturn nil\n\t}\n}", "func WithHost(host string) Option {\n\treturn func(c *gate.Configuration) {\n\t\tc.Host = host\n\t}\n}", "func WithHost(host string) ConfigOption {\n\treturn func(cfg *Config) {\n\t\tcfg.Host = host\n\t}\n}", "func (c *Conn) SetHostName(property string, value string) error {\n\terr := c.object.Call(dbusInterface+\".\"+property, 0, value, false).Err\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to set hostname: %v\", err)\n\t}\n\n\treturn nil\n}", "func (c *Cluster) Set(host, forward string) {\n\tproxy := 
&httputil.ReverseProxy{\n\t\tDirector: func(r *http.Request) {\n\t\t\tr.URL.Scheme = \"http\"\n\t\t\tr.URL.Host = forward\n\t\t},\n\t\tErrorHandler: func(w http.ResponseWriter, r *http.Request, err error) {\n\t\t\tw.WriteHeader(http.StatusBadGateway)\n\t\t\t_, _ = w.Write([]byte(errors.Cause(err).Error()))\n\t\t},\n\t}\n\n\tc.proxiesLock.Lock()\n\tdefer c.proxiesLock.Unlock()\n\n\tc.proxies[host] = proxy\n}", "func (r *Request) SetHost(host string) {\n\tr.request.Host = host\n}", "func Host(host string) Option {\n\treturn func(o *Options) {\n\t\to.host = host\n\t}\n}", "func WithHost(host string) ConfigOption {\n\treturn func(c *Config) {\n\t\tc.host = host\n\t}\n}", "func (n *Namespace) SetHostName(hostname string, cmd *exec.Cmd) {\n\tsyscall.Sethostname([]byte(hostname))\n}", "func (m *SiteCollection) SetHostname(value *string)() {\n m.hostname = value\n}", "func (_this *URL) SetHostname(value string) {\n\tinput := value\n\t_this.Value_JS.Set(\"hostname\", input)\n}", "func (c *tomlConfig) Host() string {\n\tparts := strings.Split(c.Server.HTTPAddress, \":\")\n\thost := c.Server.Host\n\tif len(parts) > 1 {\n\t\thost = host + \":\" + parts[len(parts)-1]\n\t}\n\treturn host\n}", "func WithHost(host string) Opt {\n\treturn func(c *Client) error {\n\t\thostURL, err := ParseHostURL(host)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.host = host\n\t\tc.proto = hostURL.Scheme\n\t\tc.addr = hostURL.Host\n\t\tc.basePath = hostURL.Path\n\t\tif transport, ok := c.client.Transport.(*http.Transport); ok {\n\t\t\treturn sockets.ConfigureTransport(transport, c.proto, c.addr)\n\t\t}\n\t\treturn errors.Errorf(\"cannot apply host to transport: %T\", c.client.Transport)\n\t}\n}", "func (syncHdler *SyncHandler) SetServerURL(url string) {\n\tsyncHdler.hdler.SetServerURL(url)\n}", "func SetHostname(h string) {\n\thostname = h\n}", "func (c *config) WithHost(host string) Config {\n\tc.host = host\n\treturn c\n}", "func (o *LogContent) SetHost(v string) {\n\to.Host = &v\n}", "func (mt *mockTokenBuilder) SetHostname(h string) {\n\t//TODO some mocking\n}", "func (o *V1VirusDatasetRequest) SetHost(v string) {\n\to.Host = &v\n}", "func SetURL(co *ConfigOption) error {\n\turlString := viper.GetString(co.Name)\n\tif urlString != \"\" {\n\t\turlType, err := url.Parse(urlString)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Unable to parse URL: %s/%v\", urlString, err)\n\t\t}\n\t\t*(co.ConfigKey.(**url.URL)) = urlType\n\t}\n\treturn nil\n}", "func (pel *PgEventListener) SetHost(host string) *PgEventListener {\n\tpel._host = host\n\treturn pel\n}", "func Host(h string) PinningOption {\n\treturn func(o *Pinning) {\n\t\to.Host = h\n\t}\n}", "func (m *LogicAppTriggerEndpointConfiguration) SetUrl(value *string)() {\n err := m.GetBackingStore().Set(\"url\", value)\n if err != nil {\n panic(err)\n }\n}", "func (s *HTTPSet) replaceHost(rawurl string) (string, error) {\n\tu, err := url.Parse(rawurl)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\thost, err := s.RotateEndpoint()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tu.Host = host\n\tif u.Scheme == \"\" {\n\t\tif s.UseHTTPS {\n\t\t\tu.Scheme = \"https\"\n\t\t} else {\n\t\t\tu.Scheme = \"http\"\n\t\t}\n\t}\n\n\treturn u.String(), nil\n}", "func (f *FastURL) SetHostname(hostname string) {\n\tf.hostname = append(f.hostname[:0], hostname...)\n}", "func (o GithubEnterpriseConfigOutput) HostUrl() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *GithubEnterpriseConfig) pulumi.StringOutput { return v.HostUrl }).(pulumi.StringOutput)\n}", "func (hb *HostBlueprint) 
SetHostName(hostName string) {\n\thb.SetElement(\"HOSTNAME\", hostName)\n}", "func WithHost(ctx context.Context, v string) context.Context {\n\treturn context.WithValue(ctx, keyHost, v)\n}", "func Host(host string) func(*Locker) error {\n\treturn func(l *Locker) error {\n\t\tl.Host = host\n\t\treturn nil\n\t}\n}", "func (o *V2TcpConfiguration) SetHost(v string) {\n\to.Host = &v\n}", "func WithHost(host string) ClientOption {\n\treturn func(cfg *clientConfig) {\n\t\tcfg.host = host\n\t}\n}", "func (r *Router) Host(hostpattern string) *Router {\n\tr.host = hostpattern\n\tr.hostrm.Register(hostpattern)\n\treturn r\n}", "func (cl *DoHClient) SetRemoteAddr(addr string) (err error) {\n\tcl.addr, err = url.Parse(addr)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tcl.query = cl.addr.Query()\n\n\treturn\n}", "func PassHostHeader(b bool) optSetter {\n\treturn func(f *Forwarder) error {\n\t\tf.httpForwarder.passHost = b\n\t\treturn nil\n\t}\n}", "func (s *SlackConfig) SetAPIhost(h string) {\n\ts.APIHost = h\n}", "func (c *Client) ProxyHost(host string) *Client {\n\t_h := strings.SplitN(host, \":\", 2)\n\tif len(_h) == 1 {\n\t\thost = fmt.Sprintf(\"%s:%d\", host, 22)\n\t}\n\tc.proxyHost = host\n\treturn c\n}", "func WithHost(host string) InstanceOpt {\n\treturn func(i *Instance) error {\n\t\ti.host = host\n\t\treturn nil\n\t}\n}", "func (cfg *Config) SetRemoteHost(remoteHost string) {\n\tcfg.RemoteHost = remoteHost\n}", "func WithHost(host string) Preparer {\n\treturn func(r *http.Request) *http.Request {\n\t\tr.Host = host\n\t\treturn r\n\t}\n}", "func (s *Server) SetHTTPSPort(port ...int) {\n\tif len(port) > 0 {\n\t\ts.config.HTTPSAddr = \"\"\n\t\tfor _, v := range port {\n\t\t\tif len(s.config.HTTPSAddr) > 0 {\n\t\t\t\ts.config.HTTPSAddr += \",\"\n\t\t\t}\n\t\t\ts.config.HTTPSAddr += \":\" + strconv.Itoa(v)\n\t\t}\n\t}\n}", "func (c *Direct) SetHostinfo(hi *tailcfg.Hostinfo) bool {\n\tif hi == nil {\n\t\tpanic(\"nil Hostinfo\")\n\t}\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tif hi.Equal(c.hostinfo) {\n\t\treturn false\n\t}\n\tc.hostinfo = hi.Clone()\n\tj, _ := json.Marshal(c.hostinfo)\n\tc.logf(\"HostInfo: %s\", j)\n\treturn true\n}", "func WithHost(host string) Option {\n\treturn func(c *config) {\n\t\tc.options.Collector.Host = host\n\t\tc.options.SystemMetrics.Endpoint.Host = host\n\t}\n}", "func (c *client) Host() string {\n\treturn c.cfg.GetURL().String()\n}", "func SetAllowedHosts(allowed []string) {\n\tDefaultDialer.SetAllowedHosts(allowed)\n}", "func SetAddress(addr string) ServerOptionFunc {\n\treturn func(s *Server) error {\n\t\ts.address = addr\n\t\treturn nil\n\t}\n}", "func HostKey(key string) Option {\n\treturn func(ec *Envcfg) {\n\t\tec.hostKey = key\n\t}\n}", "func (j *Jail) SetHostname(hostname string) error {\n\tjpps := jailParamList{}\n\tdefer jpps.release()\n\n\tparams := map[string]interface{}{\n\t\t\"jid\": &j.jid,\n\t\t\"host.hostname\": hostname,\n\t}\n\n\tif er := jpps.bindParameters(params); er != nil {\n\t\treturn er\n\t}\n\n\tif _, er := C.jailparam_set(&jpps.params[0], jpps.numParams(), C.JAIL_UPDATE); er != nil {\n\t\treturn er\n\t}\n\n\tj.hostname = hostname\n\n\treturn nil\n}", "func (t *TargetCollection) SetTarget(originalHost string, target Target) {\n\tif originalHost == \"\" || target == nil {\n\t\treturn\n\t}\n\n\tt.mux.Lock()\n\tdefer t.mux.Unlock()\n\n\tt.entries[originalHost] = target\n}", "func WithBaseURL(s string) ClientOptionFunc {\n\treturn func(c *Client) error {\n\t\tc.http.SetHostURL(s)\n\t\treturn nil\n\t}\n}", "func WithHost(host string) ClientOption 
{\n\treturn func(options *clientOptions) {\n\t\toptions.host = host\n\t}\n}", "func SetAddr(addr string) {\n\tcfg.Listen = addr\n}", "func (r *HandlerRouteItem) URLHost(pairs ...string) (*url.URL, error) {\r\n\tif r.err != nil {\r\n\t\treturn nil, r.err\r\n\t}\r\n\tif r.regexp == nil || r.regexp.host == nil {\r\n\t\treturn nil, errors.New(\"mux: route doesn't have a host\")\r\n\t}\r\n\thost, err := r.regexp.host.url(pairs...)\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\treturn &url.URL{\r\n\t\tScheme: \"http\",\r\n\t\tHost: host,\r\n\t}, nil\r\n}", "func WithHost(host string) PostgresConfigOption {\n\treturn func(o *PostgresConfig) {\n\t\to.Host = host\n\t}\n}", "func (impl *ldapAuthImpl) SetLDAPServerHost(ldapServerHost string) {\n\timpl.Lock()\n\tdefer impl.Unlock()\n\n\tif ldapServerHost != impl.ldapServerHost {\n\t\timpl.ldapServerHost = ldapServerHost\n\t}\n}", "func (r *Router) Host(hosts ...string) *Router {\n\tc := r.route.clone()\n\tc.Hosts = hosts\n\treturn r.clone(c)\n}", "func (r *Route) URLHost(pairs ...string) (*url.URL, error) {\n\tif r.err != nil {\n\t\treturn nil, r.err\n\t}\n\tif r.regexp.host == nil {\n\t\treturn nil, errors.New(\"mux: route doen't have a host\")\n\t}\n\tvalues, err := r.prepareVars(pairs...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thost, err := r.regexp.host.url(values)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tu := &url.URL{\n\t\tScheme: \"http\",\n\t\tHost: host,\n\t}\n\tif r.buildScheme != \"\" {\n\t\tu.Scheme = r.buildScheme\n\t}\n\treturn u, nil\n}", "func (p *PerHost) AddHost(host string) {\n\tif strings.HasSuffix(host, \".\") {\n\t\thost = host[:len(host)-1]\n\t}\n\tp.bypassHosts = append(p.bypassHosts, host)\n}", "func (uri *URI) ChangeHost(hostWithPort string) {\n\tif uri.hostInfo.hostWithPort == hostWithPort {\n\t\treturn\n\t}\n\tvar newRawURI []byte\n\tif len(uri.host) == 0 {\n\t\t// not host in URI before, add it\n\t\tnewRawURI = []byte(hostWithPort)\n\t\tif len(uri.full) == 0 || (len(uri.full) > 0 && uri.full[0] != '/') {\n\t\t\tnewRawURI = append(newRawURI, '/')\n\t\t}\n\t\tnewRawURI = append(newRawURI, uri.full...)\n\t} else if hostIndex := bytes.Index(uri.full, uri.host); hostIndex >= 0 {\n\t\tif len(hostWithPort) == 0 {\n\t\t\tnewRawURI = uri.full[hostIndex+len(uri.host):]\n\t\t} else {\n\t\t\t// host already in URI, replace it\n\t\t\tnewRawURI = bytes.Replace(uri.full, uri.host, []byte(hostWithPort), 1)\n\t\t}\n\t}\n\tif len(newRawURI) == 0 {\n\t\tnewRawURI = []byte(\"/\")\n\t}\n\turi.Parse(uri.isConnect, newRawURI)\n}", "func (c ServerConfig) HTTPAddress() string {\n\thttp := \"\"\n\tif c.HostName != \"\" {\n\t\thttp = \"http://\" + c.HostName\n\t}\n\tif c.HTTPPort != \"\" {\n\t\thttp = http + \":\" + c.HTTPPort\n\t}\n\treturn http\n}", "func (c *Config) Userhost(userhost string) *Config {\n\tc.GetContext().Userhost = userhost\n\treturn c\n}", "func SetUrl(rawurl string) OptionFunc {\n\treturn func(c *Client) error {\n\t\tif rawurl == \"\" {\n\t\t\trawurl = DefaultURL\n\t\t}\n\t\tu, err := url.Parse(rawurl)\n\t\tif err != nil {\n\t\t\tc.errorf(\"Invalid URL [%s] - %v\\n\", rawurl, err)\n\t\t\treturn err\n\t\t}\n\t\tif u.Scheme != \"http\" && u.Scheme != \"https\" {\n\t\t\terr := Error{\"bad_url\", fmt.Sprintf(\"Invalid schema specified [%s]\", rawurl)}\n\t\t\tc.errorf(\"%v\", err)\n\t\t\treturn err\n\t\t}\n\t\tc.url = rawurl\n\t\tif !strings.HasSuffix(c.url, \"/\") {\n\t\t\tc.url += \"/\"\n\t\t}\n\t\treturn nil\n\t}\n}", "func (v *Client) Host() string {\n\tif v.host == \"\" {\n\t\tv.host = 
v.req.Host\n\t}\n\treturn v.host\n}", "func (ctx *Context) HostWithPort() string {\r\n\tif ctx.R.Host != \"\" {\r\n\t\tif strings.Contains(ctx.R.Host, \":\") {\r\n\t\t\treturn ctx.R.Host\r\n\t\t}\r\n\t\treturn ctx.R.Host + \":80\"\r\n\t}\r\n\treturn \"localhost:80\"\r\n}", "func (r *Route) Host(tpl string) *Route {\n\tr.err = r.addRegexpMatcher(tpl, regexpTypeHost)\n\treturn r\n}", "func (s *Switch) Add_host( host *string, vmid *string, port int ) {\n\tif s == nil {\n\t\treturn\n\t}\n\n\ts.hosts[*host] = true\n\ts.hport[*host] = port\n\ts.hvmid[*host] = vmid\n}", "func (o *IamLdapConfigParamsAllOf) SetConfigHost(v string) {\n\to.ConfigHost = &v\n}", "func DBHost(address string) DBOption {\n\treturn func(pm *param) *param {\n\t\tpm.Address = address\n\t\treturn pm\n\t}\n}", "func (s *Cli) SetNameServer(dnsServer string) (err error) {\n\t// run command: netsh interface ip set dnsservers name=KtConnectTunnel source=static address=8.8.8.8\n\tif _, _, err = util.RunAndWait(exec.Command(\"netsh\",\n\t\t\"interface\",\n\t\t\"ip\",\n\t\t\"set\",\n\t\t\"dnsservers\",\n\t\tfmt.Sprintf(\"name=%s\", util.TunNameWin),\n\t\t\"source=static\",\n\t\tfmt.Sprintf(\"address=%s\", strings.Split(dnsServer, \":\")[0]),\n\t)); err != nil {\n\t\tlog.Error().Msgf(\"Failed to set dns server of tun device\")\n\t\treturn err\n\t}\n\treturn nil\n}", "func (h *Host) SetUser(u string) {\n}", "func (client *LANHostConfigManagement1) SetDNSServer(NewDNSServers string) (err error) {\n\treturn client.SetDNSServerCtx(context.Background(),\n\t\tNewDNSServers,\n\t)\n}", "func SetURL(name, newurl, oldurl string) func(*types.Cmd) {\n\treturn func(g *types.Cmd) {\n\t\tg.AddOptions(\"set-url\")\n\t\tg.AddOptions(name)\n\t\tg.AddOptions(newurl)\n\t\tif oldurl != \"\" {\n\t\t\tg.AddOptions(oldurl)\n\t\t}\n\t}\n}", "func (h *Host) SetPort(p uint16) {\n}", "func configureServer(s *http.Server, scheme, addr string) {\n\n}", "func (m *TelecomExpenseManagementPartner) SetUrl(value *string)() {\n err := m.GetBackingStore().Set(\"url\", value)\n if err != nil {\n panic(err)\n }\n}", "func dbHost(port int, hosts ...string) string {\n\tURI, joinHosts := \"\", \"\"\n\tif len(hosts) > 1 {\n\t\tjoinHosts = strings.Join(hosts, \",\")\n\t} else {\n\t\tURI = hosts[0]\n\t}\n\tURI = fmt.Sprintf(\"mongodb://%s:%d\", joinHosts, port)\n\treturn URI\n}", "func (o UrlMapTestOutput) Host() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v UrlMapTest) *string { return v.Host }).(pulumi.StringPtrOutput)\n}", "func (s *Server) SetMaster(host string, config toolkit.M) error {\n\tif s.masterClient != nil {\n s.masterClient.Call(\"unfollow\", toolkit.M{}.Set(\"nodeid\", s.masterClient.Host), nil)\n\t\ts.masterClient.Close()\n\t}\n \n s.masterHost = host\n\tif host==\"\"{\n return nil \n }\n \n masterClient := NewClient(s.masterHost, config)\n\te := masterClient.Connect()\n\tif e != nil {\n\t\treturn errors.New(\"Server.SetMaster: \" + e.Error())\n\t}\n\n\te = masterClient.Call(\"follow\", toolkit.M{}.Set(\"nodeid\", s.Host), nil)\n\tif e != nil {\n\t\treturn errors.New(\"Server.SetMaster.Follow: \" + e.Error())\n\t}\n s.Log.AddLog(toolkit.Sprintf(\"Server %s is now following %s\", s.Host, host), \"INFO\") \t\n\treturn nil\n}", "func (me *TURLType) Set(s string) { (*xsdt.AnyURI)(me).Set(s) }", "func (s *StackExchangeConfig) SetAPIhost(h string) {\n\ts.APIHost = h\n}" ]
[ "0.77516806", "0.7434453", "0.7432567", "0.7276324", "0.71877426", "0.6935025", "0.68915313", "0.6835565", "0.67834437", "0.6763738", "0.6760979", "0.67476106", "0.67438185", "0.6711807", "0.66517454", "0.66216093", "0.6601068", "0.6472164", "0.64509356", "0.64432657", "0.6437333", "0.6403095", "0.63824177", "0.6369926", "0.6333308", "0.6329466", "0.6326212", "0.6308961", "0.62686545", "0.626023", "0.6182436", "0.61140823", "0.607415", "0.6030224", "0.60092974", "0.60041696", "0.59954435", "0.59813434", "0.5966139", "0.5944931", "0.58853656", "0.58596724", "0.58345664", "0.58198607", "0.5802402", "0.5747708", "0.5738709", "0.5711519", "0.5699313", "0.56844497", "0.56663483", "0.565013", "0.5647534", "0.5628684", "0.562029", "0.56023216", "0.55904865", "0.559042", "0.5569964", "0.55573094", "0.5556609", "0.55452156", "0.5520103", "0.55113834", "0.55091566", "0.5486648", "0.54804015", "0.547687", "0.54530483", "0.5445969", "0.5445431", "0.5439716", "0.5422963", "0.54133", "0.54112303", "0.54070705", "0.5399495", "0.5395206", "0.53767776", "0.53607506", "0.53566074", "0.5346048", "0.533784", "0.53363264", "0.53303313", "0.5327112", "0.52844095", "0.52814865", "0.52794486", "0.5262393", "0.52587265", "0.52586365", "0.5244176", "0.52382374", "0.5237514", "0.52321196", "0.5224553", "0.5221809", "0.5218231", "0.52150005", "0.52095" ]
0.0
-1
SetHost accepts an int and sets the retry count.
func (co *serverConfig) SetRetry(r int) ServerConfigBuilder { co.Retry = r return co }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (client *BaseClient) SetRetry(value int) {\n\tclient.retry = value\n}", "func (m *DeviceHealthAttestationState) SetRestartCount(value *int64)() {\n err := m.GetBackingStore().Set(\"restartCount\", value)\n if err != nil {\n panic(err)\n }\n}", "func (wp *WorkerPool[T]) SetNumShards(numShards int) {\n\tif numShards <= 1 {\n\t\tnumShards = 1\n\t}\n\n\tif numShards > maxShards {\n\t\tnumShards = maxShards\n\t}\n\n\twp.numShards = numShards\n}", "func (h *InputHost) SetHostConnLimit(connLimit int32) {\n\th.logger.WithField(`val`, connLimit).Info(`SetHostConnLimit`)\n\tatomic.StoreInt32(&h.hostConnLimit, connLimit)\n}", "func WithHostSelectionRetryAttempts(attempts int64) option {\n\treturn func(c *KubernetesConfigurator) {\n\t\tc.hostSelectionRetryAttempts = attempts\n\t}\n}", "func WithHostSelectionRetryAttempts(attempts int64) option {\n\treturn func(c *KubernetesConfigurator) {\n\t\tc.hostSelectionRetryAttempts = attempts\n\n\t}\n}", "func (h *Host) SetPort(p uint16) {\n}", "func (c *Client) SetMaxRetries(retries int) {\n\tc.modifyLock.RLock()\n\tdefer c.modifyLock.RUnlock()\n\tc.config.modifyLock.Lock()\n\tdefer c.config.modifyLock.Unlock()\n\n\tc.config.MaxRetries = retries\n}", "func (m *MockClient) SetMax404Retries(arg0 int) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"SetMax404Retries\", arg0)\n}", "func (c *Client) Settry(key string, value string, exp int32, tries int) {\n\ttry := 0\n\tvar err error\n\tfor try < tries {\n\t\terr = c.client.Set(&memcache.Item{Key: key, Value: []byte(value), Expiration: exp})\n\t\tif err == nil {\n\t\t\treturn\n\t\t}\n\t}\n\tfmt.Println(fmt.Sprintf(\"error seting key %v\", err))\n}", "func (m *CloudPcBulkActionSummary) SetFailedCount(value *int32)() {\n err := m.GetBackingStore().Set(\"failedCount\", value)\n if err != nil {\n panic(err)\n }\n}", "func (c *Cluster) Set(host, forward string) {\n\tproxy := &httputil.ReverseProxy{\n\t\tDirector: func(r *http.Request) {\n\t\t\tr.URL.Scheme = \"http\"\n\t\t\tr.URL.Host = forward\n\t\t},\n\t\tErrorHandler: func(w http.ResponseWriter, r *http.Request, err error) {\n\t\t\tw.WriteHeader(http.StatusBadGateway)\n\t\t\t_, _ = w.Write([]byte(errors.Cause(err).Error()))\n\t\t},\n\t}\n\n\tc.proxiesLock.Lock()\n\tdefer c.proxiesLock.Unlock()\n\n\tc.proxies[host] = proxy\n}", "func configureDiscoveryRetry() {\n\tdiscoveryRetry = defaultRetry\n\n\tif v := os.Getenv(registryDiscoveryRetryEnvName); v != \"\" {\n\t\tretry, err := strconv.Atoi(v)\n\t\tif err == nil && retry > 0 {\n\t\t\tdiscoveryRetry = retry\n\t\t}\n\t}\n}", "func (pl *Peerlist) ResetRetryTimes(addr string) {\n\tpl.lock.Lock()\n\tdefer pl.lock.Unlock()\n\n\tif _, ok := pl.peers[addr]; ok {\n\t\tpl.peers[addr].ResetRetryTimes()\n\t\tpl.peers[addr].Seen()\n\t}\n}", "func (pn *paxosNode) setUpAllConnections(numRetries int, hostMap map[int]string) error {\n\tidx := 0\n\tfor id, hostPort := range hostMap {\n\t\tattemptNum := 0\n\t\tfor ; attemptNum < numRetries; attemptNum++ {\n\t\t\tnode, err := rpc.DialHTTP(\"tcp\", hostPort)\n\t\t\tif err != nil {\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t} else {\n\t\t\t\tif id != pn.srvId {\n\t\t\t\t\tpn.oneOtherNode = node\n\t\t\t\t}\n\t\t\t\tpn.nodes[idx] = node\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif attemptNum == numRetries {\n\t\t\treturn errors.New(\"Can't connect to node \" + hostPort)\n\t\t}\n\t\tidx++\n\t}\n\treturn nil\n}", "func (r *Listener) SetThreads(count uint32) {\n\tr.thCntLock.Lock()\n\tdefer r.thCntLock.Unlock()\n\n\tr.thMax = count\n}", "func SetHostHealthy(val float64) {\n\thostname, _ := 
os.Hostname()\n\tlabels := prometheus.Labels{\n\t\t\"hostname\": hostname,\n\t\t\"env\": os.Getenv(\"ENV\"),\n\t}\n\n\thostHealthy.With(labels).Set(val)\n}", "func RetryTimes(n uint) Option {\n\treturn func(rc *RetryConfig) {\n\t\trc.retryTimes = n\n\t}\n}", "func (options *CreateLoadBalancerMonitorOptions) SetRetries(retries int64) *CreateLoadBalancerMonitorOptions {\n\toptions.Retries = core.Int64Ptr(retries)\n\treturn options\n}", "func SetSocksHost(s string) func(*Manager) error {\n\treturn func(c *Manager) error {\n\t\tc.host = s\n\t\treturn nil\n\t}\n}", "func (b *taskBuilder) attempts(a int) {\n\tb.Spec.MaxAttempts = a\n}", "func SetHost(s string) func(*Manager) error {\n\treturn func(c *Manager) error {\n\t\tc.samhost = s\n\t\treturn nil\n\t}\n}", "func SetHost(v string) {\n\traw.Host = v\n}", "func (m *TeamsAsyncOperation) SetAttemptsCount(value *int32)() {\n m.attemptsCount = value\n}", "func Retries(count uint) OptionFunc {\n\treturn func(c *Component) error {\n\t\tc.retries = count\n\t\treturn nil\n\t}\n}", "func SetRetryParameters(maxAttempts int, maxGap int) {\n\tif maxAttempts > 0 {\n\t\tmaxRetryAttempt = maxAttempts\n\t}\n\n\tif maxGap > 0 {\n\t\tmaxRetryGap = maxGap\n\t}\n}", "func (c *TLSConn) SetHost(host string) {\n\tc.host = host\n}", "func (m *DomainDnsSrvRecord) SetPort(value *int32)() {\n err := m.GetBackingStore().Set(\"port\", value)\n if err != nil {\n panic(err)\n }\n}", "func (conn *extHost) setReplicaInfo(hostport storeHostPort, replicaConn *replicaConnection) {\n\tconn.streams[hostport] = &replicaInfo{\n\t\tconn: replicaConn,\n\t\tsendTimer: common.NewTimer(replicaSendTimeout),\n\t}\n}", "func (c *APIClient) updateRetry(retry string) {\r\n\tv, err := strconv.Atoi(retry)\r\n\tif err != nil {\r\n\t\t// retry value incorrect\r\n\t\tlog.Printf(\"Retry value incorrect %s\\n\", retry)\r\n\t}\r\n\tc.RateLimit.RetryAfter = v\r\n}", "func (s *scalerService) SetReplicas(ctx context.Context, serviceName string, count uint64) error {\n\n\tservice, _, err := s.c.ServiceInspectWithRaw(ctx, serviceName)\n\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"docker inspect failed in ScalerService\")\n\t}\n\n\tservice.Spec.Mode.Replicated.Replicas = &count\n\tupdateOpts := types.ServiceUpdateOptions{}\n\tupdateOpts.RegistryAuthFrom = types.RegistryAuthFromSpec\n\n\t_, updateErr := s.c.ServiceUpdate(\n\t\tctx, service.ID, service.Version, service.Spec, updateOpts)\n\treturn updateErr\n}", "func SetDDLReorgWorkerCounter(cnt int32) {\n\tif cnt > maxDDLReorgWorkerCount {\n\t\tcnt = maxDDLReorgWorkerCount\n\t}\n\tatomic.StoreInt32(&ddlReorgWorkerCounter, cnt)\n}", "func (m *DeviceCompliancePolicySettingStateSummary) SetFailedDeviceCount(value *int32)() {\n err := m.GetBackingStore().Set(\"failedDeviceCount\", value)\n if err != nil {\n panic(err)\n }\n}", "func (h *InputHost) SetHostConnLimitPerSecond(connLimit int32) {\n\th.logger.WithField(`val`, connLimit).Info(`SetHostConnLimitPerSecond`)\n\tatomic.StoreInt32(&h.hostConnLimitPerSecond, connLimit)\n\th.SetTokenBucketValue(int32(connLimit))\n}", "func (pl *Peerlist) IncreaseRetryTimes(addr string) {\n\tpl.lock.Lock()\n\tdefer pl.lock.Unlock()\n\n\tif _, ok := pl.peers[addr]; ok {\n\t\tpl.peers[addr].IncreaseRetryTimes()\n\t\tpl.peers[addr].Seen()\n\t}\n}", "func TestPostRetries(t *testing.T) {\n\tconst SleepToCauseTimeout = 0\n\n\tif testing.Short() {\n\t\tt.Skip(\"skipping retry tests with backoff in short mode.\")\n\t}\n\n\tbackoff := 5 * time.Millisecond\n\tclientTimeout := backoff\n\n\tretryTests := 
[]int{\n\t\tSleepToCauseTimeout,\n\t\thttp.StatusRequestTimeout,\n\t\t500,\n\t\t599,\n\t\tSleepToCauseTimeout,\n\t\t200, // must end with 200\n\t}\n\n\texpectedTries := len(retryTests)\n\n\ttries := 0\n\tts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif tries++; tries <= expectedTries {\n\t\t\tstatus := retryTests[tries-1]\n\t\t\tif status == SleepToCauseTimeout {\n\t\t\t\ttime.Sleep(2 * clientTimeout)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tw.WriteHeader(status)\n\t\t} else {\n\t\t\tt.Fatalf(\"expected client to try %d times, received %d tries\", expectedTries, tries-1)\n\t\t}\n\t}))\n\n\tdefer ts.Close()\n\n\tclient := ts.Client()\n\tclient.Timeout = backoff\n\n\tc := &APIClient{\n\t\tBaseURL: ts.URL,\n\t\tClient: client,\n\t\tRetries: expectedTries - 1,\n\t\tBackoff: backoff,\n\t}\n\n\terr := c.PingSuccess(TestUUID, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"expected successful Ping, got error: %+v\", err)\n\t}\n\n\tif tries < expectedTries {\n\t\tt.Fatalf(\"expected client to try %d times, received %d tries\", expectedTries, tries)\n\t}\n}", "func WithRetryCount(c int) Option {\n\treturn func(b *backoff) {\n\t\tif c <= 0 {\n\t\t\treturn\n\t\t}\n\t\tb.maxRetryCount = c\n\t}\n}", "func (options *EditLoadBalancerMonitorOptions) SetRetries(retries int64) *EditLoadBalancerMonitorOptions {\n\toptions.Retries = core.Int64Ptr(retries)\n\treturn options\n}", "func setReplicas(pendingDeployment, previousDeployment *appsv1.Deployment) {\n\tif pendingDeployment.Spec.Replicas == nil || *previousDeployment.Spec.Replicas > *pendingDeployment.Spec.Replicas {\n\t\tlog.Infof(\"Setting number of replicas to pre-existing value of %d\", *previousDeployment.Spec.Replicas)\n\t\tpendingDeployment.Spec.Replicas = previousDeployment.Spec.Replicas\n\t}\n}", "func (h *Host) SetAdress(a string) {\n}", "func SetRouterHealth(err error) { set(SysRouter, err) }", "func (d *DBGenerator) setShardCount(ctx context.Context) error {\n\tq := Queries[ShardCountKey]\n\trows, err := d.Conn.QueryContext(ctx, q)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed running '%s': %w\", q, err)\n\t}\n\tdefer rows.Close()\n\n\tif rows.Err() != nil {\n\t\treturn fmt.Errorf(\"failed running '%s': %w\", q, rows.Err())\n\t}\n\tif !rows.Next() {\n\t\treturn errors.New(\"did find any rows in SHARDS\")\n\t}\n\tif err := rows.Scan(&d.Objs.Vdb.Spec.ShardCount); err != nil {\n\t\treturn fmt.Errorf(\"failed running '%s': %w\", q, err)\n\t}\n\n\treturn nil\n}", "func TestActiveReplicatorReconnectOnStart(t *testing.T) {\n\tbase.RequireNumTestBuckets(t, 2)\n\n\tif testing.Short() {\n\t\tt.Skipf(\"Test skipped in short mode\")\n\t}\n\n\ttests := []struct {\n\t\tname string\n\t\tusernameOverride string\n\t\tremoteURLHostOverride string\n\t\texpectedErrorContains string\n\t\texpectedErrorIsConnectionRefused bool\n\t}{\n\t\t{\n\t\t\tname: \"wrong user\",\n\t\t\tusernameOverride: \"bob\",\n\t\t\texpectedErrorContains: \"unexpected status code 401 from target database\",\n\t\t},\n\t\t{\n\t\t\tname: \"invalid port\", // fails faster than unroutable address (connection refused vs. connect timeout)\n\t\t\tremoteURLHostOverride: \"127.0.0.1:1234\",\n\t\t\texpectedErrorIsConnectionRefused: true,\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\n\t\t\tvar abortTimeout = time.Millisecond * 500\n\t\t\tif runtime.GOOS == \"windows\" {\n\t\t\t\t// A longer timeout is required on Windows as connection refused errors take approx 2 seconds vs. 
instantaneous on Linux.\n\t\t\t\tabortTimeout = time.Second * 5\n\t\t\t}\n\t\t\t// test cases with and without a timeout. Ensure replicator retry loop is stopped in both cases.\n\t\t\ttimeoutVals := []time.Duration{\n\t\t\t\t0,\n\t\t\t\tabortTimeout,\n\t\t\t}\n\n\t\t\tfor _, timeoutVal := range timeoutVals {\n\t\t\t\tt.Run(test.name+\" with timeout \"+timeoutVal.String(), func(t *testing.T) {\n\n\t\t\t\t\tbase.SetUpTestLogging(t, logger.LevelDebug, logger.KeyAll)\n\n\t\t\t\t\t// Passive\n\t\t\t\t\ttb2 := base.GetTestBucket(t)\n\t\t\t\t\trt2 := NewRestTester(t, &RestTesterConfig{\n\t\t\t\t\t\tTestBucket: tb2,\n\t\t\t\t\t\tDatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{\n\t\t\t\t\t\t\tUsers: map[string]*db.PrincipalConfig{\n\t\t\t\t\t\t\t\t\"alice\": {\n\t\t\t\t\t\t\t\t\tPassword: base.StringPtr(\"pass\"),\n\t\t\t\t\t\t\t\t\tExplicitChannels: utils.SetOf(\"alice\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}},\n\t\t\t\t\t})\n\t\t\t\t\tdefer rt2.Close()\n\n\t\t\t\t\t// Make rt2 listen on an actual HTTP port, so it can receive the blipsync request from rt1\n\t\t\t\t\tsrv := httptest.NewServer(rt2.TestPublicHandler())\n\t\t\t\t\tdefer srv.Close()\n\n\t\t\t\t\t// Build remoteDBURL with basic auth creds\n\t\t\t\t\tremoteDBURL, err := url.Parse(srv.URL + \"/db\")\n\t\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t\t// Add basic auth creds to target db URL\n\t\t\t\t\tusername := \"alice\"\n\t\t\t\t\tif test.usernameOverride != \"\" {\n\t\t\t\t\t\tusername = test.usernameOverride\n\t\t\t\t\t}\n\t\t\t\t\tremoteDBURL.User = url.UserPassword(username, \"pass\")\n\n\t\t\t\t\tif test.remoteURLHostOverride != \"\" {\n\t\t\t\t\t\tremoteDBURL.Host = test.remoteURLHostOverride\n\t\t\t\t\t}\n\n\t\t\t\t\t// Active\n\t\t\t\t\ttb1 := base.GetTestBucket(t)\n\t\t\t\t\trt1 := NewRestTester(t, &RestTesterConfig{\n\t\t\t\t\t\tTestBucket: tb1,\n\t\t\t\t\t})\n\t\t\t\t\tdefer rt1.Close()\n\n\t\t\t\t\tid, err := base.GenerateRandomID()\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\tarConfig := db.ActiveReplicatorConfig{\n\t\t\t\t\t\tID: id,\n\t\t\t\t\t\tDirection: db.ActiveReplicatorTypePush,\n\t\t\t\t\t\tRemoteDBURL: remoteDBURL,\n\t\t\t\t\t\tActiveDB: &db.Database{\n\t\t\t\t\t\t\tDatabaseContext: rt1.GetDatabase(),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tContinuous: true,\n\t\t\t\t\t\t// aggressive reconnect intervals for testing purposes\n\t\t\t\t\t\tInitialReconnectInterval: time.Millisecond,\n\t\t\t\t\t\tMaxReconnectInterval: time.Millisecond * 50,\n\t\t\t\t\t\tTotalReconnectTimeout: timeoutVal,\n\t\t\t\t\t\tReplicationStatsMap: base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false).DBReplicatorStats(t.Name()),\n\t\t\t\t\t}\n\n\t\t\t\t\t// Create the first active replicator to pull from seq:0\n\t\t\t\t\tar := db.NewActiveReplicator(&arConfig)\n\t\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t\tassert.Equal(t, int64(0), ar.Push.GetStats().NumConnectAttempts.Value())\n\n\t\t\t\t\terr = ar.Start()\n\t\t\t\t\tassert.Error(t, err, \"expecting ar.Start() to return error, but it didn't\")\n\n\t\t\t\t\tif test.expectedErrorIsConnectionRefused {\n\t\t\t\t\t\tassert.True(t, base.IsConnectionRefusedError(err))\n\t\t\t\t\t}\n\n\t\t\t\t\tif test.expectedErrorContains != \"\" {\n\t\t\t\t\t\tassert.True(t, strings.Contains(err.Error(), test.expectedErrorContains))\n\t\t\t\t\t}\n\n\t\t\t\t\t// wait for an arbitrary number of reconnect attempts\n\t\t\t\t\twaitAndRequireCondition(t, func() bool {\n\t\t\t\t\t\treturn ar.Push.GetStats().NumConnectAttempts.Value() > 2\n\t\t\t\t\t}, \"Expecting NumConnectAttempts > 2\")\n\n\t\t\t\t\tif 
timeoutVal > 0 {\n\t\t\t\t\t\ttime.Sleep(timeoutVal + time.Millisecond*250)\n\t\t\t\t\t\t// wait for the retry loop to hit the TotalReconnectTimeout and give up retrying\n\t\t\t\t\t\twaitAndRequireCondition(t, func() bool {\n\t\t\t\t\t\t\treturn ar.Push.GetStats().NumReconnectsAborted.Value() > 0\n\t\t\t\t\t\t}, \"Expecting NumReconnectsAborted > 0\")\n\t\t\t\t\t}\n\n\t\t\t\t\tassert.NoError(t, ar.Stop())\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}", "func (m *DeviceHealthAttestationState) SetResetCount(value *int64)() {\n err := m.GetBackingStore().Set(\"resetCount\", value)\n if err != nil {\n panic(err)\n }\n}", "func (h *RequestHeader) SetHost(host string) {\n\th.host = append(h.host[:0], host...)\n}", "func (c *Client) SetServerHost(host string, port int) {\n\tc.serverAddr = net.JoinHostPort(host, strconv.Itoa(port))\n}", "func TestRetryAfterCount(t *testing.T) {\n\tconst attempts = 2\n\n\tvar (\n\t\treq, _ = http.NewRequest(\"GET\", \"http://example/test\", nil)\n\t\tnext = &testRoundTrip{err: fmt.Errorf(\"next\"), resp: nil}\n\t\ttrans = Transport{\n\t\t\tRetry: All(Errors(), Max(attempts)),\n\t\t\tNext: next,\n\t\t}\n\t)\n\n\tresp, err := trans.RoundTrip(req)\n\n\tif have, got := next.err.Error(), err.Error(); have == got {\n\t\tt.Fatalf(\"expected to override error from next\")\n\t}\n\n\tif want, got := attempts, next.count; want != got {\n\t\tt.Fatalf(\"expected to make %d attempts, got %d\", want, got)\n\t}\n\n\tif resp != nil {\n\t\tt.Fatalf(\"expected response to be nil since error is not nil\")\n\t}\n}", "func (b *Backend) SetStateRetry(signature *tasks.Signature) error {\n\tstate := tasks.NewRetryTaskState(signature)\n\treturn b.updateState(state)\n}", "func (b *MaxRetryBackOff) Reset() { b.tries = 0 }", "func TestConnectRetry(t *testing.T) {\n\ttype args struct {\n\t\tnetwork string\n\t\taddress string\n\t}\n\ttests := []struct {\n\t\tname string\n\t\targs args\n\t}{\n\t\t{\n\t\t\tname: \"case 1\",\n\t\t\targs: args{\n\t\t\t\tnetwork: \"tcp\",\n\t\t\t\taddress: \"192.168.0.1:8078\",\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tconn, err := exponentialbackoff.ConnectRetry(tt.args.network, tt.args.address)\n\t\t\tfmt.Println(\"conn:\", conn, err)\n\t\t})\n\t}\n}", "func SetReplicas(ns, scanid string, repl int) {\n\tif scanid == \"\" {\n\t\t// don't store metrics without scanner id's configured.\n\t\treturn\n\t}\n\treplicas.With(prometheus.Labels{\n\t\t\"target\": ns,\n\t\t\"scanner\": scanid}).Set(float64(repl))\n}", "func (c *Client) SetCheckRetry(checkRetry retryablehttp.CheckRetry) {\n\tc.modifyLock.RLock()\n\tdefer c.modifyLock.RUnlock()\n\tc.config.modifyLock.Lock()\n\tdefer c.config.modifyLock.Unlock()\n\n\tc.config.CheckRetry = checkRetry\n}", "func (c *ReplicaClient) SetTimeout(timeout time.Duration) {\n\tif c.httpClient != nil {\n\t\tc.httpClient.Timeout = timeout\n\t}\n}", "func (_this *URL) SetHost(value string) {\n\tinput := value\n\t_this.Value_JS.Set(\"host\", input)\n}", "func (c *Client) SetTps(tps int) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tc.tps = tps\n}", "func (c *Conn) SetHostName(property string, value string) error {\n\terr := c.object.Call(dbusInterface+\".\"+property, 0, value, false).Err\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to set hostname: %v\", err)\n\t}\n\n\treturn nil\n}", "func (m *MockAPI) ResetHost(arg0 context.Context, arg1 *models.Host, arg2 string, arg3 *gorm.DB) *common.ApiErrorResponse {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ResetHost\", arg0, arg1, arg2, 
arg3)\n\tret0, _ := ret[0].(*common.ApiErrorResponse)\n\treturn ret0\n}", "func Retries(i int) Option {\n\treturn func(o *Options) {\n\t\to.CallOptions.Retries = i\n\t}\n}", "func (set *HostSet) AddHost(c renter.Contract) {\n\tlh := new(lockedHost)\n\t// lazy connection function\n\tvar lastSeen time.Time\n\tlh.reconnect = func() error {\n\t\tif lh.s != nil && !lh.s.IsClosed() {\n\t\t\t// if it hasn't been long since the last reconnect, assume the\n\t\t\t// connection is still open\n\t\t\tif time.Since(lastSeen) < 2*time.Minute {\n\t\t\t\tlastSeen = time.Now()\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t// otherwise, the connection *might* still be open; test by sending\n\t\t\t// a \"ping\" RPC\n\t\t\t//\n\t\t\t// NOTE: this is somewhat inefficient; it means we might incur an\n\t\t\t// extra roundtrip when we don't need to. Better would be for the\n\t\t\t// caller to handle the reconnection logic after calling whatever\n\t\t\t// RPC it wants to call; that way, we only do extra work if the host\n\t\t\t// has actually disconnected. But that feels too burdensome.\n\t\t\tif _, err := lh.s.Settings(); err == nil {\n\t\t\t\tlastSeen = time.Now()\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t// connection timed out, or some other error occurred; close our\n\t\t\t// end (just in case) and fallthrough to the reconnection logic\n\t\t\tlh.s.Close()\n\t\t}\n\t\thostIP, err := set.hkr.ResolveHostKey(c.HostKey)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not resolve host key: %w\", err)\n\t\t}\n\t\t// create and lock the session manually so that we can use our custom\n\t\t// lock timeout\n\t\tlh.s, err = proto.NewUnlockedSession(hostIP, c.HostKey, set.currentHeight)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := lh.s.Lock(c.ID, c.RenterKey, set.lockTimeout); err != nil {\n\t\t\tlh.s.Close()\n\t\t\treturn err\n\t\t} else if _, err := lh.s.Settings(); err != nil {\n\t\t\tlh.s.Close()\n\t\t\treturn err\n\t\t}\n\t\tset.onConnect(lh.s)\n\t\tlastSeen = time.Now()\n\t\treturn nil\n\t}\n\tset.sessions[c.HostKey] = lh\n}", "func (r *resolver) setLast(host string) {\n\tr.mu.Lock()\n\tr.last = host\n\tr.mu.Unlock()\n}", "func (peer *Peer) ResetRetryTimes() {\n\tpeer.RetryTimes = 0\n\tlogger.Debug(\"Reset retry times of %v\", peer.Addr)\n}", "func (d *Dispatcher) SetWorkerCount(n int) {\n\tif n > 0 {\n\t\td.workerCount = n\n\n\t\tfor d.workerCount != len(d.workers) {\n\t\t\tif d.workerCount > len(d.workers) {\n\t\t\t\td.addWorker()\n\t\t\t} else {\n\t\t\t\td.removeWorker()\n\t\t\t}\n\t\t}\n\t}\n}", "func (e *Exec) IncrRetriesCount() {\n\te.RetriesCount++\n}", "func (e *Executor) SetHost(v string) {\n\t// return if Executor type is nil\n\tif e == nil {\n\t\treturn\n\t}\n\n\te.Host = &v\n}", "func (c *GormClient) IncRetryCount(j *jobinator.Job) error {\n\terr := c.db.Model(j).Update(\"retry_count\", gorm.Expr(\"retry_count + ?\", 1)).Error\n\tj.RetryCount++\n\treturn err\n}", "func SetMaxRetries(maxRetries int8) Option {\n\treturn func(s *Scraper) Option {\n\t\tprev := s.maxRetries\n\t\ts.maxRetries = maxRetries\n\t\treturn SetMaxRetries(prev)\n\t}\n}", "func (b *Backend) SetStateRetry(signature *tasks.Signature) error {\n\tupdate := bson.M{\"state\": tasks.StateRetry}\n\treturn b.updateState(signature, update)\n}", "func TestReAttachTCP(t *testing.T) {\n\ttestAttachNTimes(t, 3)\n}", "func SetWorkerCount(count uint64) {\n\tworkerCount = count\n}", "func (x *snmpHandler) SetMaxRepetitions(maxRepetitions uint32) {\n\tx.GoSNMP.MaxRepetitions = (maxRepetitions & 0x7FFFFFFF)\n}", "func (c *Client) 
SetMaxRetryWait(retryWait time.Duration) {\n\tc.modifyLock.RLock()\n\tdefer c.modifyLock.RUnlock()\n\tc.config.modifyLock.Lock()\n\tdefer c.config.modifyLock.Unlock()\n\n\tc.config.MaxRetryWait = retryWait\n}", "func Retries(retries int) func(*Config) error {\n\treturn func(c *Config) error {\n\t\tc.Retries = retries\n\t\treturn nil\n\t}\n}", "func retry(g getter, retries int, delay time.Duration) getter {\n\treturn func(client http.Client, url string) (*http.Response, error) {\n\t\tfor r := 0; ; r++ {\n\t\t\tresponse, err := g(client, url)\n\t\t\tif err == nil || r >= retries {\n\t\t\t\treturn response, err\n\t\t\t}\n\t\t\ttime.Sleep(delay)\n\t\t}\n\t}\n}", "func dialHost(addr string, port, timeout uint) error {\n\twaitTime := 5 * time.Second\n\tattempts := timeout / uint(waitTime.Seconds())\n\tfullAddr := fmt.Sprintf(\"%s:%d\", addr, port)\n\n\treturn retry.Retry(func(attempt uint) error {\n\t\tconn, err := net.Dial(\"tcp\", fullAddr)\n\t\tif err != nil {\n\t\t\tlog.Warnf(\"attempt [%d] to verify host [%s] is listening failed\", attempt, fullAddr)\n\t\t\treturn err\n\t\t}\n\n\t\treturn conn.Close()\n\t}, strategy.Wait(waitTime), strategy.Limit(attempts))\n}", "func setReuseAddress(network, addr string, conn syscall.RawConn) error {\n\treturn fmt.Errorf(\"address reuse is not supported on Windows\")\n}", "func (t *ThrottledReadCloser) SetLimit(r rate.Limit, b int) error {\n\treturn t.pool.SetLimitByID(r, b, t.id)\n}", "func SetRetrySeconds(retrySeconds int8) Option {\n\treturn func(s *Scraper) Option {\n\t\tprev := s.retrySeconds\n\t\tif retrySeconds > 0 {\n\t\t\ts.retrySeconds = retrySeconds\n\t\t}\n\t\treturn SetRetrySeconds(prev)\n\t}\n}", "func setReuseAddress(network, addr string, conn syscall.RawConn) error {\n\treturn errors.New(\"address reuse is not supported on Windows\")\n}", "func TestRetryingClient(t *testing.T) {\n\trequestCount := 0\n\n\t// Our bad server only responds properly on the third try\n\tbadServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\trequestCount++\n\t\tif requestCount == 1 {\n\n\t\t\t// Simulate a dropped connection.\n\t\t\t// Abort such that the client sees an interrupted response but the server doesn't log an error\n\t\t\t// https://golang.org/pkg/net/http/#Handler\n\t\t\tpanic(http.ErrAbortHandler)\n\t\t}\n\t\tif requestCount == 2 {\n\n\t\t\t// Any non-200 response should be considered failure, including e.g. 
202\n\t\t\tw.WriteHeader(http.StatusAccepted)\n\t\t\treturn\n\t\t}\n\t\tw.WriteHeader(http.StatusOK)\n\t}))\n\tdefer badServer.Close()\n\n\tclient := &RetryingClient{\n\t\tclient: &http.Client{\n\t\t\tTimeout: time.Millisecond * 500,\n\t\t},\n\t\tMaxRetries: 2,\n\t\tBackoff: 0,\n\t}\n\n\tbadServerReq, err := http.NewRequest(\"GET\", badServer.URL, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tres, err := client.Do(badServerReq)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif res.StatusCode != http.StatusOK {\n\t\tt.Fatal(\"client did not retry the configured number of times\")\n\t}\n\tif requestCount != 3 {\n\t\tt.Fatalf(\"unexpected number of requests made, want 3 got %d\", requestCount)\n\t}\n}", "func SetNumThreads(numThreads int) {\n\tpoolSize = numThreads\n}", "func (b *Binary) SetHost(host string) {\n\tif host == \"\" {\n\t\treturn\n\t}\n\tu, err := url.Parse(host)\n\tif err != nil {\n\t\treturn\n\t}\n\tu.Path = b.url.Path\n\tb.url = u\n}", "func (m *DeviceManagementConfigurationPolicy) SetSettingCount(value *int32)() {\n err := m.GetBackingStore().Set(\"settingCount\", value)\n if err != nil {\n panic(err)\n }\n}", "func WithRetries(i int) CallOption {\n\treturn func(o *CallOptions) {\n\t\to.Retries = i\n\t}\n}", "func (h *StandHystrix) SetMaxFailedNumber(number int64) {\n\th.maxFailedNumber = number\n}", "func (sshConfig *SSHConfig) SetMaxRetries(retries int) (result *SSHConfig) {\n\tsshConfig.maxRetries = retries\n\tresult = sshConfig\n\treturn\n}", "func (m *MailTips) SetExternalMemberCount(value *int32)() {\n err := m.GetBackingStore().Set(\"externalMemberCount\", value)\n if err != nil {\n panic(err)\n }\n}", "func (c *Config) SetInt(k string, i int) {\n\tc.SetString(k, strconv.Itoa(i))\n}", "func SetTTLNotInt(t *testing.T, f func() (mangos.Socket, error)) {\n\ts, err := f()\n\tif err != nil {\n\t\tt.Errorf(\"Failed to make socket: %v\", err)\n\t\treturn\n\t}\n\tdefer s.Close()\n\terr = s.SetOption(mangos.OptionTTL, \"garbage\")\n\tswitch err {\n\tcase mangos.ErrBadValue: // expected result\n\tcase nil:\n\t\tt.Errorf(\"Negative test fail, permitted non-int value\")\n\tdefault:\n\t\tt.Errorf(\"Negative test fail (garbage), wrong error %v\", err)\n\t}\n}", "func SetTTL(t *testing.T, f func() (mangos.Socket, error)) {\n\ts, err := f()\n\tif err != nil {\n\t\tt.Errorf(\"Failed to make socket: %v\", err)\n\t\treturn\n\t}\n\tdefer s.Close()\n\n\terr = s.SetOption(mangos.OptionTTL, 2)\n\tif err != nil {\n\t\tt.Errorf(\"Failed SetOption: %v\", err)\n\t\treturn\n\t}\n\n\tv, err := s.GetOption(mangos.OptionTTL)\n\tif err != nil {\n\t\tt.Errorf(\"Failed GetOption: %v\", err)\n\t\treturn\n\t}\n\tif val, ok := v.(int); !ok {\n\t\tt.Errorf(\"Returned value not type int\")\n\t} else if val != 2 {\n\t\tt.Errorf(\"Returned value %d not %d\", val, 2)\n\t}\n}", "func (client *BaseClient) Retry() int {\n\treturn client.retry\n}", "func (u SysDBUpdater) SetHost(host string) SysDBUpdater {\n\tu.fields[string(SysDBDBSchema.Host)] = host\n\treturn u\n}", "func (m *SequentialActivationRenewalsAlertIncident) SetActivationCount(value *int32)() {\n err := m.GetBackingStore().Set(\"activationCount\", value)\n if err != nil {\n panic(err)\n }\n}", "func (s *SaveVideo) SetRetries(retryDelay time.Duration, maxRetries int) {\n\ts.retryDelay = retryDelay\n\ts.maxRetries = maxRetries\n}", "func (m *RecurrenceRange) SetNumberOfOccurrences(value *int32)() {\n err := m.GetBackingStore().Set(\"numberOfOccurrences\", value)\n if err != nil {\n panic(err)\n }\n}", "func (h *ProxyHealth) SetHealthCheck(check 
func(addr *url.URL) bool, period time.Duration) {\n\th.mu.Lock()\n\tdefer h.mu.Unlock()\n\n\th.stop()\n\th.check = check\n\th.period = period\n\th.cancel = make(chan struct{})\n\th.isAvailable = h.check(h.origin)\n\th.run()\n}", "func SetDNSHealth(err error) { set(SysDNS, err) }", "func (n *Sub) retry(uri *model.NotifyURL, msg string, source int) (err error) {\n\tlog.Info(\"Notify.retry do callback url(%v), msg(%s), source(%d)\", uri, msg, source)\n\tfor i := 0; i < _retry; i++ {\n\t\terr = n.clients.Post(context.TODO(), uri, msg)\n\t\tif err != nil {\n\t\t\ttime.Sleep(n.backoff.Backoff(i))\n\t\t\tcontinue\n\t\t} else {\n\t\t\tlog.Info(\"Notify.retry callback success group(%s), topic(%s), retry(%d), msg(%s), source(%d)\",\n\t\t\t\tn.w.Group, n.w.Topic, i, msg, source)\n\t\t\treturn\n\t\t}\n\t}\n\tif err != nil {\n\t\tlog.Error(\"Notify.retry callback error(%v), uri(%s), msg(%s), source(%d)\",\n\t\t\terr, uri, msg, source)\n\t}\n\treturn\n}", "func WithHost(host string) Option {\n\treturn func(c *Client) error {\n\t\tc.transport.URL = transport.DefaultURL\n\t\tu, err := url.Parse(host)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif u.Scheme != \"\" {\n\t\t\tc.transport.URL.Scheme = u.Scheme\n\t\t}\n\t\tc.transport.URL.Host = u.Host\n\t\tif !strings.Contains(c.transport.URL.Host, \":\") {\n\t\t\tc.transport.URL.Host += \":\" + string(transport.DefaultPort)\n\t\t}\n\t\treturn nil\n\t}\n}", "func (context *context) SetThreads(v uint) {\n\tcontext.params.SetThreads(int(v))\n}", "func (m *AospDeviceOwnerDeviceConfiguration) SetPasswordSignInFailureCountBeforeFactoryReset(value *int32)() {\n err := m.GetBackingStore().Set(\"passwordSignInFailureCountBeforeFactoryReset\", value)\n if err != nil {\n panic(err)\n }\n}" ]
[ "0.56200206", "0.55528766", "0.5400684", "0.517109", "0.5157865", "0.51496375", "0.5104901", "0.5081008", "0.50627774", "0.5058947", "0.50475997", "0.5022949", "0.49496228", "0.49336797", "0.4874195", "0.48509568", "0.4837097", "0.4823085", "0.48207945", "0.48175836", "0.47882503", "0.47857398", "0.47821838", "0.47735375", "0.47712156", "0.4764436", "0.47304866", "0.46878898", "0.468765", "0.46809536", "0.46745694", "0.46630123", "0.46590486", "0.46548706", "0.46516833", "0.46505916", "0.46477947", "0.46298483", "0.4624275", "0.46228653", "0.45975223", "0.45909888", "0.45861134", "0.4577945", "0.45506516", "0.45487458", "0.45470008", "0.4535186", "0.45310065", "0.45267516", "0.45167542", "0.4513882", "0.45074242", "0.4502792", "0.45013028", "0.4483337", "0.44828165", "0.44788232", "0.44781297", "0.44676703", "0.44625604", "0.4459315", "0.4456462", "0.44547427", "0.4449922", "0.44487223", "0.44474408", "0.4436917", "0.44211215", "0.4420598", "0.4412023", "0.44044116", "0.4403832", "0.43877888", "0.43787557", "0.43754163", "0.43746713", "0.43685788", "0.4364515", "0.43607217", "0.4359698", "0.43532518", "0.43467677", "0.4336547", "0.43262988", "0.43246046", "0.43197736", "0.4316225", "0.43161023", "0.42974982", "0.4290425", "0.42774802", "0.42740738", "0.42740092", "0.42720178", "0.42715767", "0.42705432", "0.4265677", "0.4263397", "0.42632538" ]
0.52591467
3
SetRetryWaitTime accepts an int and sets retry wait time in seconds.
func (co *serverConfig) SetRetryWaitTime(t int) ServerConfigBuilder { co.RetryWaitTime = time.Duration(t) * time.Second return co }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *Client) SetMaxRetryWait(retryWait time.Duration) {\n\tc.modifyLock.RLock()\n\tdefer c.modifyLock.RUnlock()\n\tc.config.modifyLock.Lock()\n\tdefer c.config.modifyLock.Unlock()\n\n\tc.config.MaxRetryWait = retryWait\n}", "func SetRetrySeconds(retrySeconds int8) Option {\n\treturn func(s *Scraper) Option {\n\t\tprev := s.retrySeconds\n\t\tif retrySeconds > 0 {\n\t\t\ts.retrySeconds = retrySeconds\n\t\t}\n\t\treturn SetRetrySeconds(prev)\n\t}\n}", "func (co *serverConfig) SetRetry(r int) ServerConfigBuilder {\n\tco.Retry = r\n\treturn co\n}", "func (client *BaseClient) SetRetry(value int) {\n\tclient.retry = value\n}", "func (c *Client) SetMinRetryWait(retryWait time.Duration) {\n\tc.modifyLock.RLock()\n\tdefer c.modifyLock.RUnlock()\n\tc.config.modifyLock.Lock()\n\tdefer c.config.modifyLock.Unlock()\n\n\tc.config.MinRetryWait = retryWait\n}", "func (a *Animator) SetRetryInterval(retryInterval time.Duration) {\n\ta.RetryInterval = RetryIntervalDefault\n\tif retryInterval > 0 {\n\t\ta.RetryInterval = retryInterval\n\t}\n}", "func (s *SaveVideo) SetRetries(retryDelay time.Duration, maxRetries int) {\n\ts.retryDelay = retryDelay\n\ts.maxRetries = maxRetries\n}", "func (sshConfig *SSHConfig) SetSleepBtwRetries(sleepMS int64) (result *SSHConfig) {\n\tsshConfig.sleepBtwRetries = sleepMS\n\tresult = sshConfig\n\treturn\n}", "func (r *Retry) MaxWaitTime(maxWaitTime time.Duration) dataflow.Retry {\n\tr.maxWaitTime = maxWaitTime\n\treturn r\n}", "func SetRetryParameters(maxAttempts int, maxGap int) {\n\tif maxAttempts > 0 {\n\t\tmaxRetryAttempt = maxAttempts\n\t}\n\n\tif maxGap > 0 {\n\t\tmaxRetryGap = maxGap\n\t}\n}", "func (o *QueryOptions) SetRetry(retry bool) *QueryOptions {\n\tif o == nil {\n\t\treturn nil\n\t}\n\to.Retry = retry\n\treturn o\n}", "func (r *Retry) WaitTime(waitTime time.Duration) dataflow.Retry {\n\tr.waitTime = waitTime\n\treturn r\n}", "func (options *CreateLoadBalancerMonitorOptions) SetRetries(retries int64) *CreateLoadBalancerMonitorOptions {\n\toptions.Retries = core.Int64Ptr(retries)\n\treturn options\n}", "func RetryTimes(n uint) Option {\n\treturn func(rc *RetryConfig) {\n\t\trc.retryTimes = n\n\t}\n}", "func (options *EditLoadBalancerMonitorOptions) SetRetries(retries int64) *EditLoadBalancerMonitorOptions {\n\toptions.Retries = core.Int64Ptr(retries)\n\treturn options\n}", "func (object *Config) SetDBRetryTimes(times int) *Config {\n\tobject.dbRetryTimes = times\n\treturn object\n}", "func (c *Client) SetMaxRetries(retries uint8, timeBetweenRetries int64) {\n\tc.MaxRetriesOnError = retries\n\tif timeBetweenRetries == 0 {\n\t\tc.TimeBetweenRetries = 1\n\t} else {\n\t\tc.TimeBetweenRetries = timeBetweenRetries\n\t}\n}", "func (c *Client) SetMaxRetries(retries int) {\n\tc.modifyLock.RLock()\n\tdefer c.modifyLock.RUnlock()\n\tc.config.modifyLock.Lock()\n\tdefer c.config.modifyLock.Unlock()\n\n\tc.config.MaxRetries = retries\n}", "func (c *Authorized) applyRetryWait(retryCount int) {\n\ttime.Sleep(time.Duration(c.retrySettings.MinMsBetweenRetries) * time.Millisecond)\n}", "func (g *Gorc) SetWaitMillis(w int) {\n\tg.Lock()\n\tg.waitMillis = time.Duration(w) * time.Millisecond\n\tg.Unlock()\n}", "func (c *AuditClient) SetBacklogWaitTime(waitTime int32, wm WaitMode) error {\n\tstatus := AuditStatus{\n\t\tMask: AuditStatusBacklogWaitTime,\n\t\tBacklogWaitTime: uint32(waitTime),\n\t}\n\treturn c.set(status, wm)\n}", "func (o *StackpathRpcRetryInfoAllOf) SetRetryDelay(v string) {\n\to.RetryDelay = &v\n}", "func SetMaxRetries(maxRetries int8) Option {\n\treturn func(s 
*Scraper) Option {\n\t\tprev := s.maxRetries\n\t\ts.maxRetries = maxRetries\n\t\treturn SetMaxRetries(prev)\n\t}\n}", "func (g *Generator) SetWaitSeconds(secs float64) {\n\tg.waitSeconds = secs\n}", "func RetryWait(interval time.Duration) OptionFunc {\n\treturn func(c *Component) error {\n\t\tif interval <= 0 {\n\t\t\treturn errors.New(\"retry wait time should be a positive number\")\n\t\t}\n\t\tc.retryWait = interval\n\t\treturn nil\n\t}\n}", "func (peer *Peer) ResetRetryTimes() {\n\tpeer.RetryTimes = 0\n\tlogger.Debug(\"Reset retry times of %v\", peer.Addr)\n}", "func MaxWaitTime(maxWaitTime time.Duration) DefaultBackoffPolicyOption {\n\treturn func(dbp *DefaultBackoffPolicy) {\n\t\tdbp.MaxWaitTime = maxWaitTime\n\t}\n}", "func (r *MethodCallRetrier) sleepAndIncrementRetries() {\n\ttime.Sleep(time.Duration(r.waitTime) * time.Second)\n\n\tr.waitTime *= r.exponent\n\n\tr.currentRetries++\n}", "func (s *ReceiveMessageInput) SetWaitTimeSeconds(v int64) *ReceiveMessageInput {\n\ts.WaitTimeSeconds = &v\n\treturn s\n}", "func (o *Job) SetRetries(v int32) {\n\to.Retries = &v\n}", "func MinWaitTime(minWaitTime time.Duration) DefaultBackoffPolicyOption {\n\treturn func(dbp *DefaultBackoffPolicy) {\n\t\tdbp.MinWaitTime = minWaitTime\n\t}\n}", "func (b *Backend) SetStateRetry(signature *tasks.Signature) error {\n\tupdate := bson.M{\"state\": tasks.StateRetry}\n\treturn b.updateState(signature, update)\n}", "func (cd *ConnectionDetails) RetrySleep() time.Duration {\n\td, err := time.ParseDuration(defaults.String(cd.Options[\"retry_sleep\"], \"1ms\"))\n\tif err != nil {\n\t\treturn 1 * time.Millisecond\n\t}\n\treturn d\n}", "func (c *Client) SetCheckRetry(checkRetry retryablehttp.CheckRetry) {\n\tc.modifyLock.RLock()\n\tdefer c.modifyLock.RUnlock()\n\tc.config.modifyLock.Lock()\n\tdefer c.config.modifyLock.Unlock()\n\n\tc.config.CheckRetry = checkRetry\n}", "func (s *Sample) SetTimeSleep(d int) *Sample {\n\ts.Ticker = time.NewTicker(time.Duration(d) * time.Second)\n\ts.TimeSleep = d\n\n\treturn s\n}", "func (c *APIClient) updateRetry(retry string) {\r\n\tv, err := strconv.Atoi(retry)\r\n\tif err != nil {\r\n\t\t// retry value incorrect\r\n\t\tlog.Printf(\"Retry value incorrect %s\\n\", retry)\r\n\t}\r\n\tc.RateLimit.RetryAfter = v\r\n}", "func WithRetryAttempts(num int) PublishOpt {\n\treturn func(opts *pubOpts) error {\n\t\topts.retryAttempts = num\n\t\treturn nil\n\t}\n}", "func (sshConfig *SSHConfig) SetMaxRetries(retries int) (result *SSHConfig) {\n\tsshConfig.maxRetries = retries\n\tresult = sshConfig\n\treturn\n}", "func WithRetryWait(dur time.Duration) PublishOpt {\n\treturn func(opts *pubOpts) error {\n\t\topts.retryWait = dur\n\t\treturn nil\n\t}\n}", "func (transaction *ScheduleSignTransaction) SetMaxRetry(count int) *ScheduleSignTransaction {\n\ttransaction.Transaction.SetMaxRetry(count)\n\treturn transaction\n}", "func RetryDuration(d time.Duration) Option {\n\treturn func(rc *RetryConfig) {\n\t\trc.retryDuration = d\n\t}\n}", "func (s stdlib) RetryInterval(time.Duration) {}", "func WithMaxRetry(retry int) OptionFunc {\n\treturn func(cfg *config) {\n\t\tcfg.maxRetry = retry\n\t}\n}", "func (req *Request) SetTimeout(t time.Duration) {\n\tvalue := t.Milliseconds()\n\tif value == 0 {\n\t\tlog.Debug(\"WARNING! Setting a timeout of 0 means infinite timeout!!\")\n\t} else if value < 0 {\n\t\tvalue = -value\n\t\tlog.Warning(\"WARNING! 
Get a negative timeout, using absolute value\")\n\t}\n\treq.Timeout = time.Duration(value) * time.Millisecond\n}", "func WithRetryCount(c int) Option {\n\treturn func(b *backoff) {\n\t\tif c <= 0 {\n\t\t\treturn\n\t\t}\n\t\tb.maxRetryCount = c\n\t}\n}", "func (transaction *FileCreateTransaction) SetMaxRetry(count int) *FileCreateTransaction {\n\ttransaction.Transaction.SetMaxRetry(count)\n\treturn transaction\n}", "func WithMaxRetryAttempts(maximumAttempts uint) Option {\n\treturn func(r *Retrier) {\n\t\tr.maximumRetryAttempts = maximumAttempts\n\t}\n}", "func RetryBackoff(retry int, minBackoff, maxBackoff time.Duration) time.Duration {\n\tif retry < 0 {\n\t\tretry = 0\n\t}\n\n\tbackoff := minBackoff << uint(retry)\n\tif backoff > maxBackoff || backoff < minBackoff {\n\t\tbackoff = maxBackoff\n\t}\n\n\tif backoff == 0 {\n\t\treturn 0\n\t}\n\treturn time.Duration(rand.Int63n(int64(backoff)))\n}", "func (transaction *AccountCreateTransaction) SetMaxRetry(count int) *AccountCreateTransaction {\n\ttransaction.Transaction.SetMaxRetry(count)\n\treturn transaction\n}", "func (pl *Peerlist) ResetRetryTimes(addr string) {\n\tpl.lock.Lock()\n\tdefer pl.lock.Unlock()\n\n\tif _, ok := pl.peers[addr]; ok {\n\t\tpl.peers[addr].ResetRetryTimes()\n\t\tpl.peers[addr].Seen()\n\t}\n}", "func RetrySeconds(secs int) Option {\n\treturn func(opts workerOpts) workerOpts {\n\t\topts.retrySecs = secs\n\t\treturn opts\n\t}\n}", "func (o *ConfigurationBackupModifyParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func Retry(timeoutSeconds int, sleepSeconds int, try func() error) error {\n\tstart, err := time.Now(), try() // gurantees that it tries once regardless of timeout\n\ttime.Sleep(time.Duration(sleepSeconds) * time.Second) // all try calls are seperated by the sleep period\n\tfor err != nil && time.Since(start) < time.Duration(timeoutSeconds)*time.Second {\n\t\terr = try()\n\t\ttime.Sleep(time.Duration(sleepSeconds) * time.Second)\n\t}\n\treturn err\n}", "func (b *Backend) SetStateRetry(signature *tasks.Signature) error {\n\tstate := tasks.NewRetryTaskState(signature)\n\treturn b.updateState(state)\n}", "func (o *OnpremUpgradePhase) SetRetryCount(v int64) {\n\to.RetryCount = &v\n}", "func (m *MockIrcServer) SetReconnectExp(val uint64) {\n\tm.reconnectExp = &val\n}", "func (peer *Peer) IncreaseRetryTimes() {\n\tpeer.RetryTimes++\n\tlogger.Debug(\"Increase retry times of %v to %v\", peer.Addr, peer.RetryTimes)\n}", "func (transaction *TokenUpdateTransaction) SetMaxRetry(count int) *TokenUpdateTransaction {\n\ttransaction.Transaction.SetMaxRetry(count)\n\treturn transaction\n}", "func (botManagement *BotManagementV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) {\n\tbotManagement.Service.EnableRetries(maxRetries, maxRetryInterval)\n}", "func WithRetryDelay(retryDelay time.Duration) OptionFunc {\n\treturn func(opts *option) error {\n\t\topts.retryDelay = retryDelay\n\t\treturn nil\n\t}\n}", "func (s *HealthCheck) SetRetries(v int64) *HealthCheck {\n\ts.Retries = &v\n\treturn s\n}", "func (v SensorType) GetRetryTimeout() time.Duration {\n\treturn 1500 * time.Millisecond\n}", "func WithRetry(ctx context.Context, period time.Duration, maxCount int, expBackoff bool) context.Context {\n\treturn context.WithValue(ctx, retryCtxKey, &RetryOpt{\n\t\tPeriod: period,\n\t\tMaxCount: maxCount,\n\t\tExpBackoff: expBackoff,\n\t})\n}", "func (o *PatchRetryEventUsingPATCHParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func (c *Sender) SetPollAttempts(pa int) 
{\n\tc.pollAttempts = pa\n}", "func NewRetry(maxTries int, initialDelay, maxDelay time.Duration) *Retry {\n\tif maxTries <= 0 {\n\t\tmaxTries = DefaultMaxTries\n\t}\n\tif initialDelay <= 0 {\n\t\tinitialDelay = DefaultInitialDelay\n\t}\n\tif maxDelay <= 0 {\n\t\tmaxDelay = DefaultMaxDelay\n\t}\n\treturn &Retry{maxTries, initialDelay, maxDelay}\n}", "func WithRetryIntervals(retryIntervals []time.Duration) Option {\n\treturn func(opts *Options) {\n\t\topts.RetryIntervals = retryIntervals\n\t}\n}", "func (r *CustomRetrier) Retry(\n\tctx context.Context,\n\tretry int,\n\treq *http.Request,\n\tresp *http.Response,\n\terr error) (time.Duration, bool, error) {\n\t// Fail hard on a specific error\n\tif err == syscall.ECONNREFUSED {\n\t\treturn 0, false, errors.New(\"elasticsearch or network down\")\n\t}\n\n\t// Stop after 5 retries\n\tif retry >= 5 {\n\t\treturn 0, false, nil\n\t}\n\n\t// Let the backoff strategy decide how long to wait and whether to stop\n\twait, stop := r.backoff.Next(retry)\n\n\treturn wait, stop, nil\n}", "func (c *Config) ReconnectTimeout(seconds uint) *Config {\n\tc.GetContext().ReconnectTimeout = strconv.FormatUint(uint64(seconds), 10)\n\treturn c\n}", "func setRandomizedElectionTimeout(r *raft, v int) {\n\tr.randomizedElectionTimeout = v\n}", "func Retry(\n\tf func() error,\n\tnumberOfRetries int,\n\tonError func(error),\n\tperiod ...time.Duration) {\n\tp := time.Second * 5\n\tif len(period) > 0 && period[0] > 0 {\n\t\tp = period[0]\n\t}\n\tfor numberOfRetries != 0 {\n\t\tif numberOfRetries > 0 {\n\t\t\tnumberOfRetries--\n\t\t}\n\t\tif err := Try(f); err != nil {\n\t\t\tif onError != nil {\n\t\t\t\tonError(err)\n\t\t\t}\n\t\t\tif numberOfRetries != 0 {\n\t\t\t\ttime.Sleep(p)\n\t\t\t}\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n}", "func (s *MockTraceServer) SetDelay(delay time.Duration) {\n\ts.delayLock.Lock()\n\tdefer s.delayLock.Unlock()\n\ts.delay = delay\n}", "func (r *CustomRetrier) Retry(\n\tctx context.Context,\n\tretry int,\n\treq *http.Request,\n\tresp *http.Response,\n\terr error) (time.Duration, bool, error) {\n\t// Fail hard on a specific error\n\tif err == syscall.ECONNREFUSED {\n\t\treturn 0, false, errors.New(\"Elasticsearch or network down\")\n\t}\n\n\t// Stop after 5 retries\n\tif retry >= 5 {\n\t\treturn 0, false, nil\n\t}\n\n\t// Let the backoff strategy decide how long to wait and whether to stop\n\twait, stop := r.backoff.Next(retry)\n\treturn wait, stop, nil\n}", "func retryParams() retry.Iterator {\n\treturn &retry.ExponentialBackoff{\n\t\tLimited: retry.Limited{\n\t\t\tDelay: 100 * time.Millisecond,\n\t\t\tMaxTotal: 5 * time.Minute,\n\t\t\tRetries: -1, // until the overall MaxTotal timeout\n\t\t},\n\t\tMultiplier: 2,\n\t\tMaxDelay: 10 * time.Second,\n\t}\n}", "func (options *EditLoadBalancerMonitorOptions) SetTimeout(timeout int64) *EditLoadBalancerMonitorOptions {\n\toptions.Timeout = core.Int64Ptr(timeout)\n\treturn options\n}", "func (o *UpdateSleepGoalParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func Retries(i int) Option {\n\treturn func(o *Options) {\n\t\to.CallOptions.Retries = i\n\t}\n}", "func (cd *ConnectionDetails) RetryLimit() int {\n\ti, err := strconv.Atoi(defaults.String(cd.Options[\"retry_limit\"], \"1000\"))\n\tif err != nil {\n\t\treturn 100\n\t}\n\treturn i\n}", "func (b *ExponentialBackoff) Retry(fn Func) error {\n\twait := time.Duration(b.InitialDelayInterval)\n\tctx := context.Background()\n\tif b.Ctx != nil {\n\t\tctx = b.Ctx\n\t}\n\n\tfor i := 0; i < b.MaxRetryAttempts || b.MaxRetryAttempts == 0; i++ 
{\n\t\tif i != 0 {\n\t\t\t// Verify if we reached the MaxElapsedTime\n\t\t\tif b.MaxElapsedTime != 0 && time.Since(b.start) > time.Duration(b.MaxElapsedTime) {\n\t\t\t\treturn ErrMaxElapsedTime\n\t\t\t}\n\n\t\t\t// Sleep for the determined duration\n\t\t\tif b.MaxDelayInterval > 0 && wait > time.Duration(b.MaxDelayInterval) {\n\t\t\t\twait = time.Duration(b.MaxDelayInterval)\n\t\t\t}\n\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn ctx.Err()\n\t\t\tcase <-time.After(time.Duration(wait)):\n\t\t\t}\n\n\t\t\t// Exponentially increase that sleep duration\n\t\t\tmultiplier := b.Multiplier\n\t\t\tif multiplier == 0 {\n\t\t\t\tmultiplier = DefaultMultiplier\n\t\t\t}\n\t\t\twait = time.Duration(float64(wait) * multiplier)\n\n\t\t\t// Add a jitter (randomized delay) for the next attempt, to prevent\n\t\t\t// potential collisions\n\t\t\twait = wait + time.Duration(rand.Float64()*float64(wait))\n\t\t} else {\n\t\t\t// Save the current time, in order to measure the total execution time\n\t\t\tb.start = time.Now()\n\t\t}\n\n\t\tif ok, err := fn(i); err != nil || ok {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn ErrMaxRetryAttempts\n}", "func (r *RetryPolicy) Sleep() {\n\tdefer r.retryCount.Inc()\n\n\tif r.retryCount.Load() == 0 {\n\t\tduration := r.retryCount.Load() * uint32(r.rangeMillSeconds[0]) / 2\n\t\ttime.Sleep(time.Duration(duration) * time.Millisecond)\n\t\treturn\n\t}\n\n\tif r.retryCount.Load() <= r.immuneCount {\n\t\tduration := r.retryCount.Load() * uint32(r.rangeMillSeconds[0])\n\t\ttime.Sleep(time.Duration(duration) * time.Millisecond)\n\t\treturn\n\t}\n\n\t// no matter retry how many times, sleep a const time and with an extra rand time.\n\trd := rand.New(rand.NewSource(time.Now().UnixNano()))\n\trandTime := rd.Intn(int(r.rangeMillSeconds[1])-int(r.rangeMillSeconds[0])) + int(r.rangeMillSeconds[0])\n\tduration := r.rangeMillSeconds[0] + uint(randTime)\n\ttime.Sleep(time.Duration(duration) * time.Millisecond)\n\treturn\n}", "func (c *Client) SetBackoff(backoff retryablehttp.Backoff) {\n\tc.modifyLock.RLock()\n\tdefer c.modifyLock.RUnlock()\n\tc.config.modifyLock.Lock()\n\tdefer c.config.modifyLock.Unlock()\n\n\tc.config.Backoff = backoff\n}", "func (o NotificationEndpointGrpcSettingsResponseOutput) RetryDurationSec() pulumi.IntOutput {\n\treturn o.ApplyT(func(v NotificationEndpointGrpcSettingsResponse) int { return v.RetryDurationSec }).(pulumi.IntOutput)\n}", "func (options *CreateLoadBalancerMonitorOptions) SetTimeout(timeout int64) *CreateLoadBalancerMonitorOptions {\n\toptions.Timeout = core.Int64Ptr(timeout)\n\treturn options\n}", "func (runner ConcurrentRunner) SetTimeout(dur time.Duration) ConcurrentRunner {\n\trunner.timeout = &dur\n\treturn runner\n}", "func (l *Loader) RetryDelay() time.Duration {\n\treturn l.cfg.RetryDelay\n}", "func (b *taskBuilder) attempts(a int) {\n\tb.Spec.MaxAttempts = a\n}", "func (b *Block) SetDelay() {\n\tb.Delay = time.Duration(int64(math.Round(b.Delay.Seconds()*(1.0+math.Sqrt(5.0))/2))%3600) * time.Second\n}", "func (globalLoadBalancerMonitor *GlobalLoadBalancerMonitorV1) EnableRetries(maxRetries int, maxRetryInterval time.Duration) {\n\tglobalLoadBalancerMonitor.Service.EnableRetries(maxRetries, maxRetryInterval)\n}", "func (client *Client) SetRetryer() {\n\tclient.SetRetry([]storage.RetryOption{\n\t\tstorage.WithPolicy(storage.RetryAlways),\n\t\tstorage.WithErrorFunc(storage.ShouldRetry),\n\t}...)\n}", "func WithRetries(i int) CallOption {\n\treturn func(o *CallOptions) {\n\t\to.Retries = i\n\t}\n}", "func (p *Plugin) SetTimeout(t 
time.Duration) {\n\tp.mu.Lock()\n\tp.timeout = t\n\tp.mu.Unlock()\n}", "func (o *CreatePolicyResetItemParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func (r *TimeoutReader) SetTimeout(t time.Duration) time.Duration {\n\tprev := r.t\n\tr.t = t\n\treturn prev\n}", "func (o *PostLolGameflowV1ReconnectParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}", "func (b *Backoff) Retry(ctx context.Context, retry RetryFunc) error {\n\tif err := b.validate(); err != nil {\n\t\treturn err\n\t}\n\n\tfor attempt := 0; attempt < b.Iterations; attempt++ {\n\t\tif retry(attempt) {\n\t\t\treturn contextDoneOr(ctx, nil)\n\t\t}\n\n\t\tif attempt == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tmultiple := float64(uint(1) << (uint(attempt) - 1))\n\n\t\tif b.Jitter != 0 {\n\t\t\tj := (((b.Jitter * rand.Float64()) - (b.Jitter / 2)) / 100)\n\t\t\tmultiple += multiple * j\n\t\t}\n\n\t\tselect {\n\t\tcase <-time.After(b.Coefficient * time.Duration(multiple)):\n\t\t\tcontinue\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\t}\n\t}\n\n\treturn contextDoneOr(ctx, ErrRetriesExhausted)\n}", "func (cl *Client) SetTimeout(secs int) {\n\tif secs <= 0 {\n\t\tsecs = clientDefaultTimeoutSecs\n\t}\n\tcl.Timeout = time.Duration(secs) * time.Second\n}", "func (g *Gopher) SetDelay(d time.Duration) {\n\tg.mu.Lock()\n\tdefer g.mu.Unlock()\n\tg.delay = d\n}", "func MaxRetries(maxRetries uint) DefaultBackoffPolicyOption {\n\treturn func(dbp *DefaultBackoffPolicy) {\n\t\tdbp.MaxRetries = maxRetries\n\t}\n}", "func configureDiscoveryRetry() {\n\tdiscoveryRetry = defaultRetry\n\n\tif v := os.Getenv(registryDiscoveryRetryEnvName); v != \"\" {\n\t\tretry, err := strconv.Atoi(v)\n\t\tif err == nil && retry > 0 {\n\t\t\tdiscoveryRetry = retry\n\t\t}\n\t}\n}", "func InitRetryClient() *RetryHTTPClient {\n\trc := rhttp.NewClient()\n\t// Replace default timeout \"0\" for http.client\n\trc.HTTPClient.Timeout = timeout\n\trc.Logger = log.New()\n\trc.RetryMax = defaultRetryMax\n\trc.CheckRetry = defaultRetryPolicy\n\t//rc.Backoff = rhttp.LinearJitterBackoff\n\n\t// Replace default config for http.Transport\n\tt := rc.HTTPClient.Transport.(*http.Transport)\n\tt.MaxConnsPerHost = defaultMaxConnsPerHost\n\tt.MaxIdleConns = defaultMaxIdleConns\n\tt.MaxIdleConnsPerHost = defaultMaxIdleConnsPerHost\n\tt.IdleConnTimeout = defaultIdleConnTimeout\n\t//t.ExpectContinueTimeout = defaultExpectContinueTimeout\n\n\treturn &RetryHTTPClient{rc}\n}" ]
[ "0.6929946", "0.67542446", "0.6693405", "0.6526574", "0.6397074", "0.63898647", "0.6365026", "0.6311", "0.6271505", "0.6238838", "0.6207614", "0.61254764", "0.60295117", "0.6028012", "0.6021742", "0.5974879", "0.5930354", "0.59214675", "0.5837585", "0.5833836", "0.57983696", "0.5793158", "0.57495826", "0.57379323", "0.5736484", "0.5639807", "0.55551755", "0.5457537", "0.54340804", "0.54310596", "0.54290986", "0.5420329", "0.54179144", "0.54134536", "0.53607124", "0.53553677", "0.5334603", "0.5331392", "0.52953774", "0.52904034", "0.52881575", "0.52852994", "0.52758425", "0.52710646", "0.52668273", "0.52351075", "0.5226176", "0.52211475", "0.5199754", "0.5182418", "0.51780486", "0.51742274", "0.5171724", "0.51662606", "0.51593673", "0.5153791", "0.51059073", "0.510461", "0.50966597", "0.5093622", "0.50772005", "0.50636375", "0.5059374", "0.5055514", "0.5047978", "0.5034578", "0.50013715", "0.49878272", "0.49784875", "0.49728677", "0.49702215", "0.49580583", "0.4936965", "0.4929336", "0.49290094", "0.49285284", "0.4912322", "0.48966506", "0.48795325", "0.4875408", "0.4862785", "0.4848011", "0.48396048", "0.48305655", "0.48244023", "0.48237488", "0.48113126", "0.48074588", "0.47881413", "0.47857", "0.47825086", "0.47788456", "0.47787985", "0.4773316", "0.4760425", "0.47601527", "0.475339", "0.47525388", "0.4732547", "0.47310284" ]
0.83682585
0
Build method returns a serverConfig struct.
func (co *serverConfig) Build() serverConfig { return serverConfig{ URL: co.URL, Retry: co.Retry, RetryWaitTime: co.RetryWaitTime, } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *TLSConfig) BuildServerConfig(host string) *tls.Config {\n\tif c == nil {\n\t\t// use default TLS settings, if config is empty.\n\t\treturn &tls.Config{\n\t\t\tServerName: host,\n\t\t\tInsecureSkipVerify: true,\n\t\t\tVerifyConnection: makeVerifyServerConnection(&TLSConfig{\n\t\t\t\tVerification: VerifyFull,\n\t\t\t}),\n\t\t}\n\t}\n\n\tconfig := c.ToConfig()\n\tconfig.ServerName = host\n\tconfig.VerifyConnection = makeVerifyServerConnection(c)\n\treturn config\n}", "func (serv *Server) Config() Config {\n return serv.config\n}", "func NewConfig() ServerConfigBuilder {\n\treturn &serverConfig{}\n}", "func (bu *Builder) Build() *Server {\n\treturn bu.srv\n}", "func (c *Config) Build() *ConfProxy {\n\tif c.Enable {\n\t\tswitch c.Etcd.Enable {\n\t\tcase true:\n\t\t\txlog.Info(\"plugin\", xlog.String(\"appConf.etcd\", \"start\"))\n\t\t\treturn NewConfProxy(c.Enable, etcd.NewETCDDataSource(c.Prefix))\n\t\tdefault:\n\t\t\txlog.Info(\"plugin\", xlog.String(\"appConf.mysql\", \"start\"))\n\t\t}\n\t\t// todo mysql implement\n\t}\n\treturn nil\n}", "func (c *Configs) Server() *ServerConfig {\n\treturn c.chassixConfigs[KeyServerConfig].(*ServerConfig)\n}", "func New() *Config {\n\t// Keep only one instance of server config\n\tif serverConfig != nil {\n\t\treturn serverConfig\n\t}\n\n\t// Get the current environment\n\tenvironment := getEnv(\"_APP_ENV\", \"development\")\n\n\t// If not running on stating nor production, fallback to local configs\n\tif environment != \"staging\" && environment != \"production\" {\n\t\tserverConfig = &Config{\n\t\t\tEnvironment: \"development\",\n\t\t\tDelay: 5,\n\t\t\tMonitoringQuantity: 5,\n\t\t}\n\t\treturn serverConfig\n\t}\n\n\t// For prod and staging\n\tserverConfig = &Config{\n\t\tEnvironment: environment,\n\t}\n\n\treturn serverConfig\n}", "func newConfigServer() *ConfigServer {\n\treturn &ConfigServer{}\n}", "func BuildConfig(opt ClientOptions) (*rest.Config, error) {\n\tvar cfg *rest.Config\n\tvar err error\n\n\tmaster := opt.Master\n\tkubeconfig := opt.KubeConfig\n\tcfg, err = clientcmd.BuildConfigFromFlags(master, kubeconfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcfg.QPS = opt.QPS\n\tcfg.Burst = opt.Burst\n\n\treturn cfg, nil\n}", "func Config() ServerConfig {\n\treturn defaultServerConfig\n}", "func (c *Config) Build() {\n\tc.Interval = viper.GetDuration(configInterval)\n\tc.MaxKeepedImageFiles = viper.GetInt(configMaxKeepedImageFiles)\n\tc.CameraURL = viper.GetString(configCameraURL)\n\tc.ImagePath = viper.GetString(configImagePath)\n\tc.AlertImagePath = viper.GetString(configAlertImagePath)\n\tc.Treshold = viper.GetInt(configTreshold)\n\tc.AlertHandlers = viper.GetStringSlice(configAlertHandlers)\n\tc.HTTPEnabled = viper.GetBool(configHTTPEnabled)\n\tc.HTTPAddr = viper.GetString(configHTTPAddr)\n\tc.MetricsEnabled = viper.GetBool(configMetricsEnabled)\n\tc.MetricsAddr = viper.GetString(configMetricsAddr)\n\n\tif viper.GetBool(configVerbose) {\n\t\tc.LogLevel = log.DebugLevel\n\t} else {\n\t\tc.LogLevel = log.InfoLevel\n\t}\n}", "func (cb *ConfigBuilder) Build() *gojmx.JMXConfig {\n\treturn cb.config\n}", "func NewServerConfig() *ServerConfig {\n\treturn &ServerConfig{\n\t\tCookieAge: 24,\n\t\tTokenAge: 5,\n\t\tMetaSuffix: \".meta\",\n\t\tACLSuffix: \".acl\",\n\t\tDataSkin: \"tabulator\",\n\t\tDirIndex: []string{\"index.html\", \"index.htm\"},\n\t\tDirSkin: \"http://linkeddata.github.io/warp/#list/\",\n\t\tSignUpSkin: \"http://linkeddata.github.io/signup/?tab=signup&endpointUrl=\",\n\t\tDiskLimit: 100000000, // 100MB\n\t\tDataRoot: 
serverDefaultRoot(),\n\t}\n}", "func (s *Server) Config() ServerConfig {\n\treturn s.cfg\n}", "func NewServerConfig() *domain.ServerConfig {\n\tlog.Println(\"load config\")\n\treturn &domain.ServerConfig{\n\t\tListenHTTP: \":8080\",\n\t\tListenHTTPS: \":8443\",\n\t\tDebug: true,\n\t\tCookieAge: 8736, // hours (1 year)\n\t\tTokenAge: 5,\n\t\tHSTS: false,\n\t\tWebIDTLS: true,\n\t\tMetaSuffix: \".meta\",\n\t\tACLSuffix: \".acl\",\n\t\tDataApp: \"tabulator\",\n\t\tDirIndex: []string{\"index.html\", \"index.htm\"},\n\t\tDirApp: \"http://linkeddata.github.io/warp/#list/\",\n\t\tSignUpApp: \"https://solid.github.io/solid-signup/?domain=\",\n\t\tDiskLimit: 100000000, // 100MB\n\t\tDataRoot: serverDefaultRoot(),\n\t\tBoltPath: filepath.Join(os.TempDir(), \"bolt.db\"),\n\t\tProxyLocal: true,\n\t}\n}", "func ServerConfig(globalConfig *viper.Viper) *viper.Viper {\n\treturn subconfig(globalConfig, serverConfigKey)\n}", "func (c CompletedConfig) New() (*GenericAPIServer, error) {\n\ts := &GenericAPIServer{\n\t\t//SecureServingInfo: c.SecureServing,\n\t\t//InsecureServingInfo: c.InsecureServing,\n\t\tmode: c.Mode,\n\t\thealthz: c.Healthz,\n\t\t//enableMetrics: c.EnableMetrics,\n\t\t//enableProfiling: c.EnableProfiling,\n\t\tmiddlewares: c.Middlewares,\n\t\tEngine: gin.New(),\n\t}\n\n\tinitGenericAPIServer(s)\n\n\treturn s, nil\n}", "func buildConfig(opts []Option) (*Config, error) {\n\tcfg := &Config{\n\t\tkeyPrefix: DefKeyPrefix,\n\t}\n\n\tfor _, opt := range opts {\n\t\tif err := opt(cfg); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn cfg, nil\n}", "func NewServerConfig() *Server {\n\tport := \"5000\"\n\tmode := \"debug\"\n\tfileServiceEndPoint := \"http://localhost:5000/files\"\n\taccountServiceEndPoint := \"http://localhost:5000/accounts\"\n\tprofileServiceEndPoint := \"http://localhost:5000/profiles\"\n\n\tif env := os.Getenv(\"PORT\"); env != \"\" {\n\t\tport = env\n\t}\n\tif env := os.Getenv(\"MODE\"); env != \"\" {\n\t\tmode = env\n\t}\n\tif env := os.Getenv(\"FILE_API_ADDRESS\"); env != \"\" {\n\t\tfileServiceEndPoint = env\n\t}\n\tif env := os.Getenv(\"ACCOUNT_API_ADDRESS\"); env != \"\" {\n\t\taccountServiceEndPoint = env\n\t}\n\tif env := os.Getenv(\"PROFILE_API_ADDRESS\"); env != \"\" {\n\t\tprofileServiceEndPoint = env\n\t}\n\tserver := &Server{\n\t\tport: port,\n\t\tmode: mode,\n\t\tfileServiceEndPoint: fileServiceEndPoint,\n\t\taccountServiceEndPoint: accountServiceEndPoint,\n\t\tprofileServiceEndPoint: profileServiceEndPoint,\n\t}\n\tif server.mode != \"release\" && server.mode != \"debug\" {\n\t\tpanic(\"Unavailable gin mode\")\n\t}\n\treturn server\n}", "func NewServerConfig(cfg *Config) *server.Config {\n\treturn &cfg.serverConfig\n}", "func NewServerConfig(hbInfo *HeartbeatInfo, opc, dpc, aspID, tmt, nwApr, corrID uint32, rtCtxs []uint32, si, ni, mp, sls uint8) *Config {\n\treturn &Config{\n\t\tHeartbeatInfo: hbInfo,\n\t\tAspIdentifier: params.NewAspIdentifier(aspID),\n\t\tTrafficModeType: params.NewTrafficModeType(tmt),\n\t\tNetworkAppearance: params.NewNetworkAppearance(nwApr),\n\t\tRoutingContexts: params.NewRoutingContext(rtCtxs...),\n\t\tCorrelationID: params.NewCorrelationID(corrID),\n\t\tOriginatingPointCode: opc,\n\t\tDestinationPointCode: dpc,\n\t\tServiceIndicator: si,\n\t\tNetworkIndicator: ni,\n\t\tMessagePriority: mp,\n\t\tSignalingLinkSelection: sls,\n\t}\n}", "func newConfig() *Config {\n\n\tc := &Config{}\n\tvar logLevel, bServers, dServers string\n\n\tflag.StringVar(&c.ControlAddress, \"controlAddress\", \"localhost:4000\",\n\t\t\"Control process IP 
address, default localhost:4000\")\n\n\tflag.BoolVar(&c.Broadcast, \"broadcast\", true,\n\t\t\"Set to false to squash actual broadcast.\")\n\n\tflag.IntVar(&c.Bclients, \"bClients\", 1,\n\t\t\"The number of broadcast clients; Default 1\")\n\n\tflag.IntVar(&c.Dclients, \"dClients\", 1,\n\t\t\"The number of deliver clients; Default 1\")\n\n\tflag.IntVar(&c.Channels, \"channels\", 1,\n\t\t\"The number of channels; Default 1\")\n\n\tflag.StringVar(&bServers, \"bServers\", \"\",\n\t\t\"A comma-separated list of IP:PORT of broadcast servers to target; Required\")\n\n\tflag.StringVar(&dServers, \"dServers\", \"\",\n\t\t\"A comma-separated list of IP:PORT of deliver servers to target; Defaults to broadcast szervers\")\n\n\tflag.IntVar(&c.Transactions, \"transactions\", 1,\n\t\t\"The number of transactions broadcast to each client's servers; Default 1\")\n\n\tflag.IntVar(&c.Payload, \"payload\", TxHeaderSize,\n\t\t\"Payload size in bytes; Minimum/default is the performance header size (56 bytes)\")\n\n\tflag.IntVar(&c.Burst, \"burst\", 1,\n\t\t\"The number of transactions burst to each server during broadcast; Dafault 1\")\n\n\tflag.DurationVar(&c.Delay, \"delay\", 0,\n\t\t\"The delay between bursts, in the form required by time.ParseDuration(); Default is no delay\")\n\n\tflag.IntVar(&c.Window, \"window\", 100,\n\t\t\"The number of blocks allowed to be delivered without an ACK; Default 100\")\n\n\tflag.IntVar(&c.AckEvery, \"ackEvery\", 70,\n\t\t\"The deliver client will ACK every (this many) blocks; Default 70\")\n\n\tflag.DurationVar(&c.Timeout, \"timeout\", 30*time.Second,\n\t\t\"The initialization timeout, in the form required by time.ParseDuration(); Default 30s\")\n\n\tflag.BoolVar(&c.LatencyAll, \"latencyAll\", false,\n\t\t\"By default, only block latencies are reported. 
Set -latencyAll=true to report all transaction latencies\")\n\n\tflag.StringVar(&c.LatencyDir, \"latencyDir\", \"\",\n\t\t\"The directory to contain latency files; These files are only created if -latencyDir is specified\")\n\n\tflag.StringVar(&c.LatencyPrefix, \"latencyPrefix\", \"client\",\n\t\t\"Prefix for latency file names\")\n\n\tflag.StringVar(&logLevel, \"logLevel\", \"info\",\n\t\t\"The global logging level; Default 'info'\")\n\n\tflag.StringVar(&c.ControlLogging, \"controlLogging\", \"\",\n\t\t\"Override logging level for the 'control' process\")\n\n\tflag.StringVar(&c.BroadcastLogging, \"broadcastLogging\", \"\",\n\t\t\"Override logging level for the 'broadcast' processes\")\n\n\tflag.StringVar(&c.DeliverLogging, \"deliverLogging\", \"\",\n\t\t\"Override logging level for the 'deliver' processes\")\n\n\tflag.Parse()\n\n\tif c.ControlLogging == \"\" {\n\t\tc.ControlLogging = logLevel\n\t}\n\tif c.BroadcastLogging == \"\" {\n\t\tc.BroadcastLogging = logLevel\n\t}\n\tif c.DeliverLogging == \"\" {\n\t\tc.DeliverLogging = logLevel\n\t}\n\n\tinitLogging(c.ControlLogging)\n\n\trequireUint16(\"bclients\", c.Bclients)\n\trequireUint16(\"dclients\", c.Dclients)\n\trequireUint16(\"channels\", c.Channels)\n\trequireNonEmpty(\"bServers\", bServers)\n\tif dServers == \"\" {\n\t\tdServers = bServers\n\t}\n\trequireUint32(\"transactions\", c.Transactions)\n\trequirePosInt(\"payload\", c.Payload)\n\tif c.Payload < TxHeaderSize {\n\t\tlogger.Infof(\"Payload size will be set to the default (%d bytes)\\n\",\n\t\t\tTxHeaderSize)\n\t\tc.Payload = TxHeaderSize\n\t}\n\trequirePosInt(\"burst\", c.Burst)\n\trequirePosDuration(\"delay\", c.Delay)\n\trequirePosInt(\"window\", c.Window)\n\trequirePosInt(\"ackevery\", c.AckEvery)\n\trequireLE(\"ackevery\", \"window\", c.AckEvery, c.Window)\n\trequirePosDuration(\"timeout\", c.Timeout)\n\n\tc.Bservers = strings.Split(bServers, \",\")\n\tc.NumBservers = len(c.Bservers)\n\n\tc.Dservers = strings.Split(dServers, \",\")\n\tc.NumDservers = len(c.Dservers)\n\n\tlogger.Infof(\"Configuration\")\n\tlogger.Infof(\" Broadcast Servers: %d: %v\", c.NumBservers, c.Bservers)\n\tlogger.Infof(\" Broadcast Clients: %d\", c.Bclients)\n\tlogger.Infof(\" Deliver Servers : %d: %v\", c.NumDservers, c.Dservers)\n\tlogger.Infof(\" Deliver Clients : %d\", c.Dclients)\n\tlogger.Infof(\" Channels : %d\", c.Channels)\n\tlogger.Infof(\" Transactions : %d\", c.Transactions)\n\tlogger.Infof(\" Payload : %d\", c.Payload)\n\tlogger.Infof(\" Burst : %d\", c.Burst)\n\tlogger.Infof(\" Delay : %s\", c.Delay.String())\n\tlogger.Infof(\" Window : %d\", c.Window)\n\tlogger.Infof(\" AckEvery : %d\", c.AckEvery)\n\tlogger.Infof(\" Broadcast? 
: %v\", c.Broadcast)\n\n\tc.TotalBroadcastClients =\n\t\tuint64(c.NumBservers) * uint64(c.Channels) * uint64(c.Bclients)\n\tc.TxBroadcastPerClient = uint64(c.Transactions)\n\tc.BytesBroadcastPerClient = c.TxBroadcastPerClient * uint64(c.Payload)\n\tc.TotalTxBroadcast = uint64(c.TotalBroadcastClients) * c.TxBroadcastPerClient\n\tc.TotalBytesBroadcast = c.TotalTxBroadcast * uint64(c.Payload)\n\n\tc.TotalDeliverClients =\n\t\tuint64(c.NumDservers) * uint64(c.Channels) * uint64(c.Dclients)\n\tc.TxDeliveredPerClient =\n\t\tuint64(c.NumBservers) * uint64(c.Bclients) * uint64(c.Transactions)\n\tc.BytesDeliveredPerClient = c.TxDeliveredPerClient * uint64(c.Payload)\n\tc.TotalTxDelivered = c.TxDeliveredPerClient * c.TotalDeliverClients\n\tc.TotalBytesDelivered = c.TotalTxDelivered * uint64(c.Payload)\n\n\treturn c\n}", "func serverConfig() (http.ServerConfig, error) {\n\tconfig := http.ServerConfig{}\n\tvar err error\n\tconfig.Port, err = os.GetIntFromEnvVar(\"PORT\", 8080)\n\tif err != nil {\n\t\treturn config, err\n\t}\n\tconfig.TLSEnabled, err = os.GetBoolFromEnvVar(\"TLS_ENABLED\", false)\n\tif err != nil {\n\t\treturn config, err\n\t}\n\tif config.TLSEnabled {\n\t\tconfig.TLSCertPath, err = os.GetRequiredEnvVar(\"TLS_CERT_PATH\")\n\t\tif err != nil {\n\t\t\treturn config, err\n\t\t}\n\t\tconfig.TLSKeyPath, err = os.GetRequiredEnvVar(\"TLS_KEY_PATH\")\n\t\tif err != nil {\n\t\t\treturn config, err\n\t\t}\n\t}\n\treturn config, nil\n}", "func (c *Config) Build() *Godim {\n\tif c.appProfile == nil {\n\t\tc.appProfile = newAppProfile()\n\t}\n\tc.appProfile.lock()\n\tif c.activateES {\n\t\tc.eventSwitch = NewEventSwitch(c.bufferSize)\n\t}\n\treturn NewGodim(c)\n}", "func (b *Builder) Build() Interface {\n\tswitch {\n\tcase b.path != \"\":\n\t\tfSys := fs.NewDocumentFs()\n\t\treturn NewKubeConfig(FromFile(b.path, fSys), InjectFilePath(b.path, fSys), InjectTempRoot(b.root))\n\tcase b.fromParent():\n\t\t// TODO add method that would get kubeconfig from parent cluster and glue it together\n\t\t// with parent kubeconfig if needed\n\t\treturn NewKubeConfig(func() ([]byte, error) {\n\t\t\treturn nil, errors.ErrNotImplemented{}\n\t\t})\n\tcase b.bundlePath != \"\":\n\t\treturn NewKubeConfig(FromBundle(b.bundlePath), InjectTempRoot(b.root))\n\tdefault:\n\t\tfSys := fs.NewDocumentFs()\n\t\t// return default path to kubeconfig file in airship workdir\n\t\tpath := filepath.Join(util.UserHomeDir(), config.AirshipConfigDir, KubeconfigDefaultFileName)\n\t\treturn NewKubeConfig(FromFile(path, fSys), InjectFilePath(path, fSys), InjectTempRoot(b.root))\n\t}\n}", "func (c *config) Build() *dataX.Config {\n\treturn &dataX.Config{}\n}", "func Build(config map[string]interface{}) {\n}", "func CreateServer(serverConfig ServerConfig) {\n\n}", "func (m *Module) Build(s *system.System) {\n\tr := s.CommandRouter\n\n\tt, err := system.NewSubCommandRouter(`^config(\\s|$)`, \"config\")\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\tt.Router.Prefix = \"^\"\n\tr.AddSubrouter(t)\n\n\tt.CommandRoute = &system.CommandRoute{\n\t\tName: \"config\",\n\t\tDesc: \"configures guild settings\",\n\t\tHandler: Auth(CmdConfig),\n\t}\n\n\tk := t.Router\n\tk.On(\"prefix\", Auth(CmdPrefix)).Set(\"\", \"sets the guild command prefix\")\n\tk.On(\"admins\", Auth(CmdAdmins)).Set(\"\", \"sets the admin list\")\n}", "func ParseServerConfig(filename string) ServerConfig {\n\tvar config ServerConfig\n\terr := gcfg.ReadFileInto(&config, filename)\n\tif err != nil {\n\t\tglog.Fatalf(\"Failed to parse gcfg data: %s\", err)\n\t}\n\t// checking 
configuation is sensible\n\tif len(config.Peers.Address) == 0 {\n\t\tglog.Fatal(\"At least one server is required\")\n\t}\n\tif config.Options.Length <= 0 {\n\t\tglog.Fatal(\"Log length must be at least 1\")\n\t}\n\tif config.Options.BatchInterval < 0 {\n\t\tglog.Fatal(\"Batch interval must be positive\")\n\t}\n\tif config.Options.MaxBatch < 0 {\n\t\tglog.Fatal(\"Max batch size must be positive\")\n\t}\n\tif config.Options.DelegateReplication < 0 || config.Options.DelegateReplication > len(config.Peers.Address) {\n\t\tglog.Fatal(\"Batch interval must be positive\")\n\t}\n\tif config.Options.WindowSize <= 0 {\n\t\tglog.Fatal(\"Window Size must be greater than one\")\n\t}\n\tif config.Options.Application != \"kv-store\" && config.Options.Application != \"dummy\" {\n\t\tglog.Fatal(\"Application must be either kv-store or dummy but is \", config.Options.Application)\n\t}\n\t// TODO: check QuorumSystem\n\treturn config\n}", "func GetServerConfig() *ServerConfig {\n\treturn &ServerConfig{\n\t\tTotalNodes: DefaultTotalNodes,\n\t\tServerPort: DefaultServerPort,\n\t}\n}", "func (s *AppServer) Config() *AppConfig {\n\treturn s.config\n}", "func CreateConfigStruct() Config {\n\tfile, err := os.Open(\"config.txt\")\n\tCheckError(err)\n\n\t// Create scanner object and textlines array\n\tscanner := bufio.NewScanner(file)\n\tscanner.Split(bufio.ScanLines)\n\tvar txtlines []string\n\n\t// Loop through file lines, appending to textlines\n\tfor scanner.Scan() {\n\t\ttxtlines = append(txtlines, scanner.Text())\n\t}\n\n\terr = file.Close()\n\tCheckError(err)\n\n\t// Get min delay, max delay, and f\n\tline := strings.Split(txtlines[0], \" \")\n\tmin, _ := strconv.Atoi(line[0])\n\tmax, _ := strconv.Atoi(line[1])\n\tf, _ := strconv.Atoi(line[2])\n\n\t// Get master server info\n\tline = strings.Split(txtlines[1], \" \")\n\tserver := Server{Ip: line[0], Port: line[1], Conns: []net.Conn{}}\n\n\t// Get list of nodes. 
Loop through config file lines, skipping line 1 since it contains delay params\n\tvar nodeList []Node\n\tfor _, line := range txtlines[2:] {\n\t\t// For each line, create node struct and add it to list of nodes\n\t\tlist := strings.Split(line, \" \")\n\t\tinput, err := strconv.ParseFloat(list[1], 64)\n\t\tCheckError(err)\n\t\tnode := Node{Id: list[0], Input: input, Ip: list[2], Port: list[3], Conns: []net.Conn{}}\n\t\tnodeList = append(nodeList, node)\n\t}\n\n\treturn Config{MinDelay: min, MaxDelay: max, F: f, Nodes: nodeList, MServer: server}\n}", "func NewServerConf() *ServerConfig {\n return &ServerConfig{\n HTTPConf: &HTTPConfig{\n RunMode:\tviper.GetString(\"runmode\"),\n Addr:\t\tviper.GetString(\"addr\"),\n Name:\t\tviper.GetString(\"name\"),\n PingMax:\tviper.GetInt(\"PingMax\"),\n },\n RedisConf: &RedisConfig{\n Proto: viper.GetString(\"redis.Proto\"),\n Addr: viper.GetString(\"redis.Addr\"),\n Auth: viper.GetString(\"redis.Auth\"),\n MaxIdle: viper.GetInt(\"redis.MaxIdle\"),\n MaxActive: viper.GetInt(\"redis.MaxActive\"),\n IdleTimeout: time.Duration(viper.GetInt(\"redis.IdleTimeout\")),\n },\n }\n}", "func (o *DatadogMetricsAdapter) Config() (*apiserver.Config, error) {\n\tif err := o.SecureServing.MaybeDefaultWithSelfSignedCerts(\"localhost\", nil, []net.IP{net.ParseIP(\"127.0.0.1\")}); err != nil {\n\t\tlog.Errorf(\"Failed to create self signed AuthN/Z configuration %#v\", err)\n\t\treturn nil, fmt.Errorf(\"error creating self-signed certificates: %v\", err)\n\t}\n\n\tscheme := runtime.NewScheme()\n\tcodecs := serializer.NewCodecFactory(scheme)\n\tserverConfig := genericapiserver.NewConfig(codecs)\n\n\terr := o.SecureServing.ApplyTo(serverConfig)\n\tif err != nil {\n\t\tlog.Errorf(\"Error while converting SecureServing type %v\", err)\n\t\treturn nil, err\n\t}\n\n\t// Get the certificates from the extension-apiserver-authentication ConfigMap\n\tif err := o.Authentication.ApplyTo(&serverConfig.Authentication, serverConfig.SecureServing, nil); err != nil {\n\t\tlog.Errorf(\"Could not create Authentication configuration: %v\", err)\n\t\treturn nil, err\n\t}\n\n\tif err := o.Authorization.ApplyTo(&serverConfig.Authorization); err != nil {\n\t\tlog.Infof(\"Could not create Authorization configuration: %v\", err)\n\t\treturn nil, err\n\t}\n\n\treturn &apiserver.Config{\n\t\tGenericConfig: serverConfig,\n\t}, nil\n}", "func (server *SingleInstance) Config() func(config *neo4j.Config) {\n\treturn server.config\n}", "func (rb *PipelineConfigBuilder) Build() PipelineConfig {\n\treturn *rb.v\n}", "func (c *Config) GenerateServer() (*client.Server, error) {\n\tif c.Server.Name == \"\" {\n\t\treturn nil, validFail(\"name\", c.Server.Name)\n\t}\n\tif c.Server.Host == \"\" {\n\t\treturn nil, validFail(\"host\", c.Server.Host)\n\t}\n\tserver := defaultServer\n\tserver.Name = c.Server.Name\n\tserver.Host = c.Server.Host\n\n\tcopySrtConf(&server.GslbExternalAddress, c.Server.GslbExternalAddress)\n\tcopyIntConf(&server.Weight, c.Server.Weight)\n\tcopySrtConf(&server.HealthMonitor, c.Server.HealthMonitor)\n\tcopyIntConf(&server.ConnLimit, c.Server.ConnLimit)\n\tcopyNumBoolConf(&server.ConnLimitLog, c.Server.ConnLimitLog)\n\tcopyIntConf(&server.ConnResume, c.Server.ConnResume)\n\tcopyNumBoolConf(&server.StatsData, c.Server.StatsData)\n\tcopyNumBoolConf(&server.ExtendedStats, c.Server.ExtendedStats)\n\tcopyNumBoolConf(&server.SlowStart, c.Server.SlowStart)\n\tcopyNumBoolConf(&server.SpoofingCache, c.Server.SpoofingCache)\n\tcopySrtConf(&server.Template, c.Server.Template)\n\n\tfor num, conf := 
range c.Server.PortList {\n\t\tport := defaultPort\n\t\tport.PortNum = num\n\t\tcopyIntConf(&port.Protocol, conf.Protocol)\n\t\tcopyIntConf(&port.Weight, conf.Weight)\n\t\tcopyNumBoolConf(&port.NoSsl, conf.NoSsl)\n\t\tcopyIntConf(&port.ConnLimit, conf.ConnLimit)\n\t\tcopyNumBoolConf(&port.ConnLimitLog, conf.ConnLimitLog)\n\t\tcopyIntConf(&port.ConnResume, conf.ConnResume)\n\t\tcopySrtConf(&port.Template, conf.Template)\n\t\tcopyNumBoolConf(&port.StatsData, conf.StatsData)\n\t\tcopySrtConf(&port.HealthMonitor, conf.HealthMonitor)\n\t\tcopyNumBoolConf(&port.ExtendedStats, conf.ExtendedStats)\n\n\t\tserver.PortList = append(server.PortList, port)\n\t}\n\n\treturn &server, nil\n}", "func BuildNewServer(addr string, n http.Handler, tlsConf *tls.Config) (Server, error) {\n\tif tlsConf == nil {\n\t\treturn nil, errors.New(\"must specify a non-nil tls config\")\n\t}\n\n\tconf := ServerConfig{\n\t\tAddress: addr,\n\t\tHandler: n,\n\t\tTLS: tlsConf,\n\t\tTimeout: time.Minute,\n\t}\n\n\treturn conf.Resolve()\n}", "func (mocb *mqttOptionalConfigurationBuilder) Build() map[string]string {\n\treturn mocb.options\n}", "func CreateConfig() *Config {\n\treturn &Config{\n\t\tGlobal: &Server{},\n\t\tServers: make(map[string]*Server, nAssumedServers),\n\t\tErrors: make([]error, 0),\n\t}\n}", "func (s *Services) Config() *Configuration { return &s.config }", "func BuildConfig(kubecfgPath string, ctx string) (*rest.Config, error) {\n\tif kubecfgPath != \"\" {\n\t\tinfo, err := os.Stat(kubecfgPath)\n\t\tif err != nil || info.Size() == 0 {\n\t\t\t// If the specified kube config file doesn't exist / empty file / any other error\n\t\t\t// from file stat, fall back to default\n\t\t\tkubecfgPath = \"\"\n\t\t}\n\t}\n\n\tloadingRules := clientcmd.NewDefaultClientConfigLoadingRules()\n\tloadingRules.DefaultClientConfig = &clientcmd.DefaultClientConfig\n\tloadingRules.ExplicitPath = kubecfgPath\n\tconfigOverrides := &clientcmd.ConfigOverrides{\n\t\tClusterDefaults: clientcmd.ClusterDefaults,\n\t\tCurrentContext: ctx,\n\t}\n\tcfg, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides).ClientConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsetDefaults(cfg)\n\treturn cfg, nil\n}", "func GetServerConfig() ServerConfig {\n\tHost := viper.GetString(\"host\")\n\tPort := viper.GetInt(\"port\")\n\tConfigFile := viper.ConfigFileUsed()\n\tConfigPath := viper.GetString(\"configPath\")\n\n\treturn ServerConfig{Host, Port, ConfigFile, ConfigPath}\n}", "func (o FunctionOutput) BuildConfig() BuildConfigResponseOutput {\n\treturn o.ApplyT(func(v *Function) BuildConfigResponseOutput { return v.BuildConfig }).(BuildConfigResponseOutput)\n}", "func (c *Configuration) Server() *Server {\n\treturn NewServer(c)\n}", "func (c CompletedConfig) New(name string) (*GenericServer, error) {\n\thandlerChainBuilder := func(handler http.Handler) http.Handler {\n\t\treturn c.BuildHandlerChainFunc(handler, c.Config)\n\t}\n\thandler := NewServerHandler(name, handlerChainBuilder, nil)\n\ts := &GenericServer{\n\t\tHandlerChainWaitGroup: c.HandlerChainWaitGroup,\n\n\t\tSecureServingInfo: c.SecureServingInfo,\n\t\tExternalAddress: c.ExternalAddress,\n\t\tHandler: handler,\n\n\t\tpostStartHooks: map[string]postStartHookEntry{},\n\t\tpreShutdownHooks: map[string]preShutdownHookEntry{},\n\n\t\thealthzChecks: c.HealthzChecks,\n\t}\n\n\tinstallAPI(s, c.Config)\n\n\treturn s, nil\n}", "func (c *Config) Server(name string) *Config {\n\tif len(name) != 0 {\n\t\tif _, ok := c.Servers[name]; !ok {\n\t\t\tc.context = 
&Server{parent: c, Name: name, Host: name}\n\t\t\tc.Servers[name] = c.context\n\t\t} else {\n\t\t\tc.addError(errMsgDuplicateServer)\n\t\t}\n\t} else {\n\t\tc.addError(fmtErrMissing, \"<NONE>\", errHost)\n\t}\n\treturn c\n}", "func buildConfig(log logrus.FieldLogger, masterURL, kubeConfig string) (*rest.Config, error) {\n\tif os.Getenv(\"KUBERNETES_SERVICE_HOST\") != \"\" && os.Getenv(\"KUBERNETES_SERVICE_PORT\") != \"\" {\n\t\t// If these env vars are set, we can build an in-cluster config.\n\t\tlog.Debug(\"Creating in-cluster client\")\n\t\treturn rest.InClusterConfig()\n\t}\n\n\tif masterURL != \"\" || kubeConfig != \"\" {\n\t\tlog.Debug(\"Creating cluster-external client from provided masterURL or kubeconfig\")\n\t\treturn clientcmd.BuildConfigFromFlags(masterURL, kubeConfig)\n\t}\n\n\treturn nil, fmt.Errorf(\"could not create client: missing masterURL or kubeConfig\")\n}", "func NewServerConfig(host string, port int) *ServerConfig {\n\treturn &ServerConfig{Port: port, Host: host}\n}", "func buildConfig(opts []Option) config {\n\tc := config{\n\t\tclock: clock.New(),\n\t\tslack: 10,\n\t\tper: time.Second,\n\t}\n\n\tfor _, opt := range opts {\n\t\topt.apply(&c)\n\t}\n\treturn c\n}", "func NewServerConfig(configTmpl string, target *model.K3OSNode) (*[]byte, error) {\n\ttmpl := configTmpl\n\tif tmpl == \"\" {\n\t\ttmpl = ServerConfigTmpl\n\t}\n\treturn generateConfig(tmpl, target)\n}", "func NewConfig(c *v2.ServerConfig) *Config {\n\treturn &Config{\n\t\tServerName: c.ServerName,\n\t\tLogPath: c.DefaultLogPath,\n\t\tLogLevel: configmanager.ParseLogLevel(c.DefaultLogLevel),\n\t\tLogRoller: c.GlobalLogRoller,\n\t\tGracefulTimeout: c.GracefulTimeout.Duration,\n\t\tUseNetpollMode: c.UseNetpollMode,\n\t}\n}", "func newServerConfig(fname, id, name, passWord, serverKey string) (err error) {\n\tconfig := Config{\n\t\tid,\n\t\tname,\n\t\t\"server\",\n\t\tpassWord,\n\t\tserverKey,\n\t\tDEFAULT_SERVER_URL,\n\t\tDEFAULT_PROCESS_USER,\n\t\tDEFAULT_PROCESS_LOCK,\n\t\tDEFAULT_PROCESS_LOG,\n\t\tDEFAULT_BASE_DIR,\n\t\tDEFAULT_DATA_DIR,\n\t\tDEFAULT_HTTP_LISTEN,\n\t\tfname,\n\t}\n\n\treturn SaveConfig(config)\n}", "func New() (*Config, error) {\n\tflags := pflag.NewFlagSet(os.Args[0], pflag.ExitOnError)\n\tcfg := &Config{\n\t\tFlags: flags,\n\t\tHTTPAddr: flags.StringP(\"http-addr\", \"l\", \":8080\", \"http listen address\"),\n\t\tHTTPReadHeaderTimeout: flags.DurationP(\"http-timeout\", \"h\", 1*time.Second, \"http timeout for reading request headers\"),\n\t\tCallTimeout: flags.DurationP(\"call-timeout\", \"t\", 0*time.Second, \"function call timeout\"),\n\t\tReadLimit: flags.Int64(\"read-limit\", -1, \"limit the amount of data which can be contained in a requests body\"),\n\t\tFramer: flags.StringP(\"framer\", \"f\", \"\", \"afterburn framer to use: line, json or http\"),\n\t\tBuffer: flags.BoolP(\"buffer\", \"b\", false, \"buffer output before writing\"),\n\t}\n\tif err := cfg.parseCommandline(); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := cfg.parseEnvironment(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn cfg, nil\n}", "func New(config *ServerConfig) (*Server, error) {\n\n\tif config == nil {\n\t\treturn nil, fmt.Errorf(\"Must provide configuration\")\n\t}\n\n\t// If no security set, initialize the object as empty\n\tif config.Security == nil {\n\t\tconfig.Security = &SecurityConfig{}\n\t}\n\n\t// Check if the socket is provided to enable the REST gateway to communicate\n\t// to the unix domain socket\n\tif len(config.Socket) == 0 {\n\t\treturn nil, fmt.Errorf(\"Must provide unix domain 
socket for SDK\")\n\t}\n\tif len(config.RestPort) == 0 {\n\t\treturn nil, fmt.Errorf(\"Must provide REST Gateway port for the SDK\")\n\t}\n\n\t// Set default log locations\n\tvar (\n\t\taccessLog, auditLog *os.File\n\t\terr error\n\t)\n\tif config.AuditOutput == nil {\n\t\tauditLog, err = openLog(defaultAuditLog)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tconfig.AuditOutput = auditLog\n\t}\n\tif config.AccessOutput == nil {\n\t\taccessLog, err := openLog(defaultAccessLog)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tconfig.AccessOutput = accessLog\n\t}\n\n\t_, port, err := net.SplitHostPort(config.Address)\n\tif err != nil {\n\t\tlogrus.Warnf(\"SDK Address NOT in host:port format, failed to get port %v\", err.Error())\n\t}\n\tconfig.port = port\n\t// Create a gRPC server on the network\n\tnetServer, err := newSdkGrpcServer(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Create a gRPC server on a unix domain socket\n\tudsConfig := *config\n\tudsConfig.Net = \"unix\"\n\tudsConfig.Address = config.Socket\n\tudsServer, err := newSdkGrpcServer(&udsConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Create REST Gateway and connect it to the unix domain socket server\n\trestGateway, err := newSdkRestGateway(config, udsServer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctx, cancel := context.WithCancel(context.Background())\n\treturn &Server{\n\t\twatcherCtx: ctx,\n\t\twatcherCtxCancel: cancel,\n\t\tconfig: *config,\n\t\tnetServer: netServer,\n\t\tudsServer: udsServer,\n\t\trestGateway: restGateway,\n\t\tauditLog: auditLog,\n\t\taccessLog: accessLog,\n\t}, nil\n}", "func New() (*Config, error) {\n\n\tvar c Config\n\n\tif err := viper.Unmarshal(&c); err != nil {\n\t\treturn nil, err\n\t}\n\n\tserver := &ServerConfig{\n\t\tPort: c.Server.Port,\n\t\tReadTimeOut: c.Server.ReadTimeOut * time.Second,\n\t\tWriteTimeOut: c.Server.WriteTimeOut * time.Second,\n\t}\n\n\tdb := &DatabaseConfig{\n\t\tPort: c.Database.Port,\n\t\tUser: c.Database.User,\n\t\tPassword: c.Database.Password,\n\t\tClient: c.Database.Client,\n\t\tHost: c.Database.Host,\n\t\tDb: c.Database.Db,\n\t}\n\n\treturn &Config{\n\t\tServer: server,\n\t\tDatabase: db,\n\t}, nil\n}", "func (lb *LB) Build(conf config.Config) *LB {\n\tswitch conf.Balancing {\n\tcase \"ip-hash\":\n\t\tih, err := iphash.New(conf.Servers.GetAddress())\n\t\tif err != nil {\n\t\t\tglg.Fatalln(errors.Wrap(err, \"ip-hash algorithm\"))\n\t\t}\n\n\t\tlb.balancing = b.New(ih)\n\t\tlb.Handler = http.HandlerFunc(lb.ipHashBalancing)\n\tcase \"round-robin\":\n\t\trr, err := roundrobin.New(conf.Servers.GetAddress())\n\t\tif err != nil {\n\t\t\tglg.Fatalln(errors.Wrap(err, \"round-robin algorithm\"))\n\t\t}\n\n\t\tlb.balancing = b.New(rr)\n\t\tlb.Handler = http.HandlerFunc(lb.roundRobinBalancing)\n\tcase \"least-connections\":\n\t\tlc, err := leastconnections.New(conf.Servers.GetAddress())\n\t\tif err == nil {\n\t\t\tglg.Fatalln(errors.Wrap(err, \"least-connections algorithm\"))\n\t\t}\n\n\t\tlb.balancing = b.New(lc)\n\t\tlb.Handler = http.HandlerFunc(lb.ipHashBalancing)\n\tdefault:\n\t\tglg.Fatalln(errors.Wrap(ErrInvalidBalancingAlgorithm, conf.Balancing))\n\t}\n\n\treturn lb\n}", "func buildConfig(opts []Option) config {\r\n\tc := config{\r\n\t\tclock: clock.New(),\r\n\t\tmaxSlack: 10,\r\n\t\tper: time.Second,\r\n\t}\r\n\tfor _, opt := range opts {\r\n\t\topt.apply(&c)\r\n\t}\r\n\treturn c\r\n}", "func (builder *appGwConfigBuilder) Build() *network.ApplicationGatewayPropertiesFormat {\n\tconfig := builder.appGwConfig\n\treturn 
&config\n}", "func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget) (*AppsServer, error) {\n\t// completion is done in Complete, no need for a second time\n\tgenericServer, err := c.AppsConfig.GenericConfig.SkipComplete().New(\"apps.openshift.io-apiserver\", delegationTarget)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts := &AppsServer{\n\t\tGenericAPIServer: genericServer,\n\t}\n\n\tv1Storage, err := c.V1RESTStorage()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tparameterCodec := runtime.NewParameterCodec(c.Scheme)\n\tapiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(appsapiv1.GroupName, c.Registry, c.Scheme, parameterCodec, c.Codecs)\n\tapiGroupInfo.GroupMeta.GroupVersion = appsapiv1.SchemeGroupVersion\n\tapiGroupInfo.VersionedResourcesStorageMap[appsapiv1.SchemeGroupVersion.Version] = v1Storage\n\tif err := s.GenericAPIServer.InstallAPIGroup(&apiGroupInfo); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s, nil\n}", "func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget) (*NetworkAPIServer, error) {\n\tgenericServer, err := c.GenericConfig.New(\"network.openshift.io-apiserver\", delegationTarget)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts := &NetworkAPIServer{\n\t\tGenericAPIServer: genericServer,\n\t}\n\n\tv1Storage, err := c.V1RESTStorage()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tapiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(networkapiv1.GroupName, c.ExtraConfig.Scheme, metav1.ParameterCodec, c.ExtraConfig.Codecs)\n\tapiGroupInfo.VersionedResourcesStorageMap[networkapiv1.SchemeGroupVersion.Version] = v1Storage\n\tif err := s.GenericAPIServer.InstallAPIGroup(&apiGroupInfo); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s, nil\n}", "func (c *Config) Build() weather.Provider {\n\t// Build the OWM URL.\n\twURL := url.URL{\n\t\tScheme: \"http\",\n\t\tHost: \"api.wunderground.com\",\n\t\tPath: fmt.Sprintf(\"/api/%s/conditions/q/%s.json\", c.apiKey, c.query),\n\t}\n\treturn Provider(wURL.String())\n}", "func New() *Config {\n\tcfg := ddconfig.SystemProbe\n\tsysconfig.Adjust(cfg)\n\n\tc := &Config{\n\t\tConfig: *ebpf.NewConfig(),\n\n\t\tNPMEnabled: cfg.GetBool(join(netNS, \"enabled\")),\n\t\tServiceMonitoringEnabled: cfg.GetBool(join(smNS, \"enabled\")),\n\t\tDataStreamsEnabled: cfg.GetBool(join(dsNS, \"enabled\")),\n\n\t\tCollectTCPv4Conns: cfg.GetBool(join(netNS, \"collect_tcp_v4\")),\n\t\tCollectTCPv6Conns: cfg.GetBool(join(netNS, \"collect_tcp_v6\")),\n\t\tTCPConnTimeout: 2 * time.Minute,\n\t\tTCPClosedTimeout: 1 * time.Second,\n\n\t\tCollectUDPv4Conns: cfg.GetBool(join(netNS, \"collect_udp_v4\")),\n\t\tCollectUDPv6Conns: cfg.GetBool(join(netNS, \"collect_udp_v6\")),\n\t\tUDPConnTimeout: defaultUDPTimeoutSeconds * time.Second,\n\t\tUDPStreamTimeout: defaultUDPStreamTimeoutSeconds * time.Second,\n\n\t\tOffsetGuessThreshold: uint64(cfg.GetInt64(join(spNS, \"offset_guess_threshold\"))),\n\t\tExcludedSourceConnections: cfg.GetStringMapStringSlice(join(spNS, \"source_excludes\")),\n\t\tExcludedDestinationConnections: cfg.GetStringMapStringSlice(join(spNS, \"dest_excludes\")),\n\n\t\tMaxTrackedConnections: uint32(cfg.GetInt64(join(spNS, \"max_tracked_connections\"))),\n\t\tMaxClosedConnectionsBuffered: uint32(cfg.GetInt64(join(spNS, \"max_closed_connections_buffered\"))),\n\t\tClosedConnectionFlushThreshold: cfg.GetInt(join(spNS, \"closed_connection_flush_threshold\")),\n\t\tClosedChannelSize: cfg.GetInt(join(spNS, \"closed_channel_size\")),\n\t\tMaxConnectionsStateBuffered: 
cfg.GetInt(join(spNS, \"max_connection_state_buffered\")),\n\t\tClientStateExpiry: 2 * time.Minute,\n\n\t\tDNSInspection: !cfg.GetBool(join(spNS, \"disable_dns_inspection\")),\n\t\tCollectDNSStats: cfg.GetBool(join(spNS, \"collect_dns_stats\")),\n\t\tCollectLocalDNS: cfg.GetBool(join(spNS, \"collect_local_dns\")),\n\t\tCollectDNSDomains: cfg.GetBool(join(spNS, \"collect_dns_domains\")),\n\t\tMaxDNSStats: cfg.GetInt(join(spNS, \"max_dns_stats\")),\n\t\tMaxDNSStatsBuffered: 75000,\n\t\tDNSTimeout: time.Duration(cfg.GetInt(join(spNS, \"dns_timeout_in_s\"))) * time.Second,\n\n\t\tProtocolClassificationEnabled: cfg.GetBool(join(netNS, \"enable_protocol_classification\")),\n\n\t\tEnableHTTPMonitoring: cfg.GetBool(join(smNS, \"enable_http_monitoring\")),\n\t\tEnableHTTP2Monitoring: cfg.GetBool(join(smNS, \"enable_http2_monitoring\")),\n\t\tEnableHTTPSMonitoring: cfg.GetBool(join(netNS, \"enable_https_monitoring\")),\n\t\tEnableIstioMonitoring: cfg.GetBool(join(smNS, \"enable_istio_monitoring\")),\n\t\tMaxHTTPStatsBuffered: cfg.GetInt(join(smNS, \"max_http_stats_buffered\")),\n\t\tMaxKafkaStatsBuffered: cfg.GetInt(join(smNS, \"max_kafka_stats_buffered\")),\n\n\t\tMaxTrackedHTTPConnections: cfg.GetInt64(join(smNS, \"max_tracked_http_connections\")),\n\t\tHTTPNotificationThreshold: cfg.GetInt64(join(smNS, \"http_notification_threshold\")),\n\t\tHTTPMaxRequestFragment: cfg.GetInt64(join(smNS, \"http_max_request_fragment\")),\n\n\t\tEnableConntrack: cfg.GetBool(join(spNS, \"enable_conntrack\")),\n\t\tConntrackMaxStateSize: cfg.GetInt(join(spNS, \"conntrack_max_state_size\")),\n\t\tConntrackRateLimit: cfg.GetInt(join(spNS, \"conntrack_rate_limit\")),\n\t\tConntrackRateLimitInterval: 3 * time.Second,\n\t\tEnableConntrackAllNamespaces: cfg.GetBool(join(spNS, \"enable_conntrack_all_namespaces\")),\n\t\tIgnoreConntrackInitFailure: cfg.GetBool(join(netNS, \"ignore_conntrack_init_failure\")),\n\t\tConntrackInitTimeout: cfg.GetDuration(join(netNS, \"conntrack_init_timeout\")),\n\t\tEnableEbpfConntracker: true,\n\t\tAllowNetlinkConntrackerFallback: cfg.GetBool(join(netNS, \"allow_netlink_conntracker_fallback\")),\n\n\t\tEnableGatewayLookup: cfg.GetBool(join(netNS, \"enable_gateway_lookup\")),\n\n\t\tEnableMonotonicCount: cfg.GetBool(join(spNS, \"windows.enable_monotonic_count\")),\n\n\t\tRecordedQueryTypes: cfg.GetStringSlice(join(netNS, \"dns_recorded_query_types\")),\n\n\t\tEnableProcessEventMonitoring: cfg.GetBool(join(evNS, \"network_process\", \"enabled\")),\n\t\tMaxProcessesTracked: cfg.GetInt(join(evNS, \"network_process\", \"max_processes_tracked\")),\n\n\t\tEnableRootNetNs: cfg.GetBool(join(netNS, \"enable_root_netns\")),\n\n\t\tHTTPMapCleanerInterval: time.Duration(cfg.GetInt(join(smNS, \"http_map_cleaner_interval_in_s\"))) * time.Second,\n\t\tHTTPIdleConnectionTTL: time.Duration(cfg.GetInt(join(smNS, \"http_idle_connection_ttl_in_s\"))) * time.Second,\n\n\t\t// Service Monitoring\n\t\tEnableJavaTLSSupport: cfg.GetBool(join(smjtNS, \"enabled\")),\n\t\tJavaAgentDebug: cfg.GetBool(join(smjtNS, \"debug\")),\n\t\tJavaAgentArgs: cfg.GetString(join(smjtNS, \"args\")),\n\t\tJavaAgentAllowRegex: cfg.GetString(join(smjtNS, \"allow_regex\")),\n\t\tJavaAgentBlockRegex: cfg.GetString(join(smjtNS, \"block_regex\")),\n\t\tEnableGoTLSSupport: cfg.GetBool(join(smNS, \"enable_go_tls_support\")),\n\t\tEnableHTTPStatsByStatusCode: cfg.GetBool(join(smNS, \"enable_http_stats_by_status_code\")),\n\t}\n\n\thttpRRKey := join(smNS, \"http_replace_rules\")\n\trr, err := parseReplaceRules(cfg, httpRRKey)\n\tif err != nil 
{\n\t\tlog.Errorf(\"error parsing %q: %v\", httpRRKey, err)\n\t} else {\n\t\tc.HTTPReplaceRules = rr\n\t}\n\n\tif !c.CollectTCPv4Conns {\n\t\tlog.Info(\"network tracer TCPv4 tracing disabled\")\n\t}\n\tif !c.CollectUDPv4Conns {\n\t\tlog.Info(\"network tracer UDPv4 tracing disabled\")\n\t}\n\tif !c.CollectTCPv6Conns {\n\t\tlog.Info(\"network tracer TCPv6 tracing disabled\")\n\t}\n\tif !c.CollectUDPv6Conns {\n\t\tlog.Info(\"network tracer UDPv6 tracing disabled\")\n\t}\n\tif !c.DNSInspection {\n\t\tlog.Info(\"network tracer DNS inspection disabled by configuration\")\n\t}\n\n\tc.EnableKafkaMonitoring = c.DataStreamsEnabled\n\n\tif c.EnableProcessEventMonitoring {\n\t\tlog.Info(\"network process event monitoring enabled\")\n\t}\n\treturn c\n}", "func newConfig() *Config {\n\treturn &Config{\n\t\tgeneral{\n\t\t\tVerbose: false,\n\t\t},\n\t\tserver{\n\t\t\tType: \"http\",\n\t\t\tHost: \"0.0.0.0\",\n\t\t},\n\t\tmongo{\n\t\t\tHost: \"0.0.0.0:27017\",\n\t\t\tDatabase: \"etlog\",\n\t\t\tCollection: \"logs\",\n\t\t},\n\t}\n}", "func NewServer(r io.Reader, ct string) Server {\n\tviper.SetConfigType(ct)\n\n\tviper.SetDefault(\"Port\", \":8080\")\n\tviper.SetDefault(\"DatabaseType\", \"sqlite3\")\n\tviper.SetDefault(\"DatabaseName\", \"2q2r.db\")\n\tviper.SetDefault(\"ExpirationTime\", 1*time.Minute)\n\tviper.SetDefault(\"CleanTime\", 30*time.Second)\n\tviper.SetDefault(\"ListenerExpirationTime\", 3*time.Minute)\n\tviper.SetDefault(\"RecentlyCompletedExpirationTime\", 5*time.Second)\n\tviper.SetDefault(\"BaseURL\", \"127.0.0.1\")\n\tviper.SetDefault(\"HTTPS\", true)\n\tviper.SetDefault(\"LogRequests\", false)\n\tviper.SetDefault(\"AuthenticationRequiredRoutes\", []string{\n\t\t\"/*/register/request/*\",\n\t})\n\tviper.SetDefault(\"Base64EncodedPublicKey\", \"mypubkey\")\n\tviper.SetDefault(\"KeyType\", \"ECC-P256\")\n\tviper.SetDefault(\"PrivateKeyEncrypted\", false)\n\n\terr := viper.ReadConfig(r)\n\tif err != nil {\n\t\tlog.Printf(\"Could not read config file! 
Using default options\\n\")\n\t}\n\n\tc := &Config{\n\t\tPort: viper.GetString(\"Port\"),\n\t\tDatabaseType: viper.GetString(\"DatabaseType\"),\n\t\tDatabaseName: viper.GetString(\"DatabaseName\"),\n\t\tExpirationTime: viper.GetDuration(\"ExpirationTime\"),\n\t\tCleanTime: viper.GetDuration(\"CleanTime\"),\n\t\tListenerExpirationTime: viper.GetDuration(\"ListenerExpirationTime\"),\n\t\tRecentlyCompletedExpirationTime: viper.GetDuration(\"RecentlyCompletedExpirationTime\"),\n\t\tBaseURL: viper.GetString(\"BaseURL\"),\n\t\tHTTPS: viper.GetBool(\"HTTPS\"),\n\t\tLogRequests: viper.GetBool(\"LogRequests\"),\n\t\tCertFile: viper.GetString(\"CertFile\"),\n\t\tKeyFile: viper.GetString(\"KeyFile\"),\n\t\tAuthenticationRequiredRoutes: viper.GetStringSlice(\"AuthenticationRequiredRoutes\"),\n\t\tBase64EncodedPublicKey: viper.GetString(\"Base64EncodedPublicKey\"),\n\t\tKeyType: viper.GetString(\"KeyType\"),\n\t\tPrivateKeyFile: viper.GetString(\"PrivateKeyFile\"),\n\t\tPrivateKeyEncrypted: viper.GetBool(\"PrivateKeyEncrypted\"),\n\t\tPrivateKeyPassword: viper.GetString(\"PrivateKeyPassword\"),\n\t}\n\n\t// Load the Tera Insights RSA public key\n\tpubKey := \"MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA16QwDL9Hyk1vKK2a8\" +\n\t\t\"wCmdiz/0da1ciRJ6z08jQxkfEzPVgrM+Vb8Qq/yS3tcLEA/VD+tucTzwzmZxbg5GvLz\" +\n\t\t\"ygyGoYuIVKhaCq598FCZlnqVHlOqa3b0Gg28I9CsJNXOntiYKff3d0KJ7v2HC2kZvL7\" +\n\t\t\"AnJkw7HxFv5bJCb3NPzfZJ3uLCKuWlG6lowG9pcoys7fogdJP8yrcQQarTQMDxPucY2\" +\n\t\t\"4HBvnP44mBzN3cBLg7sy6p7ZqBJbggrP6EQx2uwFyd5pW0INNW7wBx/wf/kEAQJEuBz\" +\n\t\t\"OKkBQWuR4q7aThFfKNyfklRZ0dgrRQegjMkMy5s9Bwe2cou45VzzA7rSQIDAQAB\"\n\tblock, _ := base64.StdEncoding.DecodeString(pubKey)\n\tpub, err := x509.ParsePKIXPublicKey(block)\n\tif err != nil {\n\t\tpanic(errors.Wrap(err, \"Failed to parse server's public key\"))\n\t}\n\n\t// Read the elliptic private key\n\tfile, err := os.Open(c.PrivateKeyFile)\n\tif err != nil {\n\t\tpanic(errors.Wrap(err, \"Couldn't open private key file\"))\n\t}\n\n\tder := []byte{}\n\t_, err = file.Read(der)\n\tif err != nil {\n\t\tpanic(errors.Wrap(err, \"Couldn't read private key file\"))\n\t}\n\n\tp, _ := pem.Decode(der)\n\tif p == nil {\n\t\tpanic(errors.Wrap(err, \"File was not PEM-formatted\"))\n\t}\n\n\tvar key []byte\n\tif c.PrivateKeyEncrypted {\n\t\tkey, err = x509.DecryptPEMBlock(p, []byte(c.PrivateKeyPassword))\n\t\tif err != nil {\n\t\t\tpanic(errors.Wrap(err, \"Couldn't decrypt private key\"))\n\t\t}\n\t} else {\n\t\tkey = p.Bytes\n\t}\n\n\tpriv, err := x509.ParseECPrivateKey(key)\n\tif err != nil {\n\t\tpanic(errors.Wrap(err, \"Couldn't parse file as DER-encoded ECDSA \"+\n\t\t\t\"private key\"))\n\t}\n\n\tdb, err := gorm.Open(c.DatabaseType, c.DatabaseName)\n\tif err != nil {\n\t\tpanic(errors.Wrap(err, \"Could not open database\"))\n\t}\n\n\terr = db.AutoMigrate(&AppInfo{}).\n\t\tAutoMigrate(&AppServerInfo{}).\n\t\tAutoMigrate(&Key{}).\n\t\tAutoMigrate(&Admin{}).\n\t\tAutoMigrate(&KeySignature{}).\n\t\tAutoMigrate(&SigningKey{}).\n\t\tAutoMigrate(&Permission{}).Error\n\tif err != nil {\n\t\tpanic(errors.Wrap(err, \"Could not migrate schemas\"))\n\t}\n\n\treturn Server{\n\t\tc,\n\t\tdb,\n\t\tcacher{\n\t\t\tbaseURL: c.getBaseURLWithProtocol(),\n\t\t\texpiration: c.ExpirationTime,\n\t\t\tclean: c.CleanTime,\n\t\t\tregistrationRequests: cache.New(c.ExpirationTime, c.CleanTime),\n\t\t\tauthenticationRequests: cache.New(c.ExpirationTime, c.CleanTime),\n\t\t\tchallengeToRequestID: cache.New(c.ExpirationTime, c.CleanTime),\n\t\t\tadmins: cache.New(c.ExpirationTime, 
c.CleanTime),\n\t\t\tsigningKeys: cache.New(c.ExpirationTime, c.CleanTime),\n\t\t\tadminRegistrations: cache.New(c.ExpirationTime, c.CleanTime),\n\t\t\tvalidPublicKeys: cache.New(cache.NoExpiration,\n\t\t\t\tcache.NoExpiration),\n\t\t},\n\t\tpub.(*rsa.PublicKey),\n\t\tpriv,\n\t}\n}", "func New() *Config {\n\treturn &Config{\n\t\tWebsite: Website{\n\t\t\tURL: \"localhost\",\n\t\t\tHTTPPort: \":80\",\n\t\t\tHTTPSPort: \":443\",\n\t\t\tCert: \"cert.pem\",\n\t\t\tKey: \"key.pem\",\n\t\t\tDirectory: \"app/public\",\n\t\t},\n\t}\n}", "func NewConfig(conf dynamic.Configuration) *Configuration {\n\tif conf.HTTP == nil && conf.TCP == nil && conf.UDP == nil {\n\t\treturn &Configuration{}\n\t}\n\n\truntimeConfig := &Configuration{}\n\n\tif conf.HTTP != nil {\n\t\trouters := conf.HTTP.Routers\n\t\tif len(routers) > 0 {\n\t\t\truntimeConfig.Routers = make(map[string]*RouterInfo, len(routers))\n\t\t\tfor k, v := range routers {\n\t\t\t\truntimeConfig.Routers[k] = &RouterInfo{Router: v, Status: StatusEnabled}\n\t\t\t}\n\t\t}\n\n\t\tservices := conf.HTTP.Services\n\t\tif len(services) > 0 {\n\t\t\truntimeConfig.Services = make(map[string]*ServiceInfo, len(services))\n\t\t\tfor k, v := range services {\n\t\t\t\truntimeConfig.Services[k] = &ServiceInfo{Service: v, Status: StatusEnabled}\n\t\t\t}\n\t\t}\n\n\t\tmiddlewares := conf.HTTP.Middlewares\n\t\tif len(middlewares) > 0 {\n\t\t\truntimeConfig.Middlewares = make(map[string]*MiddlewareInfo, len(middlewares))\n\t\t\tfor k, v := range middlewares {\n\t\t\t\truntimeConfig.Middlewares[k] = &MiddlewareInfo{Middleware: v, Status: StatusEnabled}\n\t\t\t}\n\t\t}\n\t}\n\n\tif conf.TCP != nil {\n\t\tif len(conf.TCP.Routers) > 0 {\n\t\t\truntimeConfig.TCPRouters = make(map[string]*TCPRouterInfo, len(conf.TCP.Routers))\n\t\t\tfor k, v := range conf.TCP.Routers {\n\t\t\t\truntimeConfig.TCPRouters[k] = &TCPRouterInfo{TCPRouter: v, Status: StatusEnabled}\n\t\t\t}\n\t\t}\n\n\t\tif len(conf.TCP.Services) > 0 {\n\t\t\truntimeConfig.TCPServices = make(map[string]*TCPServiceInfo, len(conf.TCP.Services))\n\t\t\tfor k, v := range conf.TCP.Services {\n\t\t\t\truntimeConfig.TCPServices[k] = &TCPServiceInfo{TCPService: v, Status: StatusEnabled}\n\t\t\t}\n\t\t}\n\n\t\tif len(conf.TCP.Middlewares) > 0 {\n\t\t\truntimeConfig.TCPMiddlewares = make(map[string]*TCPMiddlewareInfo, len(conf.TCP.Middlewares))\n\t\t\tfor k, v := range conf.TCP.Middlewares {\n\t\t\t\truntimeConfig.TCPMiddlewares[k] = &TCPMiddlewareInfo{TCPMiddleware: v, Status: StatusEnabled}\n\t\t\t}\n\t\t}\n\t}\n\n\tif conf.UDP != nil {\n\t\tif len(conf.UDP.Routers) > 0 {\n\t\t\truntimeConfig.UDPRouters = make(map[string]*UDPRouterInfo, len(conf.UDP.Routers))\n\t\t\tfor k, v := range conf.UDP.Routers {\n\t\t\t\truntimeConfig.UDPRouters[k] = &UDPRouterInfo{UDPRouter: v, Status: StatusEnabled}\n\t\t\t}\n\t\t}\n\n\t\tif len(conf.UDP.Services) > 0 {\n\t\t\truntimeConfig.UDPServices = make(map[string]*UDPServiceInfo, len(conf.UDP.Services))\n\t\t\tfor k, v := range conf.UDP.Services {\n\t\t\t\truntimeConfig.UDPServices[k] = &UDPServiceInfo{UDPService: v, Status: StatusEnabled}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn runtimeConfig\n}", "func getServer(st *storage.Storage) *settings.Server {\n\troot := v.GetString(\"root\")\n\troot, err := filepath.Abs(root)\n\tcheckErr(err)\n\tserver := &settings.Server{}\n\tserver.BaseURL = v.GetString(\"baseurl\")\n\tserver.Root = root\n\tserver.Address = v.GetString(\"address\")\n\tserver.Port = v.GetString(\"port\")\n\tserver.TLSKey = v.GetString(\"key\")\n\tserver.TLSCert = 
v.GetString(\"cert\")\n\tserver.Log = v.GetString(\"log\")\n\treturn server\n}", "func parseServerConfig(file io.Reader) (serverConfig ServerConfig, err error) {\n\tserverConfig = ServerConfig{}\n\tcontents, err := ioutil.ReadAll(file)\n\tif err != nil {\n\t\treturn ServerConfig{}, err\n\t}\n\terr = json.Unmarshal(contents, &serverConfig)\n\tif err != nil {\n\t\treturn ServerConfig{}, err\n\t}\n\treturn serverConfig, nil\n}", "func BuildDBConfig() *DBConfig {\n\tdbConfig := DBConfig{\n\t\tHost: \"127.0.0.1\",\n\t\tPort: 3306,\n\t\tUser: \"root\",\n\t\tPassword: \"admin\",\n\t\tDBName: \"WhereYouAtFriend\",\n\t}\n\treturn &dbConfig\n}", "func Build(kubeClient *client.Client) (*RouterConfig, error) {\n\t// Get all relevant information from k8s:\n\t// deis-router rc\n\t// All services with label \"routable=true\"\n\t// deis-builder service, if it exists\n\t// These are used to construct a model...\n\trouterRC, err := getRC(kubeClient)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tappServices, err := getAppServices(kubeClient)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// builderService might be nil if it's not found and that's ok.\n\tbuilderService, err := getBuilderService(kubeClient)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tplatformCertSecret, err := getSecret(kubeClient, \"deis-router-platform-cert\", namespace)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Build the model...\n\trouterConfig, err := build(kubeClient, routerRC, platformCertSecret, appServices, builderService)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn routerConfig, nil\n}", "func MakeConfig(c *def.App) (out *nine.Config) {\n\tC := c.Cats\n\tvar configFile string\n\tvar tn, sn, rn bool\n\tout = &nine.Config{\n\t\tConfigFile: &configFile,\n\t\tAppDataDir: C.Str(\"app\", \"appdatadir\"),\n\t\tDataDir: C.Str(\"app\", \"datadir\"),\n\t\tLogDir: C.Str(\"app\", \"logdir\"),\n\t\tLogLevel: C.Str(\"log\", \"level\"),\n\t\tSubsystems: C.Map(\"log\", \"subsystem\"),\n\t\tNetwork: C.Str(\"p2p\", \"network\"),\n\t\tAddPeers: C.Tags(\"p2p\", \"addpeer\"),\n\t\tConnectPeers: C.Tags(\"p2p\", \"connect\"),\n\t\tMaxPeers: C.Int(\"p2p\", \"maxpeers\"),\n\t\tListeners: C.Tags(\"p2p\", \"listen\"),\n\t\tDisableListen: C.Bool(\"p2p\", \"nolisten\"),\n\t\tDisableBanning: C.Bool(\"p2p\", \"disableban\"),\n\t\tBanDuration: C.Duration(\"p2p\", \"banduration\"),\n\t\tBanThreshold: C.Int(\"p2p\", \"banthreshold\"),\n\t\tWhitelists: C.Tags(\"p2p\", \"whitelist\"),\n\t\tUsername: C.Str(\"rpc\", \"user\"),\n\t\tPassword: C.Str(\"rpc\", \"pass\"),\n\t\tServerUser: C.Str(\"rpc\", \"user\"),\n\t\tServerPass: C.Str(\"rpc\", \"pass\"),\n\t\tLimitUser: C.Str(\"limit\", \"user\"),\n\t\tLimitPass: C.Str(\"limit\", \"pass\"),\n\t\tRPCConnect: C.Str(\"rpc\", \"connect\"),\n\t\tRPCListeners: C.Tags(\"rpc\", \"listen\"),\n\t\tRPCCert: C.Str(\"tls\", \"cert\"),\n\t\tRPCKey: C.Str(\"tls\", \"key\"),\n\t\tRPCMaxClients: C.Int(\"rpc\", \"maxclients\"),\n\t\tRPCMaxWebsockets: C.Int(\"rpc\", \"maxwebsockets\"),\n\t\tRPCMaxConcurrentReqs: C.Int(\"rpc\", \"maxconcurrentreqs\"),\n\t\tRPCQuirks: C.Bool(\"rpc\", \"quirks\"),\n\t\tDisableRPC: C.Bool(\"rpc\", \"disable\"),\n\t\tNoTLS: C.Bool(\"tls\", \"disable\"),\n\t\tDisableDNSSeed: C.Bool(\"p2p\", \"nodns\"),\n\t\tExternalIPs: C.Tags(\"p2p\", \"externalips\"),\n\t\tProxy: C.Str(\"proxy\", \"address\"),\n\t\tProxyUser: C.Str(\"proxy\", \"user\"),\n\t\tProxyPass: C.Str(\"proxy\", \"pass\"),\n\t\tOnionProxy: C.Str(\"proxy\", \"address\"),\n\t\tOnionProxyUser: C.Str(\"proxy\", 
\"user\"),\n\t\tOnionProxyPass: C.Str(\"proxy\", \"pass\"),\n\t\tOnion: C.Bool(\"proxy\", \"tor\"),\n\t\tTorIsolation: C.Bool(\"proxy\", \"isolation\"),\n\t\tTestNet3: &tn,\n\t\tRegressionTest: &rn,\n\t\tSimNet: &sn,\n\t\tAddCheckpoints: C.Tags(\"chain\", \"addcheckpoints\"),\n\t\tDisableCheckpoints: C.Bool(\"chain\", \"disablecheckpoints\"),\n\t\tDbType: C.Str(\"chain\", \"dbtype\"),\n\t\tProfile: C.Int(\"app\", \"profile\"),\n\t\tCPUProfile: C.Str(\"app\", \"cpuprofile\"),\n\t\tUpnp: C.Bool(\"app\", \"upnp\"),\n\t\tMinRelayTxFee: C.Float(\"p2p\", \"minrelaytxfee\"),\n\t\tFreeTxRelayLimit: C.Float(\"p2p\", \"freetxrelaylimit\"),\n\t\tNoRelayPriority: C.Bool(\"p2p\", \"norelaypriority\"),\n\t\tTrickleInterval: C.Duration(\"p2p\", \"trickleinterval\"),\n\t\tMaxOrphanTxs: C.Int(\"p2p\", \"maxorphantxs\"),\n\t\tAlgo: C.Str(\"mining\", \"algo\"),\n\t\tGenerate: C.Bool(\"mining\", \"generate\"),\n\t\tGenThreads: C.Int(\"mining\", \"genthreads\"),\n\t\tMiningAddrs: C.Tags(\"mining\", \"addresses\"),\n\t\tMinerListener: C.Str(\"mining\", \"listener\"),\n\t\tMinerPass: C.Str(\"mining\", \"pass\"),\n\t\tBlockMinSize: C.Int(\"block\", \"minsize\"),\n\t\tBlockMaxSize: C.Int(\"block\", \"maxsize\"),\n\t\tBlockMinWeight: C.Int(\"block\", \"minweight\"),\n\t\tBlockMaxWeight: C.Int(\"block\", \"maxweight\"),\n\t\tBlockPrioritySize: C.Int(\"block\", \"prioritysize\"),\n\t\tUserAgentComments: C.Tags(\"p2p\", \"useragentcomments\"),\n\t\tNoPeerBloomFilters: C.Bool(\"p2p\", \"nobloomfilters\"),\n\t\tNoCFilters: C.Bool(\"p2p\", \"nocfilters\"),\n\t\tSigCacheMaxSize: C.Int(\"chain\", \"sigcachemaxsize\"),\n\t\tBlocksOnly: C.Bool(\"p2p\", \"blocksonly\"),\n\t\tTxIndex: C.Bool(\"chain\", \"txindex\"),\n\t\tAddrIndex: C.Bool(\"chain\", \"addrindex\"),\n\t\tRelayNonStd: C.Bool(\"chain\", \"relaynonstd\"),\n\t\tRejectNonStd: C.Bool(\"chain\", \"rejectnonstd\"),\n\t\tTLSSkipVerify: C.Bool(\"tls\", \"skipverify\"),\n\t\tWallet: C.Bool(\"wallet\", \"enable\"),\n\t\tNoInitialLoad: C.Bool(\"wallet\", \"noinitialload\"),\n\t\tWalletPass: C.Str(\"wallet\", \"pass\"),\n\t\tWalletServer: C.Str(\"wallet\", \"server\"),\n\t\tCAFile: C.Str(\"tls\", \"cafile\"),\n\t\tOneTimeTLSKey: C.Bool(\"tls\", \"onetime\"),\n\t\tServerTLS: C.Bool(\"tls\", \"server\"),\n\t\tLegacyRPCListeners: C.Tags(\"rpc\", \"listen\"),\n\t\tLegacyRPCMaxClients: C.Int(\"rpc\", \"maxclients\"),\n\t\tLegacyRPCMaxWebsockets: C.Int(\"rpc\", \"maxwebsockets\"),\n\t\tExperimentalRPCListeners: &[]string{},\n\t\tState: node.StateCfg,\n\t}\n\treturn\n}", "func GetConfig() common.AresServerConfig {\n\treturn config\n}", "func (conf PostgresConfig) Build() string {\n\tconst formatParam = \"%s=%s \"\n\tvar buffer bytes.Buffer\n\n\tif conf.Database != \"\" {\n\t\tbuffer.WriteString(fmt.Sprintf(formatParam, \"dbname\", conf.Database))\n\t}\n\n\tif conf.UserID != \"\" {\n\t\tbuffer.WriteString(fmt.Sprintf(formatParam, \"user\", conf.UserID))\n\t}\n\n\tif conf.Password != \"\" {\n\t\tbuffer.WriteString(fmt.Sprintf(formatParam, \"password\", conf.Password))\n\t}\n\n\tif conf.Host != nil {\n\t\tbuffer.WriteString(fmt.Sprintf(formatParam, \"host\", *conf.Host))\n\t}\n\n\tif conf.Port != nil {\n\t\tbuffer.WriteString(fmt.Sprintf(formatParam, \"port\", strconv.Itoa(*conf.Port)))\n\t}\n\n\tif conf.SslMode != \"\" {\n\t\tbuffer.WriteString(fmt.Sprintf(formatParam, \"sslmode\", conf.SslMode))\n\t}\n\n\tif conf.ConnectionTimeout != nil {\n\t\tbuffer.WriteString(fmt.Sprintf(formatParam, \"connect_timeout\", strconv.Itoa(*conf.ConnectionTimeout)))\n\t}\n\n\treturn 
buffer.String()\n}", "func New() Config {\n\treturn Config{\n\t\tDatabase: \"messaging\",\n\t\tThreadColl: \"thread\",\n\t\tMessageColl: \"message\",\n\t}\n}", "func MakeServerConfig(b []byte) (*ServerConfig, error) {\n\tc := &ServerConfig{}\n\terr := json.Unmarshal(b, c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn c, nil\n}", "func CreateConfig(apiServerURL, kubeCfgPath string) (*rest.Config, error) {\n\tvar (\n\t\tconfig *rest.Config\n\t\terr error\n\t)\n\n\tswitch {\n\t// If the apiServerURL and the kubeCfgPath are empty then we can try getting\n\t// the rest.Config from the InClusterConfig\n\tcase apiServerURL == \"\" && kubeCfgPath == \"\":\n\t\tif config, err = inClusterConfig(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tcase kubeCfgPath != \"\":\n\t\tif config, err = clientcmd.BuildConfigFromFlags(\"\", kubeCfgPath); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tcase strings.HasPrefix(apiServerURL, \"https://\"):\n\t\tif config, err = rest.InClusterConfig(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tconfig.Host = apiServerURL\n\tdefault:\n\t\tconfig = &rest.Config{Host: apiServerURL}\n\t}\n\n\treturn config, nil\n}", "func GetConfig(fname string) (config Config, err error) {\n\tconfig = Config{}\n\n\tdata, err := simplejson.Load(fname)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tconfig.Id = data.Get(\"id\").MustString(\"\")\n\tconfig.Name = data.Get(\"name\").MustString(\"\")\n\tconfig.Role = data.Get(\"role\").MustString(\"\")\n\tconfig.PassWord = data.Get(\"password\").MustString(\"\")\n\tconfig.ServerKey = data.Get(\"server_key\").MustString(\"\")\n\tconfig.ServerUrl = data.Get(\"server_url\").MustString(\"\")\n\tconfig.DaemonUser = data.Get(\"daemon_user\").MustString(\"\")\n\tconfig.PidFile = data.Get(\"pid_file\").MustString(\"\")\n\tconfig.LogFile = data.Get(\"log_file\").MustString(\"\")\n\tconfig.BaseDir = data.Get(\"base_dir\").MustString(\"\")\n\tconfig.DataDir = data.Get(\"data_dir\").MustString(\"\")\n\tconfig.HttpListen = data.Get(\"http_listen\").MustString(\"\")\n\tconfig.File = fname\n\n\tif config.Id == \"\" {\n\t\treturn config, errors.New(\"missing id config\")\n\t}\n\n\tif config.Role == \"\" {\n\t\treturn config, errors.New(\"missing role config\")\n\t}\n\n\tif config.ServerKey == \"\" {\n\t\treturn config, errors.New(\"missing server_key config\")\n\t}\n\n\tif config.PidFile == \"\" {\n\t\treturn config, errors.New(\"missing pid_file config\")\n\t}\n\n\tif config.LogFile == \"\" {\n\t\treturn config, errors.New(\"missing log_file config\")\n\t}\n\n\tif config.BaseDir == \"\" {\n\t\treturn config, errors.New(\"missing base_dir config\")\n\t}\n\n\tif config.Role == \"server\" {\n\t\tif config.PassWord == \"\" {\n\t\t\treturn config, errors.New(\"missing password config\")\n\t\t}\n\t\tif config.DataDir == \"\" {\n\t\t\treturn config, errors.New(\"missing data_dir config\")\n\t\t}\n\t\tif config.HttpListen == \"\" {\n\t\t\treturn config, errors.New(\"missing http_listen config\")\n\t\t}\n\t} else {\n\t\tif config.ServerUrl == \"\" {\n\t\t\treturn config, errors.New(\"missing server_url config\")\n\t\t}\n\t}\n\n\tif !strings.HasSuffix(config.BaseDir, \"/\") {\n\t\tconfig.BaseDir += \"/\"\n\t}\n\n\tif strings.HasSuffix(config.ServerUrl, \"/\") {\n\t\tconfig.ServerUrl = config.ServerUrl[:len(config.ServerUrl)-1]\n\t}\n\n\terr = SaveConfig(config)\n\n\treturn\n}", "func NewBuilder() *Builder {\n\treturn &Builder{\n\t\tnew(Server),\n\t}\n}", "func New(config Config) *Server {\n\treturn &Server{\n\t\tconfig: config,\n\t\tregistrars: 
make([]Registration, 0, 1),\n\t}\n}", "func createOpenAPIBuilderConfig() *common.Config {\n\treturn &common.Config{\n\t\tProtocolList: []string{\"https\"},\n\t\tIgnorePrefixes: []string{\"/swaggerapi\"},\n\t\tInfo: &spec.Info{\n\t\t\tInfoProps: spec.InfoProps{\n\t\t\t\tTitle: \"Argo-Events\",\n\t\t\t\tVersion: \"v0.6\",\n\t\t\t},\n\t\t},\n\t}\n}", "func (b *AdapterBase) Config() (*apiserver.Config, error) {\n\tif b.config == nil {\n\t\tb.InstallFlags() // just to be sure\n\n\t\tconfig, err := b.CustomMetricsAdapterServerOptions.Config()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tb.config = config\n\t}\n\n\treturn b.config, nil\n}", "func Config(dsn string, watch bool) Option {\n\treturn func(s *Server) error {\n\t\tconfigStore, err := config.NewStore(dsn, watch)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to apply Config option\")\n\t\t}\n\n\t\ts.configStore = configStore\n\t\treturn nil\n\t}\n}", "func New(c context.Context) (config.Interface, error) {\n\tsettings, err := FetchCachedSettings(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif settings.ConfigServiceURL == \"\" {\n\t\treturn nil, ErrNotConfigured\n\t}\n\n\tc = client.UseServiceAccountTransport(c, nil, nil)\n\tcfg := remote.New(c, settings.ConfigServiceURL+\"/_ah/api/config/v1/\")\n\tif settings.CacheExpirationSec != 0 {\n\t\tf := NewCacheFilter(c, time.Duration(settings.CacheExpirationSec)*time.Second)\n\t\tcfg = f(c, cfg)\n\t}\n\treturn cfg, nil\n}", "func GetCfg(appVersion string, gitRevision string) (Cfg, error) {\n\tversion := getopt.BoolLong(\"version\", 'V', \"Print version information and exit.\")\n\tlistPlugins := getopt.BoolLong(\"list-plugins\", 'l', \"Print the list of plugins.\")\n\thelp := getopt.BoolLong(\"help\", 'h', \"Print usage information and exit\")\n\trevalOnly := getopt.BoolLong(\"revalidate-only\", 'y', \"Whether to exclude files not named 'regex_revalidate.config'\")\n\tdir := getopt.StringLong(\"dir\", 'D', \"\", \"ATS config directory, used for config files without location parameters or with relative paths. May be blank. If blank and any required config file location parameter is missing or relative, will error.\")\n\tviaRelease := getopt.BoolLong(\"via-string-release\", 'r', \"Whether to use the Release value from the RPM package as a replacement for the ATS version specified in the build that is returned in the Via and Server headers from ATS.\")\n\tdnsLocalBind := getopt.BoolLong(\"dns-local-bind\", 'b', \"Whether to use the server's Service Addresses to set the ATS DNS local bind address.\")\n\tdisableParentConfigComments := getopt.BoolLong(\"disable-parent-config-comments\", 'c', \"Disable adding a comments to parent.config individual lines\")\n\tdefaultEnableH2 := getopt.BoolLong(\"default-client-enable-h2\", '2', \"Whether to enable HTTP/2 on Delivery Services by default, if they have no explicit Parameter. This is irrelevant if ATS records.config is not serving H2. If omitted, H2 is disabled.\")\n\tdefaultTLSVersionsStr := getopt.StringLong(\"default-client-tls-versions\", 'T', \"\", \"Comma-delimited list of default TLS versions for Delivery Services with no Parameter, e.g. '--default-tls-versions=1.1,1.2,1.3'. If omitted, all versions are enabled.\")\n\tnoOutgoingIP := getopt.BoolLong(\"no-outgoing-ip\", 'i', \"Whether to not set the records.config outgoing IP to the server's addresses in Traffic Ops. Default is false.\")\n\tatsVersion := getopt.StringLong(\"ats-version\", 'a', \"\", \"The ATS version, e.g. 9.1.2-42.abc123.el7.x86_64. 
If omitted, generation will attempt to get the ATS version from the Server Parameters, and fall back to lib/go-atscfg.DefaultATSVersion\")\n\tverbosePtr := getopt.CounterLong(\"verbose\", 'v', `Log verbosity. Logging is output to stderr. By default, errors are logged. To log warnings, pass '-v'. To log info, pass '-vv'. To omit error logging, see '-s'`)\n\tsilentPtr := getopt.BoolLong(\"silent\", 's', `Silent. Errors are not logged, and the 'verbose' flag is ignored. If a fatal error occurs, the return code will be non-zero but no text will be output to stderr`)\n\tcache := getopt.StringLong(\"cache\", 'C', \"ats\", \"Cache server type. Generate configuration files for specific cache server type, e.g. 'ats', 'varnish'.\")\n\n\tconst useStrategiesFlagName = \"use-strategies\"\n\tconst defaultUseStrategies = t3cutil.UseStrategiesFlagFalse\n\tuseStrategiesPtr := getopt.EnumLong(useStrategiesFlagName, 0, []string{string(t3cutil.UseStrategiesFlagTrue), string(t3cutil.UseStrategiesFlagCore), string(t3cutil.UseStrategiesFlagFalse), string(t3cutil.UseStrategiesFlagCore), \"\"}, \"\", \"[true | core| false] whether to generate config using strategies.yaml instead of parent.config. If true use the parent_select plugin, if 'core' use ATS core strategies, if false use parent.config.\")\n\n\tconst goDirectFlagName = \"go-direct\"\n\tgoDirectPtr := getopt.StringLong(goDirectFlagName, 'G', \"false\", \"[true|false|old] default will set go_direct to false, you can set go_direct true, or old will be based on opposite of parent_is_proxy directive.\")\n\n\tgetopt.Parse()\n\n\tif *version {\n\t\tcfg := &Cfg{Version: appVersion, GitRevision: gitRevision}\n\t\tfmt.Println(cfg.AppVersion())\n\t\tos.Exit(0)\n\t} else if *help {\n\t\tgetopt.PrintUsage(os.Stdout)\n\t\tos.Exit(0)\n\t} else if *listPlugins {\n\t\treturn Cfg{ListPlugins: true}, nil\n\t}\n\n\tlogLocationError := log.LogLocationStderr\n\tlogLocationWarn := log.LogLocationNull\n\tlogLocationInfo := log.LogLocationNull\n\tlogLocationDebug := log.LogLocationNull\n\tif *silentPtr {\n\t\tlogLocationError = log.LogLocationNull\n\t} else {\n\t\tif *verbosePtr >= 1 {\n\t\t\tlogLocationWarn = log.LogLocationStderr\n\t\t}\n\t\tif *verbosePtr >= 2 {\n\t\t\tlogLocationInfo = log.LogLocationStderr\n\t\t\tlogLocationDebug = log.LogLocationStderr // t3c only has 3 verbosity options: none (-s), error (default or --verbose=0), warning (-v), and info (-vv). Any code calling log.Debug is treated as Info.\n\t\t}\n\t}\n\n\tif *verbosePtr > 2 {\n\t\treturn Cfg{}, errors.New(\"Too many verbose options. 
The maximum log verbosity level is 2 (-vv or --verbose=2) for errors (0), warnings (1), and info (2)\")\n\t}\n\n\t// The flag takes the full version, for forward-compatibility in case we need it in the future,\n\t// but we only need the major version at the moment.\n\tatsMajorVersion := uint(0)\n\tif *atsVersion != \"\" {\n\t\terr := error(nil)\n\t\tatsMajorVersion, err = atscfg.GetATSMajorVersionFromATSVersion(*atsVersion)\n\t\tif err != nil {\n\t\t\treturn Cfg{}, errors.New(\"parsing ATS version '\" + *atsVersion + \"': \" + err.Error())\n\t\t}\n\t}\n\n\tdefaultTLSVersions := atscfg.DefaultDefaultTLSVersions\n\n\t*defaultTLSVersionsStr = strings.TrimSpace(*defaultTLSVersionsStr)\n\tif len(*defaultTLSVersionsStr) > 0 {\n\t\tdefaultTLSVersionsStrs := strings.Split(*defaultTLSVersionsStr, \",\")\n\n\t\tdefaultTLSVersions = []atscfg.TLSVersion{}\n\t\tfor _, tlsVersionStr := range defaultTLSVersionsStrs {\n\t\t\ttlsVersion := atscfg.StringToTLSVersion(tlsVersionStr)\n\t\t\tif tlsVersion == atscfg.TLSVersionInvalid {\n\t\t\t\treturn Cfg{}, errors.New(\"unknown TLS Version '\" + tlsVersionStr + \"' in '\" + *defaultTLSVersionsStr + \"'\")\n\t\t\t}\n\t\t\tdefaultTLSVersions = append(defaultTLSVersions, tlsVersion)\n\t\t}\n\t}\n\n\tif !getopt.IsSet(useStrategiesFlagName) {\n\t\t*useStrategiesPtr = defaultUseStrategies.String()\n\t}\n\n\tswitch *goDirectPtr {\n\tcase \"false\", \"true\", \"old\":\n\tdefault:\n\t\treturn Cfg{}, errors.New(goDirectFlagName + \" should be false, true, or old\")\n\t}\n\n\tcfg := Cfg{\n\t\tLogLocationErr: logLocationError,\n\t\tLogLocationWarn: logLocationWarn,\n\t\tLogLocationInfo: logLocationInfo,\n\t\tLogLocationDebug: logLocationDebug,\n\t\tListPlugins: *listPlugins,\n\t\tRevalOnly: *revalOnly,\n\t\tDir: *dir,\n\t\tViaRelease: *viaRelease,\n\t\tSetDNSLocalBind: *dnsLocalBind,\n\t\tNoOutgoingIP: *noOutgoingIP,\n\t\tATSMajorVersion: atsMajorVersion,\n\t\tParentComments: !(*disableParentConfigComments),\n\t\tDefaultEnableH2: *defaultEnableH2,\n\t\tDefaultTLSVersions: defaultTLSVersions,\n\t\tVersion: appVersion,\n\t\tGitRevision: gitRevision,\n\t\tUseStrategies: t3cutil.UseStrategiesFlag(*useStrategiesPtr),\n\t\tGoDirect: *goDirectPtr,\n\t\tCache: *cache,\n\t}\n\tif err := log.InitCfg(cfg); err != nil {\n\t\treturn Cfg{}, errors.New(\"Initializing loggers: \" + err.Error() + \"\\n\")\n\t}\n\treturn cfg, nil\n}", "func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget) (*OAuthAPIServer, error) {\n\tdelegateAPIServer := delegationTarget\n\tvar err error\n\n\tdelegateAPIServer, err = c.withOAuthAPIServer(delegateAPIServer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdelegateAPIServer, err = c.withUserAPIServer(delegateAPIServer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgenericServer, err := c.GenericConfig.New(\"oauth-apiserver\", delegateAPIServer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts := &OAuthAPIServer{\n\t\tGenericAPIServer: genericServer,\n\t}\n\n\treturn s, nil\n}", "func New(cfg *Config) *Server {\n\tdefaultConfig(cfg)\n\tlog.Printf(\"%+v\\n\", cfg)\n\treturn &Server{\n\t\tcfg: cfg,\n\t\thandlers: make([]connectionHandler, cfg.Count),\n\t\tevents: make(chan eventWithData, cfg.Count),\n\t}\n}", "func (htpc *HttpProcessorConfig) Build() stream.StreamHandler {\n\tclient := htpc.customClient\n\tif client == nil {\n\t\ttransport := &http.Transport{\n\t\t\tDial: func(network, addr string) (net.Conn, error) {\n\t\t\t\treturn net.DialTimeout(network, addr, htpc.connectionTimeout)\n\t\t\t},\n\t\t}\n\t\tclient = 
&http.Client{\n\t\t\tTransport: transport,\n\t\t}\n\t}\n\treturn &httpProcessor{cfg: *htpc, client: client}\n}", "func New() *Config {\n\treturn &Config{\n\t\tEncryptor: &encryption.KMSEncryptor{\n\t\t\tKMS: kms.New(session.New(), &aws.Config{Region: aws.String(os.Getenv(\"EC2_REGION\"))}),\n\t\t},\n\t\tdata: (unsafe.Pointer)(&configData{\n\t\t\tbody: new(sjson.Json),\n\t\t\tdecrypted: make(map[uint64]*sjson.Json),\n\t\t}),\n\t\tobservers: make([]chan bool, 0),\n\t}\n}", "func newConfig() *config {\n\treturn &config{\n\t\tAddr: \":80\",\n\t\tCacheSize: 1000,\n\t\tLogLevel: \"info\",\n\t\tRequestTimeout: 3000,\n\t\tTargetAddr: \"https://places.aviasales.ru\",\n\t}\n}", "func NewFromConfig(config ServerConfig) (*Server, error) {\n\tlogger := config.Logger\n\tconf := config.Config\n\n\tret := &Server{\n\t\tConfig: conf,\n\t\t// Control whether Veneur should emit metric\n\t\t// \"veneur.flush.unique_timeseries_total\", which may come at a slight\n\t\t// performance hit to workers.\n\t\tCountUniqueTimeseries: conf.CountUniqueTimeseries,\n\t\t// This must come before worker initialization. We need to initialize\n\t\t// workers with state from *Server.IsWorker.\n\t\tenableProfiling: conf.EnableProfiling,\n\t\tForwardAddr: conf.ForwardAddress,\n\t\tgrpcListenAddress: conf.GrpcAddress,\n\t\tHostname: conf.Hostname,\n\t\tHistogramPercentiles: conf.Percentiles,\n\t\tHTTPAddr: conf.HTTPAddress,\n\t\tHTTPClient: &http.Client{\n\t\t\t// make sure that POSTs to datadog do not overflow the flush interval\n\t\t\tTimeout: conf.Interval * 9 / 10,\n\t\t\tTransport: &http.Transport{\n\t\t\t\t// If we're idle more than one interval something is up\n\t\t\t\tIdleConnTimeout: conf.Interval * 2,\n\t\t\t},\n\t\t},\n\t\tHttpCustomHandlers: config.HttpCustomHandlers,\n\t\tInterval: conf.Interval,\n\t\tlogger: logrus.NewEntry(config.Logger),\n\t\tmetricMaxLength: conf.MetricMaxLength,\n\t\tnumListeningHTTP: new(int32),\n\t\tnumReaders: conf.NumReaders,\n\t\tparser: samplers.NewParser(conf.ExtendTags),\n\t\tRcvbufBytes: conf.ReadBufferSizeBytes,\n\t\t// closed in Shutdown; Same approach and http.Shutdown\n\t\tshutdown: make(chan struct{}),\n\t\ttraceMaxLengthBytes: conf.TraceMaxLengthBytes,\n\t\tSpanChan: make(chan *ssf.SSFSpan, conf.SpanChannelCapacity),\n\t\tstuckIntervals: conf.FlushWatchdogMissedFlushes,\n\t\tsynchronizeInterval: conf.SynchronizeWithInterval,\n\t\t// Allocate the slice, we'll fill it with workers later.\n\t\tWorkers: make([]*Worker, max(1, conf.NumWorkers)),\n\t}\n\n\tret.HistogramAggregates.Value = 0\n\tfor _, agg := range conf.Aggregates {\n\t\tret.HistogramAggregates.Value += samplers.AggregatesLookup[agg]\n\t}\n\tret.HistogramAggregates.Count = len(conf.Aggregates)\n\n\tscopes, err := scopesFromConfig(conf)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\tret.Statsd = scopedstatsd.NewClient(config.Statsd, conf.VeneurMetricsAdditionalTags, scopes)\n\n\tret.TraceClient, err = trace.NewChannelClient(ret.SpanChan,\n\t\ttrace.ReportStatistics(config.Statsd, 1*time.Second, []string{\"ssf_format:internal\"}),\n\t\tnormalizeSpans(conf),\n\t)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif conf.Debug {\n\t\tlogger.SetLevel(logrus.DebugLevel)\n\t}\n\n\tmpf := 0\n\tif conf.MutexProfileFraction > 0 {\n\t\tmpf = runtime.SetMutexProfileFraction(conf.MutexProfileFraction)\n\t}\n\n\tlogger.WithFields(logrus.Fields{\n\t\t\"MutexProfileFraction\": conf.MutexProfileFraction,\n\t\t\"previousMutexProfileFraction\": mpf,\n\t}).Info(\"Set mutex profile fraction\")\n\n\tif conf.BlockProfileRate > 0 
{\n\t\truntime.SetBlockProfileRate(conf.BlockProfileRate)\n\t}\n\tlogger.WithField(\"BlockProfileRate\", conf.BlockProfileRate).Info(\"Set block profile rate (nanoseconds)\")\n\n\tret.FlushOnShutdown = conf.FlushOnShutdown\n\n\t// Use the pre-allocated Workers slice to know how many to start.\n\tlogger.WithField(\"number\", len(ret.Workers)).Info(\"Preparing workers\")\n\tfor i := range ret.Workers {\n\t\tret.Workers[i] = NewWorker(i+1, ret.IsLocal(), ret.CountUniqueTimeseries, ret.TraceClient, logger, ret.Statsd)\n\t\t// do not close over loop index\n\t\tgo func(w *Worker) {\n\t\t\tdefer func() {\n\t\t\t\tConsumePanic(ret.TraceClient, ret.Hostname, recover())\n\t\t\t}()\n\t\t\tw.Work()\n\t\t}(ret.Workers[i])\n\t}\n\n\tret.EventWorker = NewEventWorker(ret.TraceClient, ret.Statsd)\n\n\tfor _, addrStr := range conf.StatsdListenAddresses {\n\t\taddr, err := protocol.ResolveAddr(addrStr.Value)\n\t\tif err != nil {\n\t\t\treturn ret, err\n\t\t}\n\t\tret.StatsdListenAddrs = append(ret.StatsdListenAddrs, addr)\n\t}\n\n\tfor _, addrStr := range conf.SsfListenAddresses {\n\t\taddr, err := protocol.ResolveAddr(addrStr.Value)\n\t\tif err != nil {\n\t\t\treturn ret, err\n\t\t}\n\t\tret.SSFListenAddrs = append(ret.SSFListenAddrs, addr)\n\t}\n\n\tfor _, addrStr := range conf.GrpcListenAddresses {\n\t\taddr, err := protocol.ResolveAddr(addrStr.Value)\n\t\tif err != nil {\n\t\t\treturn ret, err\n\t\t}\n\t\tret.GRPCListenAddrs = append(ret.GRPCListenAddrs, addr)\n\t}\n\n\tif conf.TLSKey.Value != \"\" {\n\t\tif conf.TLSCertificate == \"\" {\n\t\t\terr = errors.New(\"tls_key is set; must set tls_certificate\")\n\t\t\tlogger.WithError(err).Error(\"Improper TLS configuration\")\n\t\t\treturn ret, err\n\t\t}\n\n\t\t// load the TLS key and certificate\n\t\tvar cert tls.Certificate\n\t\tcert, err = tls.X509KeyPair([]byte(conf.TLSCertificate), []byte(conf.TLSKey.Value))\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).Error(\"Improper TLS configuration\")\n\t\t\treturn ret, err\n\t\t}\n\n\t\tclientAuthMode := tls.NoClientCert\n\t\tvar clientCAs *x509.CertPool\n\t\tif conf.TLSAuthorityCertificate != \"\" {\n\t\t\t// load the authority; require clients to present certificated signed by this authority\n\t\t\tclientAuthMode = tls.RequireAndVerifyClientCert\n\t\t\tclientCAs = x509.NewCertPool()\n\t\t\tok := clientCAs.AppendCertsFromPEM([]byte(conf.TLSAuthorityCertificate))\n\t\t\tif !ok {\n\t\t\t\terr = errors.New(\"tls_authority_certificate: Could not load any certificates\")\n\t\t\t\tlogger.WithError(err).Error(\"Improper TLS configuration\")\n\t\t\t\treturn ret, err\n\t\t\t}\n\t\t}\n\n\t\tret.tlsConfig = &tls.Config{\n\t\t\tCertificates: []tls.Certificate{cert},\n\t\t\tClientAuth: clientAuthMode,\n\t\t\tClientCAs: clientCAs,\n\t\t}\n\t}\n\n\t// Configure tracing sinks if we are listening for ssf\n\tif len(conf.SsfListenAddresses) > 0 || len(conf.GrpcListenAddresses) > 0 {\n\t\ttrace.Enable()\n\n\t\t// Set up as many span workers as we need:\n\t\tif conf.NumSpanWorkers > 0 {\n\t\t\tret.SpanWorkerGoroutines = conf.NumSpanWorkers\n\t\t} else {\n\t\t\tret.SpanWorkerGoroutines = 1\n\t\t}\n\t}\n\n\tret.metricSinks, err =\n\t\tret.createMetricSinks(logger, &conf, config.MetricSinkTypes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tret.spanSinks, err =\n\t\tret.createSpanSinks(logger, &conf, config.SpanSinkTypes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Set up a span sink that extracts metrics from SSF spans and\n\t// reports them via the metric workers:\n\tprocessors := make([]ssfmetrics.Processor, 
len(ret.Workers))\n\tfor i, w := range ret.Workers {\n\t\tprocessors[i] = w\n\t}\n\tmetricSink, err := ssfmetrics.NewMetricExtractionSink(\n\t\tprocessors, conf.IndicatorSpanTimerName, conf.ObjectiveSpanTimerName,\n\t\tret.TraceClient, logger, &ret.parser)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\tret.spanSinks = append(ret.spanSinks, metricSink)\n\n\t// After all sinks are initialized, set the list of tags to exclude\n\tret.setSinkExcludedTags(conf.TagsExclude, ret.metricSinks, ret.spanSinks)\n\n\tif conf.HTTPQuit {\n\t\tlogger.WithField(\"endpoint\", httpQuitEndpoint).Info(\"Enabling graceful shutdown endpoint (via HTTP POST request)\")\n\t\tret.httpQuit = true\n\t}\n\n\tret.sources, err = ret.createSources(logger, &conf, config.SourceTypes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Setup the grpc server if it was configured\n\tif ret.grpcListenAddress != \"\" {\n\t\tret.grpcServer = proxy.New(ret.grpcListenAddress,\n\t\t\tconfig.Logger.WithField(\"source\", \"proxy\"),\n\t\t\tproxy.WithTraceClient(ret.TraceClient))\n\n\t\tret.sources = append(ret.sources, internalSource{\n\t\t\tsource: ret.grpcServer,\n\t\t\ttags: []string{},\n\t\t})\n\t}\n\n\t// If this is a global veneur then initialize the listening per protocol metrics\n\tif !ret.IsLocal() {\n\t\tret.listeningPerProtocolMetrics = &GlobalListeningPerProtocolMetrics{\n\t\t\tdogstatsdTcpReceivedTotal: 0,\n\t\t\tdogstatsdUdpReceivedTotal: 0,\n\t\t\tdogstatsdUnixReceivedTotal: 0,\n\t\t\tdogstatsdGrpcReceivedTotal: 0,\n\t\t\tssfUdpReceivedTotal: 0,\n\t\t\tssfUnixReceivedTotal: 0,\n\t\t\tssfGrpcReceivedTotal: 0,\n\t\t}\n\t\tlogger.Info(\"Tracking listening per protocol metrics on global instance\")\n\t}\n\n\tlogger.WithField(\"config\", conf).Debug(\"Initialized server\")\n\treturn ret, nil\n}", "func DefaultServiceConfigFromEnv() Server {\n\tconfigOnce.Do(func() {\n\t\tconfig = Server{\n\t\t\tDatabase: Database{\n\t\t\t\tHost: util.GetEnv(\"PGHOST\", \"postgres\"),\n\t\t\t\tPort: util.GetEnvAsInt(\"PGPORT\", 5432),\n\t\t\t\tDatabase: util.GetEnv(\"PGDATABASE\", \"development\"),\n\t\t\t\tUsername: util.GetEnv(\"PGUSER\", \"dbuser\"),\n\t\t\t\tPassword: util.GetEnv(\"PGPASSWORD\", \"\"),\n\t\t\t\tAdditionalParams: map[string]string{\n\t\t\t\t\t\"sslmode\": util.GetEnv(\"PGSSLMODE\", \"disable\"),\n\t\t\t\t},\n\t\t\t\tMaxOpenConns: util.GetEnvAsInt(\"DB_MAX_OPEN_CONNS\", runtime.NumCPU()*2),\n\t\t\t\tMaxIdleConns: util.GetEnvAsInt(\"DB_MAX_IDLE_CONNS\", 1),\n\t\t\t\tConnMaxLifetime: time.Second * time.Duration(util.GetEnvAsInt(\"DB_CONN_MAX_LIFETIME_SEC\", 60)),\n\t\t\t},\n\t\t\tEcho: EchoServer{\n\t\t\t\tDebug: util.GetEnvAsBool(\"SERVER_ECHO_DEBUG\", false),\n\t\t\t\tListenAddress: util.GetEnv(\"SERVER_ECHO_LISTEN_ADDRESS\", \":8080\"),\n\t\t\t\tHideInternalServerErrorDetails: util.GetEnvAsBool(\"SERVER_ECHO_HIDE_INTERNAL_SERVER_ERROR_DETAILS\", true),\n\t\t\t\tBaseURL: util.GetEnv(\"SERVER_ECHO_BASE_URL\", \"http://localhost:8080\"),\n\t\t\t\tEnableCORSMiddleware: util.GetEnvAsBool(\"SERVER_ECHO_ENABLE_CORS_MIDDLEWARE\", true),\n\t\t\t\tEnableLoggerMiddleware: util.GetEnvAsBool(\"SERVER_ECHO_ENABLE_LOGGER_MIDDLEWARE\", true),\n\t\t\t\tEnableRecoverMiddleware: util.GetEnvAsBool(\"SERVER_ECHO_ENABLE_RECOVER_MIDDLEWARE\", true),\n\t\t\t\tEnableRequestIDMiddleware: util.GetEnvAsBool(\"SERVER_ECHO_ENABLE_REQUEST_ID_MIDDLEWARE\", true),\n\t\t\t\tEnableTrailingSlashMiddleware: util.GetEnvAsBool(\"SERVER_ECHO_ENABLE_TRAILING_SLASH_MIDDLEWARE\", true),\n\t\t\t},\n\t\t\tPaths: PathsServer{\n\t\t\t\t// Please ALWAYS work with 
ABSOLUTE (ABS) paths from ENV_VARS (however you may resolve a project-relative to absolute for the default value)\n\t\t\t\tAPIBaseDirAbs: util.GetEnv(\"SERVER_PATHS_API_BASE_DIR_ABS\", filepath.Join(util.GetProjectRootDir(), \"/api\")), // /app/api (swagger.yml)\n\t\t\t\tMntBaseDirAbs: util.GetEnv(\"SERVER_PATHS_MNT_BASE_DIR_ABS\", filepath.Join(util.GetProjectRootDir(), \"/assets/mnt\")), // /app/assets/mnt (user-generated content)\n\t\t\t},\n\t\t\tAuth: AuthServer{\n\t\t\t\tAccessTokenValidity: time.Second * time.Duration(util.GetEnvAsInt(\"SERVER_AUTH_ACCESS_TOKEN_VALIDITY\", 86400)),\n\t\t\t\tPasswordResetTokenValidity: time.Second * time.Duration(util.GetEnvAsInt(\"SERVER_AUTH_PASSWORD_RESET_TOKEN_VALIDITY\", 900)),\n\t\t\t\tDefaultUserScopes: util.GetEnvAsStringArr(\"SERVER_AUTH_DEFAULT_USER_SCOPES\", []string{\"app\"}),\n\t\t\t\tLastAuthenticatedAtThreshold: time.Second * time.Duration(util.GetEnvAsInt(\"SERVER_AUTH_LAST_AUTHENTICATED_AT_THRESHOLD\", 900)),\n\t\t\t},\n\t\t\tManagement: ManagementServer{\n\t\t\t\tSecret: util.GetMgmtSecret(\"SERVER_MANAGEMENT_SECRET\"),\n\t\t\t},\n\t\t\tMailer: Mailer{\n\t\t\t\tDefaultSender: util.GetEnv(\"SERVER_MAILER_DEFAULT_SENDER\", \"[email protected]\"),\n\t\t\t\tSend: util.GetEnvAsBool(\"SERVER_MAILER_SEND\", true),\n\t\t\t\tWebTemplatesEmailBaseDirAbs: util.GetEnv(\"SERVER_MAILER_WEB_TEMPLATES_EMAIL_BASE_DIR_ABS\", filepath.Join(util.GetProjectRootDir(), \"/web/templates/email\")), // /app/web/templates/email\n\t\t\t\tTransporter: util.GetEnvEnum(\"SERVER_MAILER_TRANSPORTER\", MailerTransporterMock.String(), []string{MailerTransporterSMTP.String(), MailerTransporterMock.String()}),\n\t\t\t},\n\t\t\tSMTP: transport.SMTPMailTransportConfig{\n\t\t\t\tHost: util.GetEnv(\"SERVER_SMTP_HOST\", \"mailhog\"),\n\t\t\t\tPort: util.GetEnvAsInt(\"SERVER_SMTP_PORT\", 1025),\n\t\t\t\tUsername: util.GetEnv(\"SERVER_SMTP_USERNAME\", \"\"),\n\t\t\t\tPassword: util.GetEnv(\"SERVER_SMTP_PASSWORD\", \"\"),\n\t\t\t\tAuthType: transport.SMTPAuthTypeFromString(util.GetEnv(\"SERVER_SMTP_AUTH_TYPE\", transport.SMTPAuthTypeNone.String())),\n\t\t\t\tUseTLS: util.GetEnvAsBool(\"SERVER_SMTP_USE_TLS\", false),\n\t\t\t\tTLSConfig: nil,\n\t\t\t},\n\t\t\tFrontend: FrontendServer{\n\t\t\t\tBaseURL: util.GetEnv(\"SERVER_FRONTEND_BASE_URL\", \"http://localhost:3000\"),\n\t\t\t\tPasswordResetEndpoint: util.GetEnv(\"SERVER_FRONTEND_PASSWORD_RESET_ENDPOINT\", \"/set-new-password\"),\n\t\t\t},\n\t\t\tLogger: LoggerServer{\n\t\t\t\tLevel: util.LogLevelFromString(util.GetEnv(\"SERVER_LOGGER_LEVEL\", zerolog.DebugLevel.String())),\n\t\t\t\tRequestLevel: util.LogLevelFromString(util.GetEnv(\"SERVER_LOGGER_REQUEST_LEVEL\", zerolog.DebugLevel.String())),\n\t\t\t\tLogRequestBody: util.GetEnvAsBool(\"SERVER_LOGGER_LOG_REQUEST_BODY\", false),\n\t\t\t\tLogRequestHeader: util.GetEnvAsBool(\"SERVER_LOGGER_LOG_REQUEST_HEADER\", false),\n\t\t\t\tLogRequestQuery: util.GetEnvAsBool(\"SERVER_LOGGER_LOG_REQUEST_QUERY\", false),\n\t\t\t\tLogResponseBody: util.GetEnvAsBool(\"SERVER_LOGGER_LOG_RESPONSE_BODY\", false),\n\t\t\t\tLogResponseHeader: util.GetEnvAsBool(\"SERVER_LOGGER_LOG_RESPONSE_HEADER\", false),\n\t\t\t\tPrettyPrintConsole: util.GetEnvAsBool(\"SERVER_LOGGER_PRETTY_PRINT_CONSOLE\", false),\n\t\t\t},\n\t\t\tPush: PushService{\n\t\t\t\tUseFCMProvider: util.GetEnvAsBool(\"SERVER_PUSH_USE_FCM\", false),\n\t\t\t\tUseMockProvider: util.GetEnvAsBool(\"SERVER_PUSH_USE_MOCK\", true),\n\t\t\t},\n\t\t\tFCMConfig: provider.FCMConfig{\n\t\t\t\tGoogleApplicationCredentials: 
util.GetEnv(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"),\n\t\t\t\tProjectID: util.GetEnv(\"SERVER_FCM_PROJECT_ID\", \"no-fcm-project-id-set\"),\n\t\t\t\tValidateOnly: util.GetEnvAsBool(\"SERVER_FCM_VALIDATE_ONLY\", true),\n\t\t\t},\n\t\t}\n\n\t})\n\n\treturn config\n}", "func NewServer(configFilename string, config *Config) *Server {\n\tcasefoldedName, err := Casefold(config.Server.Name)\n\tif err != nil {\n\t\tlog.Println(fmt.Sprintf(\"Server name isn't valid: [%s]\", config.Server.Name), err.Error())\n\t\treturn nil\n\t}\n\n\t// startup check that we have HELP entries for every command\n\tfor name := range Commands {\n\t\t_, exists := Help[strings.ToLower(name)]\n\t\tif !exists {\n\t\t\tlog.Fatal(\"Help entry does not exist for \", name)\n\t\t}\n\t}\n\n\tif config.AuthenticationEnabled {\n\t\tSupportedCapabilities[SASL] = true\n\t}\n\n\tif config.Limits.LineLen.Tags > 512 || config.Limits.LineLen.Rest > 512 {\n\t\tSupportedCapabilities[MaxLine] = true\n\t\tCapValues[MaxLine] = fmt.Sprintf(\"%d,%d\", config.Limits.LineLen.Tags, config.Limits.LineLen.Rest)\n\t}\n\n\toperClasses, err := config.OperatorClasses()\n\tif err != nil {\n\t\tlog.Fatal(\"Error loading oper classes:\", err.Error())\n\t}\n\topers, err := config.Operators(operClasses)\n\tif err != nil {\n\t\tlog.Fatal(\"Error loading operators:\", err.Error())\n\t}\n\n\tconnectionLimits, err := NewConnectionLimits(config.Server.ConnectionLimits)\n\tif err != nil {\n\t\tlog.Fatal(\"Error loading connection limits:\", err.Error())\n\t}\n\tconnectionThrottle, err := NewConnectionThrottle(config.Server.ConnectionThrottle)\n\tif err != nil {\n\t\tlog.Fatal(\"Error loading connection throttler:\", err.Error())\n\t}\n\n\tserver := &Server{\n\t\taccounts: make(map[string]*ClientAccount),\n\t\tauthenticationEnabled: config.AuthenticationEnabled,\n\t\tchannels: make(ChannelNameMap),\n\t\tclients: NewClientLookupSet(),\n\t\tcommands: make(chan Command),\n\t\tconfigFilename: configFilename,\n\t\tconnectionLimits: connectionLimits,\n\t\tconnectionThrottle: connectionThrottle,\n\t\tctime: time.Now(),\n\t\tcurrentOpers: make(map[*Client]bool),\n\t\tidle: make(chan *Client),\n\t\tlimits: Limits{\n\t\t\tAwayLen: int(config.Limits.AwayLen),\n\t\t\tChannelLen: int(config.Limits.ChannelLen),\n\t\t\tKickLen: int(config.Limits.KickLen),\n\t\t\tMonitorEntries: int(config.Limits.MonitorEntries),\n\t\t\tNickLen: int(config.Limits.NickLen),\n\t\t\tTopicLen: int(config.Limits.TopicLen),\n\t\t\tChanListModes: int(config.Limits.ChanListModes),\n\t\t\tLineLen: LineLenLimits{\n\t\t\t\tTags: config.Limits.LineLen.Tags,\n\t\t\t\tRest: config.Limits.LineLen.Rest,\n\t\t\t},\n\t\t},\n\t\tlisteners: make(map[string]ListenerInterface),\n\t\tmonitoring: make(map[string][]Client),\n\t\tname: config.Server.Name,\n\t\tnameCasefolded: casefoldedName,\n\t\tnetworkName: config.Network.Name,\n\t\tnewConns: make(chan clientConn),\n\t\toperclasses: *operClasses,\n\t\toperators: opers,\n\t\tsignals: make(chan os.Signal, len(ServerExitSignals)),\n\t\trehashSignal: make(chan os.Signal, 1),\n\t\trestAPI: &config.Server.RestAPI,\n\t\twhoWas: NewWhoWasList(config.Limits.WhowasEntries),\n\t\tcheckIdent: config.Server.CheckIdent,\n\t}\n\n\t// open data store\n\tdb, err := buntdb.Open(config.Datastore.Path)\n\tif err != nil {\n\t\tlog.Fatal(fmt.Sprintf(\"Failed to open datastore: %s\", err.Error()))\n\t}\n\tserver.store = db\n\n\t// check db version\n\terr = server.store.View(func(tx *buntdb.Tx) error {\n\t\tversion, _ := tx.Get(keySchemaVersion)\n\t\tif version != latestDbSchema 
{\n\t\t\tlog.Println(fmt.Sprintf(\"Database must be updated. Expected schema v%s, got v%s.\", latestDbSchema, version))\n\t\t\treturn errDbOutOfDate\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\t// close the db\n\t\tdb.Close()\n\t\treturn nil\n\t}\n\n\t// load *lines\n\tserver.loadDLines()\n\tserver.loadKLines()\n\n\t// load password manager\n\terr = server.store.View(func(tx *buntdb.Tx) error {\n\t\tsaltString, err := tx.Get(keySalt)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Could not retrieve salt string: %s\", err.Error())\n\t\t}\n\n\t\tsalt, err := base64.StdEncoding.DecodeString(saltString)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tpwm := NewPasswordManager(salt)\n\t\tserver.passwords = &pwm\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tlog.Fatal(fmt.Sprintf(\"Could not load salt: %s\", err.Error()))\n\t}\n\n\tif config.Server.MOTD != \"\" {\n\t\tfile, err := os.Open(config.Server.MOTD)\n\t\tif err == nil {\n\t\t\tdefer file.Close()\n\n\t\t\treader := bufio.NewReader(file)\n\t\t\tfor {\n\t\t\t\tline, err := reader.ReadString('\\n')\n\t\t\t\tif err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tline = strings.TrimRight(line, \"\\r\\n\")\n\t\t\t\t// \"- \" is the required prefix for MOTD, we just add it here to make\n\t\t\t\t// bursting it out to clients easier\n\t\t\t\tline = fmt.Sprintf(\"- %s\", line)\n\n\t\t\t\tserver.motdLines = append(server.motdLines, line)\n\t\t\t}\n\t\t}\n\t}\n\n\tif config.Server.Password != \"\" {\n\t\tserver.password = config.Server.PasswordBytes()\n\t}\n\n\tfor _, addr := range config.Server.Listen {\n\t\tserver.createListener(addr, config.TLSListeners())\n\t}\n\n\tif config.Server.Wslisten != \"\" {\n\t\tserver.wslisten(config.Server.Wslisten, config.Server.TLSListeners)\n\t}\n\n\t// registration\n\taccountReg := NewAccountRegistration(config.Registration.Accounts)\n\tserver.accountRegistration = &accountReg\n\n\t// Attempt to clean up when receiving these signals.\n\tsignal.Notify(server.signals, ServerExitSignals...)\n\tsignal.Notify(server.rehashSignal, syscall.SIGHUP)\n\n\tserver.setISupport()\n\n\t// start API if enabled\n\tif server.restAPI.Enabled {\n\t\tLog.info.Printf(\"%s rest API started on %s .\", server.name, server.restAPI.Listen)\n\t\tserver.startRestAPI()\n\t}\n\n\treturn server\n}", "func NewServer(c *Config) (*Server, error) {\n\t// validate config\n\tif err := validation.Validate.Struct(c); err != nil {\n\t\treturn nil, fmt.Errorf(\"invalid config: %v\", err)\n\t}\n\n\t// create root context\n\tctx, cancel := context.WithCancel(context.Background())\n\n\t// register handlers\n\tmux := runtime.NewServeMux()\n\topts := []grpc.DialOption{grpc.WithInsecure()}\n\terr := proto.RegisterTodosHandlerFromEndpoint(ctx, mux, c.Endpoint, opts)\n\tif err != nil {\n\t\tdefer cancel()\n\t\treturn nil, fmt.Errorf(\"unable to register gateway handler: %v\", err)\n\t}\n\n\ts := Server{\n\t\tcancel: cancel,\n\t\tlog: c.Log,\n\t\tmux: mux,\n\t\tport: c.Port,\n\t}\n\treturn &s, nil\n}", "func newConfig(envParams envParams) error {\n\t// Initialize server config.\n\tsrvCfg := newServerConfigV14()\n\n\t// If env is set for a fresh start, save them to config file.\n\tif globalIsEnvCreds {\n\t\tsrvCfg.SetCredential(envParams.creds)\n\t}\n\n\tif globalIsEnvBrowser {\n\t\tsrvCfg.SetBrowser(envParams.browser)\n\t}\n\n\t// Create config path.\n\tif err := createConfigDir(); err != nil {\n\t\treturn err\n\t}\n\n\t// hold the mutex lock before a new config is assigned.\n\t// Save the new config globally.\n\t// unlock the 
mutex.\n\tserverConfigMu.Lock()\n\tserverConfig = srvCfg\n\tserverConfigMu.Unlock()\n\n\t// Save config into file.\n\treturn serverConfig.Save()\n}", "func getHttpServerConfig(configVars map[string]string) *HttpServerConfig {\n\thost := configVars[\"HTTP_SERVER_HOST\"]\n\n\t// It is fine not to have the host, we can default it to `localhost`\n\tif host == \"\" {\n\t\thost = \"localhost\"\n\t}\n\n\tport := configVars[\"HTTP_SERVER_PORT\"]\n\n\t// Http server can't really work without a port where it needs to listens to for incoming requests\n\tif port == \"\" {\n\t\tpanicMissingEnvVariable(\"HTTP_SERVER_PORT\")\n\t}\n\n\treturn &HttpServerConfig{\n\t\tHost: host,\n\t\tPort: port,\n\t}\n}", "func New(ctx context.Context, opts ...Option) *Server {\n\tctx, span := trace.StartSpan(ctx, \"server_init\")\n\tdefer span.End()\n\n\tlog := common.Logger(ctx)\n\tengine := gin.New()\n\ts := &Server{\n\t\tRouter: engine,\n\t\tAdminRouter: engine,\n\t\tsvcConfigs: map[string]*http.Server{\n\t\t\tWebServer: &http.Server{\n\t\t\t\tMaxHeaderBytes: getEnvInt(EnvMaxHeaderSize, http.DefaultMaxHeaderBytes),\n\t\t\t\tReadHeaderTimeout: getEnvDuration(EnvReadHeaderTimeout, 0),\n\t\t\t\tReadTimeout: getEnvDuration(EnvReadTimeout, 0),\n\t\t\t\tWriteTimeout: getEnvDuration(EnvWriteTimeout, 0),\n\t\t\t\tIdleTimeout: getEnvDuration(EnvHTTPIdleTimeout, 0),\n\t\t\t},\n\t\t\tAdminServer: &http.Server{},\n\t\t\tGRPCServer: &http.Server{},\n\t\t},\n\t\t// MUST initialize these before opts\n\t\tappListeners: new(appListeners),\n\t\tfnListeners: new(fnListeners),\n\t\ttriggerListeners: new(triggerListeners),\n\n\t\t// Almost everything else is configured through opts (see NewFromEnv for ex.) or below\n\t}\n\n\tfor _, opt := range opts {\n\t\tif opt == nil {\n\t\t\tcontinue\n\t\t}\n\t\terr := opt(ctx, s)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Fatal(\"Error during server opt initialization.\")\n\t\t}\n\t}\n\n\tif s.svcConfigs[WebServer].Addr == \"\" {\n\t\ts.svcConfigs[WebServer].Addr = fmt.Sprintf(\":%d\", DefaultPort)\n\t}\n\tif s.svcConfigs[AdminServer].Addr == \"\" {\n\t\ts.svcConfigs[AdminServer].Addr = fmt.Sprintf(\":%d\", DefaultPort)\n\t}\n\tif s.svcConfigs[GRPCServer].Addr == \"\" {\n\t\ts.svcConfigs[GRPCServer].Addr = fmt.Sprintf(\":%d\", DefaultGRPCPort)\n\t}\n\n\trequireConfigSet := func(id string, val interface{}) {\n\t\tif val == nil {\n\t\t\tlog.Fatalf(\"Invalid configuration for server type %s, %s must be configured during startup\", s.nodeType, id)\n\t\t}\n\t}\n\trequireConfigNotSet := func(id string, val interface{}) {\n\t\tif val != nil {\n\t\t\tlog.Fatalf(\"Invalid configuration for server type %s, %s must not be configured during startup\", s.nodeType, id)\n\t\t}\n\t}\n\n\t// Check that WithAgent options have been processed correctly.\n\t// Yuck the yuck - server should really be split into several interfaces (LB, Runner, API) and each should be instantiated separately\n\tswitch s.nodeType {\n\tcase ServerTypeAPI:\n\t\trequireConfigNotSet(\"agent\", s.agent)\n\t\trequireConfigSet(\"datastore\", s.datastore)\n\t\trequireConfigSet(\"triggerAnnotator\", s.triggerAnnotator)\n\tcase ServerTypeFull:\n\t\trequireConfigSet(\"agent\", s.agent)\n\t\trequireConfigSet(\"lbReadAccess\", s.lbReadAccess)\n\t\trequireConfigSet(\"datastore\", s.datastore)\n\t\trequireConfigSet(\"triggerAnnotator\", s.triggerAnnotator)\n\n\tcase ServerTypeLB:\n\t\trequireConfigSet(\"lbReadAccess\", s.lbReadAccess)\n\t\trequireConfigSet(\"agent\", s.agent)\n\n\tcase ServerTypePureRunner:\n\t\trequireConfigSet(\"agent\", 
s.agent)\n\n\tdefault:\n\n\t\tlog.Fatal(\"unknown server type %d\", s.nodeType)\n\n\t}\n\n\ts.Router.Use(loggerWrap, traceWrap) // TODO should be opts\n\toptionalCorsWrap(s.Router) // TODO should be an opt\n\tapiMetricsWrap(s)\n\t// panicWrap is last, specifically so that logging, tracing, cors, metrics, etc wrappers run\n\ts.Router.Use(panicWrap)\n\ts.AdminRouter.Use(panicWrap)\n\ts.bindHandlers(ctx)\n\n\treturn s\n}", "func New(content map[string]interface{}) *Config {\n\treturn &Config{\n\t\tm: content,\n\t}\n}", "func New(config *Config) *APIServer {\n\treturn &APIServer{\n\t\tconfig: config, // Initialization config file \"toml\" from config.go\n\t}\n}" ]
[ "0.66880894", "0.6239105", "0.61657727", "0.6142114", "0.60939157", "0.6093432", "0.5998546", "0.59107727", "0.5907868", "0.5894315", "0.5862838", "0.5836971", "0.582672", "0.57969505", "0.57863814", "0.5756004", "0.5753981", "0.5732995", "0.57201874", "0.56668013", "0.56526446", "0.56481785", "0.56071794", "0.5600552", "0.55853844", "0.55584526", "0.55541587", "0.55304855", "0.54974174", "0.54907775", "0.54771125", "0.5374064", "0.5359531", "0.5353499", "0.5341132", "0.53368133", "0.5329678", "0.52957237", "0.5288893", "0.52885056", "0.52807915", "0.52710813", "0.52665555", "0.5248204", "0.52419084", "0.5236456", "0.52298576", "0.5206057", "0.5204313", "0.5200579", "0.5198726", "0.5190673", "0.5187066", "0.51798326", "0.51657", "0.5157755", "0.5138608", "0.5133597", "0.5122968", "0.51126397", "0.5104695", "0.5082091", "0.50799346", "0.50699127", "0.50665945", "0.5064978", "0.5061785", "0.50529844", "0.5048735", "0.50486845", "0.50440216", "0.5040773", "0.5038025", "0.50349534", "0.5033164", "0.502377", "0.50046456", "0.49999183", "0.4998353", "0.49894464", "0.4985303", "0.49840277", "0.49837214", "0.4982612", "0.49686962", "0.49682015", "0.4966587", "0.49642217", "0.49632624", "0.49545893", "0.49438235", "0.49424362", "0.4940182", "0.49310264", "0.4930159", "0.49295315", "0.49251646", "0.49135533", "0.49074045", "0.49037206" ]
0.7297781
0
NewClient returns Singularity HTTP endpoint.
func NewClient(c serverConfig) *Client { r := resty.New(). SetRESTMode(). SetRetryCount(c.Retry). SetRetryWaitTime(c.RetryWaitTime). SetHostURL(c.URL). AddRetryCondition( // Condition function will be provided with *resty.Response as a // parameter. It is expected to return (bool, error) pair. Resty will retry // in case condition returns true or non nil error. func(r *resty.Response) (bool, error) { return r.StatusCode() == http.StatusNotFound, nil }, ) return &Client{ Rest: r, } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func New(httpClient *http.Client, config Config) (*Client, error) {\n\tc := NewClient(httpClient)\n\tc.Config = config\n\n\tbaseURL, err := url.Parse(\"https://\" + config.Host)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc.BaseURL = baseURL\n\treturn c, nil\n}", "func New(endpoint string) *Client {\n\treturn &Client{endpoint, &http.Client{}, \"\"}\n}", "func NewClient(url string) *Client {\n\n\thttpClient := http.DefaultClient\n\tbaseURL := fmt.Sprintf(\"%s%s/\", url, APIVersion)\n\treturn &Client{\n\t\tsling: sling.New().Client(httpClient).Base(baseURL),\n\t}\n}", "func New(endpoint *url.URL, client *http.Client) *Client {\n\tif client == nil {\n\t\tclient = httpClient\n\t}\n\n\tif len(endpoint.Path) > 0 && !strings.HasSuffix(endpoint.Path, \"/\") {\n\t\tendpoint.Path = endpoint.Path + \"/\"\n\t}\n\n\treturn &Client{client, endpoint, make(http.Header), endpoint.Query()}\n}", "func New(url string) *Client {\n\treturn &Client{&http.Client{}, url, func(r *http.Request) *http.Request { return r }}\n}", "func NewClient() *Client {\n baseURL, _ := url.Parse(defaultBaseURL)\n return &Client{client: http.DefaultClient, BaseURL: baseURL, UserAgent: userAgent}\n}", "func NewHTTPClient(instance string, otTracer stdopentracing.Tracer, zipkinTracer *stdzipkin.Tracer, logger log.Logger) (service.AddsvcService, error) { // Quickly sanitize the instance string.\n\tif !strings.HasPrefix(instance, \"http\") {\n\t\tinstance = \"http://\" + instance\n\t}\n\tu, err := url.Parse(instance)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// We construct a single ratelimiter middleware, to limit the total outgoing\n\t// QPS from this client to all methods on the remote instance. We also\n\t// construct per-endpoint circuitbreaker middlewares to demonstrate how\n\t// that's done, although they could easily be combined into a single breaker\n\t// for the entire remote instance, too.\n\tlimiter := ratelimit.NewErroringLimiter(rate.NewLimiter(rate.Every(time.Second), 100))\n\n\t// Zipkin HTTP Client Trace can either be instantiated per endpoint with a\n\t// provided operation name or a global tracing client can be instantiated\n\t// without an operation name and fed to each Go kit endpoint as ClientOption.\n\t// In the latter case, the operation name will be the endpoint's http method.\n\tzipkinClient := zipkin.HTTPClientTrace(zipkinTracer)\n\n\t// global client middlewares\n\toptions := []httptransport.ClientOption{\n\t\tzipkinClient,\n\t}\n\n\te := endpoints.Endpoints{}\n\n\t// Each individual endpoint is an http/transport.Client (which implements\n\t// endpoint.Endpoint) that gets wrapped with various middlewares. 
If you\n\t// made your own client library, you'd do this work there, so your server\n\t// could rely on a consistent set of client behavior.\n\t// The Sum endpoint is the same thing, with slightly different\n\t// middlewares to demonstrate how to specialize per-endpoint.\n\tvar sumEndpoint endpoint.Endpoint\n\t{\n\t\tsumEndpoint = httptransport.NewClient(\n\t\t\t\"POST\",\n\t\t\tcopyURL(u, \"/sum\"),\n\t\t\tencodeHTTPSumRequest,\n\t\t\tdecodeHTTPSumResponse,\n\t\t\tappend(options, httptransport.ClientBefore(opentracing.ContextToHTTP(otTracer, logger)))...,\n\t\t).Endpoint()\n\t\tsumEndpoint = opentracing.TraceClient(otTracer, \"Sum\")(sumEndpoint)\n\t\tsumEndpoint = zipkin.TraceEndpoint(zipkinTracer, \"Sum\")(sumEndpoint)\n\t\tsumEndpoint = limiter(sumEndpoint)\n\t\tsumEndpoint = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{\n\t\t\tName: \"Sum\",\n\t\t\tTimeout: 30 * time.Second,\n\t\t}))(sumEndpoint)\n\t\te.SumEndpoint = sumEndpoint\n\t}\n\n\t// The Concat endpoint is the same thing, with slightly different\n\t// middlewares to demonstrate how to specialize per-endpoint.\n\tvar concatEndpoint endpoint.Endpoint\n\t{\n\t\tconcatEndpoint = httptransport.NewClient(\n\t\t\t\"POST\",\n\t\t\tcopyURL(u, \"/concat\"),\n\t\t\tencodeHTTPConcatRequest,\n\t\t\tdecodeHTTPConcatResponse,\n\t\t\tappend(options, httptransport.ClientBefore(opentracing.ContextToHTTP(otTracer, logger)))...,\n\t\t).Endpoint()\n\t\tconcatEndpoint = opentracing.TraceClient(otTracer, \"Concat\")(concatEndpoint)\n\t\tconcatEndpoint = zipkin.TraceEndpoint(zipkinTracer, \"Concat\")(concatEndpoint)\n\t\tconcatEndpoint = limiter(concatEndpoint)\n\t\tconcatEndpoint = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{\n\t\t\tName: \"Concat\",\n\t\t\tTimeout: 30 * time.Second,\n\t\t}))(concatEndpoint)\n\t\te.ConcatEndpoint = concatEndpoint\n\t}\n\n\t// Returning the endpoint.Set as a service.Service relies on the\n\t// endpoint.Set implementing the Service methods. 
That's just a simple bit\n\t// of glue code.\n\treturn e, nil\n}", "func NewClient(with ...ClientOption) *Client {\n\ttimeout := DefaultTimeout\n\n\tclient := &Client{\n\t\tclient: &http.Client{\n\t\t\tTimeout: timeout,\n\t\t},\n\t\tbase: getBaseURL(url.URL{\n\t\t\tScheme: \"https\",\n\t\t\tHost: \"api.secrethub.io\",\n\t\t}),\n\t\tuserAgent: DefaultUserAgent,\n\t}\n\tclient.Options(with...)\n\treturn client\n}", "func New(url string) *Client {\n\treturn &Client{url: url, httpC: http.DefaultClient}\n}", "func New() *Client {\n\treturn &Client{\n\t\tclient: &http.Client{},\n\t\tendpoint: *defaultEndpoint,\n\t}\n}", "func New(url string) Client {\n\treturn &client{\n\t\tbaseURL: url,\n\t}\n}", "func New(addr string) (*Client, error) {\n\treturn &Client{\n\t\taddr: addr,\n\t\thttpClient: &http.Client{},\n\t}, nil\n}", "func New(addr string) (*Client, error) {\n\treturn &Client{\n\t\taddr: addr,\n\t\thttpClient: &http.Client{},\n\t}, nil\n}", "func New(url string) *Client {\n\treturn NewWithHTTP(url, http.DefaultClient)\n}", "func NewClient(httpClient *http.Client, URL string, Token string, Source string, SourceType string, Index string) (*Client) {\n\t// Create a new client\n\tif httpClient == nil {\n\t\ttr := &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}} // turn off certificate checking\n\t\thttpClient = &http.Client{Timeout: time.Second * 20, Transport: tr}\n\t}\n\n\tc := &Client{HTTPClient: httpClient, URL: URL, Token: Token, Source: Source, SourceType: SourceType, Index: Index}\n\n\treturn c\n}", "func New(url string) *Client {\n\treturn &Client{\n\t\tclient: http2.NewClient(nil),\n\t\turl: url,\n\t}\n}", "func New() *Client {\n\treturn &Client{client: &http.Client{}}\n}", "func New(url string) Client {\n\treturn &clientImpl{\n\t\tRPCClient: jsonrpc.NewRPCClient(url),\n\t\tLogger: slf4go.Get(\"eth-rpc-client\"),\n\t}\n}", "func NewClient(httpClient *http.Client, username string, password string) *Client {\n\tbase := sling.New().Client(httpClient).Base(msfUrl)\n\tbase.SetBasicAuth(username, password)\n\treturn &Client{\n\t\tsling: base,\n\t\tNBA: newNBAService(base.New()),\n\t}\n}", "func New(endpoint string) *Client {\n\treturn &Client{\n\t\tendpoint: endpoint,\n\t}\n}", "func New(url string, httpClient *http.Client, customHeaders http.Header) *Client {\n\tif httpClient == nil {\n\t\thttpClient = &http.Client{\n\t\t\tTimeout: defaultHTTPTimeout,\n\t\t}\n\t}\n\n\treturn &Client{\n\t\turl: url,\n\t\thttpClient: httpClient,\n\t\tcustomHeaders: customHeaders,\n\t}\n}", "func New(endpoint string, client *http.Client) (*Client, error) {\n\tif _, err := url.Parse(endpoint); err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to parse endpoint: %w\", err)\n\t}\n\ticlient := pb.NewJotFSProtobufClient(endpoint, client)\n\treturn &Client{iclient}, nil\n}", "func NewClient(c Configuration) (Client, error) {\n\tcli := Client{\n\t\tName: \"splunk-http-collector-client\",\n\t}\n\tif err := cli.Configure(c.Collector.Proto, c.Collector.Host, c.Collector.Port); err != nil {\n\t\treturn cli, err\n\t}\n\tlog.Debugf(\"%s: proto=%s\", cli.Name, c.Collector.Proto)\n\tlog.Debugf(\"%s: host=%s\", cli.Name, c.Collector.Host)\n\tlog.Debugf(\"%s: port=%d\", cli.Name, c.Collector.Port)\n\tlog.Debugf(\"%s: token=%s\", cli.Name, c.Collector.Token)\n\tlog.Debugf(\"%s: timeout=%d\", cli.Name, c.Collector.Timeout)\n\tlog.Debugf(\"%s: endpoint.health=%s\", cli.Name, cli.Endpoints.Health)\n\tlog.Debugf(\"%s: endpoint.event=%s\", cli.Name, cli.Endpoints.Event)\n\tlog.Debugf(\"%s: endpoint.raw=%s\", 
cli.Name, cli.Endpoints.Raw)\n\tt := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{\n\t\t\tInsecureSkipVerify: true,\n\t\t},\n\t}\n\tcli.client = &http.Client{\n\t\tTimeout: time.Duration(c.Collector.Timeout) * time.Second,\n\t\tTransport: t,\n\t}\n\tcli.Token = c.Collector.Token\n\tif err := cli.HealthCheck(); err != nil {\n\t\treturn cli, err\n\t}\n\treturn cli, nil\n}", "func NewClient(serverAddress string, cli *http.Client) *Client {\n\tif cli == nil {\n\t\tcli = http.DefaultClient\n\t}\n\treturn &Client{\"http://\" + serverAddress + \"/api/v1/\", cli}\n}", "func New(server *url.URL) *genclient.Fulcio {\n\trt := httptransport.New(server.Host, genclient.DefaultBasePath, []string{server.Scheme})\n\trt.Consumers[\"application/pem-certificate-chain\"] = runtime.TextConsumer()\n\treturn genclient.New(rt, strfmt.Default)\n}", "func New(uri, repository string) *Client {\n\treturn &Client{uri, repository}\n}", "func NewClient(httpClient *http.Client) (*Client, error) {\n\tc := &Client{\n\t\thttpClient: httpClient,\n\t}\n\tu, err := url.Parse(APIEndpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.endpoint = u\n\treturn c, nil\n}", "func NewClient() *Client {\n\t// Init new http.Client.\n\thttpClient := http.DefaultClient\n\n\t// Parse BE URL.\n\tbaseURL, _ := url.Parse(backendURL)\n\n\tc := &Client{\n\t\tclient: httpClient,\n\t\tBackendURL: baseURL,\n\t\tUserAgent: userAgent,\n\t}\n\n\tc.Pwned = &PwnedService{client: c}\n\tc.Cache = &CacheService{client: c}\n\treturn c\n}", "func NewClient(t string) *gophercloud.ServiceClient {\n\tvar err error\n\tao, region, err := authMethod()\n\tif err != nil {\n\t\tfmt.Printf(\"Error retrieving authentication credentials: %s\\n\", err)\n\t}\n\tif ao.IdentityEndpoint == \"\" {\n\t\tao.IdentityEndpoint = rackspace.RackspaceUSIdentity\n\t}\n\tpc, err := rackspace.AuthenticatedClient(ao)\n\tif err != nil {\n\t\tfmt.Printf(\"Error creating ProviderClient: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tvar sc *gophercloud.ServiceClient\n\tswitch t {\n\tcase \"compute\":\n\t\tsc, err = rackspace.NewComputeV2(pc, gophercloud.EndpointOpts{\n\t\t\tRegion: region,\n\t\t})\n\t\tbreak\n\tcase \"blockstorage\":\n\t\tsc, err = rackspace.NewBlockStorageV1(pc, gophercloud.EndpointOpts{\n\t\t\tRegion: region,\n\t\t})\n\t\tbreak\n\tcase \"networking\":\n\t\tsc, err = rackspace.NewNetworkV2(pc, gophercloud.EndpointOpts{\n\t\t\tRegion: region,\n\t\t})\n\t\tbreak\n\t}\n\tif err != nil {\n\t\tfmt.Printf(\"Error creating ServiceClient (%s): %s\\n\", err, t)\n\t\tos.Exit(1)\n\t}\n\t// sc.UserAgent.Prepend(\"rack/\" + util.Version)\n\treturn sc\n}", "func New(baseURL *url.URL) *Client {\n\treturn &Client{\n\t\tu: baseURL,\n\t}\n}", "func NewClient(endpoint string, headers map[string]string) *Client {\n\treturn &Client{\n\t\tEndpoint: endpoint,\n\t\tHeaders: headers,\n\t\tclient: &http.Client{},\n\t}\n}", "func NewClient(base string) *Client {\n\treturn &Client{\n\t\tLogger: slf4go.Get(\"ethclient\"),\n\t\tclient: sling.New().Base(base),\n\t}\n}", "func NewClient(cfg *Config) (*Client, error) {\r\n\tBaseURL := new(url.URL)\r\n\tvar err error\r\n\r\n\tviper.SetEnvPrefix(\"TS\")\r\n\tviper.BindEnv(\"LOG\")\r\n\r\n\tswitch l := viper.Get(\"LOG\"); l {\r\n\tcase \"trace\":\r\n\t\tlog.SetLevel(log.TraceLevel)\r\n\tcase \"debug\":\r\n\t\tlog.SetLevel(log.DebugLevel)\r\n\tcase \"info\":\r\n\t\tlog.SetLevel(log.InfoLevel)\r\n\tcase \"warn\":\r\n\t\tlog.SetLevel(log.WarnLevel)\r\n\tcase \"fatal\":\r\n\t\tlog.SetLevel(log.FatalLevel)\r\n\tcase 
\"panic\":\r\n\t\tlog.SetLevel(log.PanicLevel)\r\n\t}\r\n\r\n\tif cfg.BaseURL != \"\" {\r\n\t\tBaseURL, err = url.Parse(cfg.BaseURL)\r\n\t\tif err != nil {\r\n\t\t\treturn nil, err\r\n\t\t}\r\n\t} else {\r\n\t\tBaseURL, err = url.Parse(defaultBaseURL)\r\n\t\tif err != nil {\r\n\t\t\treturn nil, err\r\n\t\t}\r\n\t}\r\n\r\n\tnewClient := &Client{\r\n\t\tBaseURL: BaseURL,\r\n\t\tclient: http.DefaultClient,\r\n\t\tcreds: &Credentials{\r\n\t\t\tAPIKey: cfg.APIKey,\r\n\t\t\tOrganizationID: cfg.OrganizationID,\r\n\t\t\tUserID: cfg.UserID,\r\n\t\t},\r\n\t}\r\n\r\n\tnewClient.Rulesets = &RulesetService{newClient}\r\n\tnewClient.Rules = &RuleService{newClient}\r\n\r\n\treturn newClient, nil\r\n}", "func New(addr string) (*Client, error) {\n\tc, err := rpc.DialHTTP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Client{c: c}, nil\n}", "func NewClient(url string) *Client {\n\treturn &Client{&http.Client{}, url}\n}", "func NewClient(apiKey string) *Client {\n\treturn &Client{&http.Client{}, defaultURL, apiKey}\n}", "func New(context *contexter.Context) (*Client) {\n return &Client {\n urlBaseIndex: 0,\n\t\tcontext: context,\n }\n}", "func newClient(apiKey string) *Client {\n\tvar url *url.URL\n\turl, _ = url.Parse(\"https://vulners.com/api/v3\")\n\treturn &Client{baseURL: url, apiKey: apiKey}\n}", "func NewClient(config ClientConfig) (Client, error) {\n\t// raise error on client creation if the url is invalid\n\tneturl, err := url.Parse(config.URL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thttpClient := http.DefaultClient\n\n\tif config.TLSInsecureSkipVerify {\n\t\thttpClient.Transport = &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}\n\t}\n\n\tc := &client{\n\t\tclient: httpClient,\n\t\trawurl: neturl.String(),\n\t\tusername: config.Username,\n\t\tpassword: config.Password,\n\t}\n\n\t// create a single service object and reuse it for each API service\n\tc.service.client = c\n\tc.knowledge = (*knowledgeService)(&c.service)\n\n\treturn c, nil\n}", "func NewClient(connectionstring, pooluser string) (sc GenericClient) {\n\tif strings.HasPrefix(connectionstring, \"stratum+tcp://\") {\n\t\tsc = &StratumClient{\n\t\t\tconnectionstring: strings.TrimPrefix(connectionstring, \"stratum+tcp://\"),\n\t\t\tUser: pooluser,\n\t\t}\n\t} else {\n\t\ts := SiadClient{}\n\t\ts.siadurl = \"http://\" + connectionstring + \"/miner/header\"\n\t\tsc = &s\n\t}\n\treturn\n}", "func New(address, apiKey string, httpClient *http.Client) *Client {\n\tclient := Client{\n\t\tAddress: strings.TrimRight(address, \"/\"),\n\t\tAPIKey: apiKey,\n\t\thttpClient: httpClient,\n\t}\n\n\treturn &client\n}", "func NewClient(url string) *Client {\n\ttr := http.DefaultTransport\n\thttp := &http.Client{Transport: tr}\n\tclient := &Client{http: http, url: url}\n\treturn client\n}", "func NewClient(apiKey string) *Client {\n\tu, _ := url.ParseRequestURI(DefaultBaseURL)\n\treturn &Client{\n\t\tclient: &http.Client{},\n\t\tapiKey: apiKey,\n\t\tbaseURL: u,\n\t}\n}", "func NewClient(config *Config) *Client {\n\ttr := config.Transport()\n\n\treturn &Client{\n\t\tconfig: config.Clone(),\n\t\ttr: tr,\n\t\tclient: &http.Client{Transport: tr},\n\t}\n}", "func NewClient(baseARN string, regional bool) *Client {\n\treturn &Client{\n\t\tBaseARN: baseARN,\n\t\tEndpoint: \"sts.amazonaws.com\",\n\t\tUseRegionalEndpoint: regional,\n\t}\n}", "func New(uri string) (*Client, error) {\n\tu, e := url.Parse(uri)\n\tif e != nil {\n\t\treturn nil, fmt.Errorf(\"url.Parse: %w\", e)\n\t}\n\n\tc := &Client{\n\t\turi: 
u.String(),\n\t}\n\treturn c, nil\n}", "func NewClient(config HostConfig) *Client {\n\tc := &Client{\n\t\tconfig: config,\n\t}\n\tc.client = &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\tInsecureSkipVerify: !config.Verify,\n\t\t\t},\n\t\t},\n\t}\n\n\tgrpcAddress := c.config.GRPC\n\tsecure := false\n\tif grpcAddress == `` {\n\t\tu, _ := url.Parse(c.config.API)\n\t\tgrpcAddress = u.Hostname()\n\t\tgrpcPort := u.Port()\n\t\tif u.Scheme == `http` {\n\t\t\tsecure = false\n\t\t\tif grpcPort == `` {\n\t\t\t\tgrpcPort = `80`\n\t\t\t}\n\t\t} else {\n\t\t\tsecure = true\n\t\t\tif grpcPort == `` {\n\t\t\t\tgrpcPort = `443`\n\t\t\t}\n\t\t}\n\n\t\tgrpcAddress = fmt.Sprintf(`%s:%s`, grpcAddress, grpcPort)\n\t}\n\n\tvar conn *grpc.ClientConn\n\tvar err error\n\tif secure {\n\t\tif conn, err = grpc.Dial(\n\t\t\tgrpcAddress,\n\t\t\tgrpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{})),\n\t\t\tgrpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(100<<20)),\n\t\t); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t} else {\n\t\tif conn, err = grpc.Dial(\n\t\t\tgrpcAddress,\n\t\t\tgrpc.WithInsecure(),\n\t\t\tgrpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(100<<20)),\n\t\t); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\tc.cc = conn\n\n\tc.blog = protocols.NewTaoBlogClient(c.cc)\n\tc.management = protocols.NewManagementClient(c.cc)\n\n\treturn c\n}", "func New(addr string) *Client {\n\treturn &Client{\n\t\taddr: addr,\n\t\tHTTPClient: &http.Client{\n\t\t\tTimeout: 1 * time.Minute,\n\t\t},\n\t}\n}", "func newClient(certFile, keyFile string) (*http.Client, error) {\n\tcaCert, err := ioutil.ReadFile(\"/etc/insights-client/cert-api.access.redhat.com.pem\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcaCertPool, err := x509.SystemCertPool()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcaCertPool.AppendCertsFromPEM(caCert)\n\n\tcert, err := tls.LoadX509KeyPair(certFile, keyFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttlsConfig := tls.Config{\n\t\tRootCAs: caCertPool,\n\t\tCertificates: []tls.Certificate{cert},\n\t\tMaxVersion: tls.VersionTLS12, // cloud.redhat.com appears to exhibit this openssl bug https://github.com/openssl/openssl/issues/9767\n\t}\n\n\ttlsConfig.BuildNameToCertificate()\n\ttransport := http.Transport{\n\t\tTLSClientConfig: &tlsConfig,\n\t}\n\tclient := http.Client{\n\t\tTransport: &transport,\n\t}\n\treturn &client, nil\n}", "func NewClient(config *sdk.Config, credential *auth.Credential) *Client {\n\tvar handler sdk.RequestHandler = func(c *sdk.Client, req request.Common) (request.Common, error) {\n\t\terr := req.SetProjectId(PickResourceID(req.GetProjectId()))\n\t\treturn req, err\n\t}\n\tvar (\n\t\tuaccountClient = *uaccount.NewClient(config, credential)\n\t\tuhostClient = *uhost.NewClient(config, credential)\n\t\tunetClient = *unet.NewClient(config, credential)\n\t\tvpcClient = *vpc.NewClient(config, credential)\n\t\tudpnClient = *udpn.NewClient(config, credential)\n\t\tpathxClient = *pathx.NewClient(config, credential)\n\t\tudiskClient = *udisk.NewClient(config, credential)\n\t\tulbClient = *ulb.NewClient(config, credential)\n\t\tudbClient = *udb.NewClient(config, credential)\n\t\tumemClient = *umem.NewClient(config, credential)\n\t\tuphostClient = *uphost.NewClient(config, credential)\n\t\tpuhostClient = *puhost.NewClient(config, credential)\n\t\tpudbClient = *pudb.NewClient(config, credential)\n\t\tpumemClient = *pumem.NewClient(config, credential)\n\t\tppathxClient = *ppathx.NewClient(config, 
credential)\n\t)\n\n\tuaccountClient.Client.AddRequestHandler(handler)\n\tuhostClient.Client.AddRequestHandler(handler)\n\tunetClient.Client.AddRequestHandler(handler)\n\tvpcClient.Client.AddRequestHandler(handler)\n\tudpnClient.Client.AddRequestHandler(handler)\n\tpathxClient.Client.AddRequestHandler(handler)\n\tudiskClient.Client.AddRequestHandler(handler)\n\tulbClient.Client.AddRequestHandler(handler)\n\tudbClient.Client.AddRequestHandler(handler)\n\tumemClient.Client.AddRequestHandler(handler)\n\tuphostClient.Client.AddRequestHandler(handler)\n\tpuhostClient.Client.AddRequestHandler(handler)\n\tpudbClient.Client.AddRequestHandler(handler)\n\tpumemClient.Client.AddRequestHandler(handler)\n\tppathxClient.Client.AddRequestHandler(handler)\n\n\treturn &Client{\n\t\tuaccountClient,\n\t\tuhostClient,\n\t\tunetClient,\n\t\tvpcClient,\n\t\tudpnClient,\n\t\tpathxClient,\n\t\tudiskClient,\n\t\tulbClient,\n\t\tudbClient,\n\t\tumemClient,\n\t\tuphostClient,\n\t\tpuhostClient,\n\t\tpudbClient,\n\t\tpumemClient,\n\t\tppathxClient,\n\t}\n}", "func New(endpoint string, client *http.Client, opts *Options) (*Client, error) {\n\turl, err := url.ParseRequestURI(endpoint)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to parse endpoint: %v\", err)\n\t}\n\n\tmode := CompressZstd\n\tcacheDir := os.TempDir()\n\tif opts != nil {\n\t\tmode = opts.Compression\n\t\tif opts.CacheDir != \"\" {\n\t\t\tcacheDir = opts.CacheDir\n\t\t}\n\t}\n\n\treturn &Client{\n\t\thost: *url,\n\t\thclient: client,\n\t\ticlient: pb.NewJotFSProtobufClient(endpoint, client),\n\t\tcacheDir: cacheDir,\n\t\tmode: mode,\n\t}, nil\n}", "func New() (client *Client) {\n\thttpClient := &http.Client{}\n\treturn &Client{\n\t\thttpClient: httpClient,\n\t\tTimeout: 0,\n\t\tDisableKeepAlives: true,\n\t\tIdleConnectionTimeout: 0,\n\t\ttransport: &http.Transport{},\n\t\tMaxRetriesOnError: 1,\n\t}\n}", "func NewClient() Client {\n\tclient := Client{\n\t\tBaseURL: \"https://openapi.etsy.com/v2\",\n\t\tClient: Authenticate(),\n\t}\n\treturn client\n}", "func NewClient(url string) *Client {\n\treturn &Client{\n\t\thttpClient: &http.Client{Timeout: time.Minute},\n\t\turl: url,\n\t\tminVersion: minVersion,\n\t}\n}", "func NewClient(baseURL string, apiKey string) Client {\n\treturn &httpClient{\n\t\tapiKey: apiKey,\n\t\tbaseURL: baseURL,\n\t\tinst: &http.Client{},\n\t}\n}", "func NewClient(c *Config) *Client {\n\treturn &Client{\n\t\tBaseURL: BaseURLV1,\n\t\tUname: c.Username,\n\t\tPword: c.Password,\n\t\tHTTPClient: &http.Client{\n\t\t\tTimeout: time.Minute,\n\t\t},\n\t}\n}", "func newClient(opts *ClientOpts) (*Client, error) {\n\tbaseClient, err := newAPIClient(opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclient := &Client{\n\t\tAPIClient: *baseClient,\n\t}\n\n\t// init base operator\n\tclient.Node = newNodeClient(client)\n\tclient.Namespace = newNameSpaceClient(client)\n\tclient.ConfigMap = newConfigMapClient(client)\n\tclient.Service = newServiceClient(client)\n\tclient.Pod = newPodClient(client)\n\tclient.ReplicationController = newReplicationControllerClient(client)\n\tclient.StatefulSet = newStatefulSetClient(client)\n\tclient.DaemonSet = newDaemonSetClient(client)\n\tclient.Deployment = newDeploymentClient(client)\n\tclient.ReplicaSet = newReplicaSetClient(client)\n\n\treturn client, nil\n}", "func NewClient(registryURL string) *Client {\n\treturn &Client{\n\t\turl: registryURL + \"/sgulreg/services\",\n\t\thttpClient: http.DefaultClient,\n\t\treqMux: &sync.RWMutex{},\n\t\tregistered: false,\n\t}\n}", "func New(url string,\n\tcfg 
*config.Config,\n\tcredentialProvider *credentials.Credentials,\n\tstatsEngine stats.Engine,\n\tpublishMetricsInterval time.Duration,\n\trwTimeout time.Duration,\n\tdisableResourceMetrics bool) wsclient.ClientServer {\n\tcs := &clientServer{\n\t\tstatsEngine: statsEngine,\n\t\tpublishTicker: nil,\n\t\tpublishHealthTicker: nil,\n\t\tpublishMetricsInterval: publishMetricsInterval,\n\t}\n\tcs.URL = url\n\tcs.AgentConfig = cfg\n\tcs.CredentialProvider = credentialProvider\n\tcs.ServiceError = &tcsError{}\n\tcs.RequestHandlers = make(map[string]wsclient.RequestHandler)\n\tcs.MakeRequestHook = signRequestFunc(url, cs.AgentConfig.AWSRegion, credentialProvider)\n\tcs.TypeDecoder = NewTCSDecoder()\n\tcs.RWTimeout = rwTimeout\n\tcs.disableResourceMetrics = disableResourceMetrics\n\t// TODO make this context inherited from the handler\n\tcs.ctx, cs.cancel = context.WithCancel(context.TODO())\n\treturn cs\n}", "func NewClient(endpoint string, cli *http.Client) (*Client, error) {\n\tu, err := url.ParseRequestURI(endpoint)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"gaurun: failed to parse url - endpoint = %s: %w\", endpoint, err)\n\t}\n\n\tif cli == nil {\n\t\tcli = http.DefaultClient\n\t}\n\n\treturn &Client{\n\t\tEndpoint: u,\n\t\tHTTPClient: cli,\n\t}, nil\n}", "func NewClient(endpoint string) (*gophercloud.ProviderClient, error) {\n\tif endpoint == \"\" {\n\t\treturn os.NewClient(RackspaceUSIdentity)\n\t}\n\treturn os.NewClient(endpoint)\n}", "func NewClient(s ClientSettings) (*Client, error) {\n\tproxy := http.ProxyFromEnvironment\n\tif s.Proxy != nil {\n\t\tproxy = http.ProxyURL(s.Proxy)\n\t}\n\tlogger.Info(\"HTTP URL: %s\", s.URL)\n\tvar dialer, tlsDialer transport.Dialer\n\tvar err error\n\n\tdialer = transport.NetDialer(s.Timeout)\n\ttlsDialer = transport.TLSDialer(dialer, s.TLS, s.Timeout)\n\n\tif st := s.Observer; st != nil {\n\t\tdialer = transport.StatsDialer(dialer, st)\n\t\ttlsDialer = transport.StatsDialer(tlsDialer, st)\n\t}\n\tparams := s.Parameters\n\tvar encoder bodyEncoder\n\tcompression := s.CompressionLevel\n\tif compression == 0 {\n\t\tswitch s.Format {\n\t\tcase \"json\":\n\t\t\tencoder = newJSONEncoder(nil)\n\t\tcase \"json_lines\":\n\t\t\tencoder = newJSONLinesEncoder(nil)\n\t\t}\n\t} else {\n\t\tswitch s.Format {\n\t\tcase \"json\":\n\t\t\tencoder, err = newGzipEncoder(compression, nil)\n\t\tcase \"json_lines\":\n\t\t\tencoder, err = newGzipLinesEncoder(compression, nil)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tclient := &Client{\n\t\tConnection: Connection{\n\t\t\tURL: s.URL,\n\t\t\tUsername: s.Username,\n\t\t\tPassword: s.Password,\n\t\t\tContentType: s.ContentType,\n\t\t\thttp: &http.Client{\n\t\t\t\tTransport: &http.Transport{\n\t\t\t\t\tDial: dialer.Dial,\n\t\t\t\t\tDialTLS: tlsDialer.Dial,\n\t\t\t\t\tProxy: proxy,\n\t\t\t\t},\n\t\t\t\tTimeout: s.Timeout,\n\t\t\t},\n\t\t\tencoder: encoder,\n\t\t},\n\t\tparams: params,\n\t\tcompressionLevel: compression,\n\t\tproxyURL: s.Proxy,\n\t\tbatchPublish: s.BatchPublish,\n\t\theaders: s.Headers,\n\t\tformat: s.Format,\n\t}\n\n\treturn client, nil\n}", "func NewClient(api string, baseURI string, org string) *Client {\n\n\tif !strings.HasSuffix(baseURI, \"/\") {\n\t\tbaseURI += \"/\"\n\t}\n\n\treturn &Client{\n\t\tclient: &http.Client{},\n\t\tsentryAPIKey: api,\n\t\tsentryURI: baseURI,\n\t\tsentryOrg: org,\n\t}\n}", "func NewClient(apiKey string) *Client {\n\treturn &Client{\n\t\tC: http.Client{\n\t\t\tTimeout: 5 * time.Second,\n\t\t},\n\t\tService: \"https://saucenao.com\",\n\t\tAPIKey: 
apiKey,\n\t}\n}", "func NewClient(meta *metadata.Client, acc string) *http.Client {\n\treturn &http.Client{\n\t\tTransport: newRoundTripper(meta, acc),\n\t}\n}", "func NewClient(httpClient *http.Client, endpoint, apiVersion string, tlsConfig *tls.Config) (*Client, error) {\n\tif endpoint == \"\" {\n\t\treturn nil, errors.New(\"endpoint missing for httpClient configuration\")\n\t}\n\tif apiVersion == \"\" {\n\t\tapiVersion = DefaultAPIVersion\n\t}\n\n\tif tlsConfig != nil {\n\t\ttlsConfig.Renegotiation = tls.RenegotiateFreelyAsClient\n\t\ttlsConfig.BuildNameToCertificate()\n\t\thttpClient.Transport = &http.Transport{TLSClientConfig: tlsConfig}\n\t}\n\n\treturn &Client{\n\t\tendpoint: endpoint,\n\t\tapiVersion: apiVersion,\n\t\thttpClient: httpClient,\n\t}, nil\n}", "func New(token string) *Client {\n\tbaseURL, err := url.Parse(DefaultBaseURL)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn &Client{\n\t\tBaseURL: baseURL,\n\t\tClient: &http.Client{},\n\t\tToken: token,\n\t}\n}", "func NewClient(httpClient *http.Client, apikey string) *Service {\n\treturn &Service{\n\t\tsling: sling.New().Client(httpClient).Base(baseURL).Set(\"Authorization\", apikey),\n\t}\n}", "func NewClient() *Client {\n\tclient := &Client{\n\t\turl: baseURL,\n\t\thttpClient: &http.Client{\n\t\t\tTransport: &http.Transport{\n\t\t\t\tMaxConnsPerHost: maxConnsPerHost,\n\t\t\t},\n\t\t},\n\t}\n\n\treturn client\n}", "func NewHTTPClient(conn net.Conn, opt *codec.Option) (*Client, error) {\n\t_, _ = io.WriteString(conn, fmt.Sprintf(\"CONNECT %s HTTP/1.0\\n\\n\", defaultHandlePath))\n\n\tres, err := http.ReadResponse(bufio.NewReader(conn), &http.Request{Method: \"CONNECT\"})\n\tif err == nil && res.Status == \"200 Connected to Gingle RPC\" {\n\t\treturn NewRPCClient(conn, opt)\n\t}\n\n\tif err == nil {\n\t\terr = fmt.Errorf(\"client: failed to new http client, err: unexpected http response\")\n\t}\n\treturn nil, err\n}", "func NewClient(url string) *Client {\n\treturn &Client{URL: url, Default: true}\n}", "func New(opts Options) (*Client, error) {\n\tbaseUrlToParse, err := url.JoinPath(opts.BaseURL, \"/\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbaseURL, err := url.Parse(baseUrlToParse)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif isBaseURLSet(opts.BaseURL) {\n\t\tif !baseURL.IsAbs() {\n\t\t\treturn nil, fmt.Errorf(\"baseURL should be an absolute url\")\n\t\t}\n\n\t\tscheme := baseURL.Scheme\n\t\tif scheme != \"http\" && scheme != \"https\" {\n\t\t\treturn nil, fmt.Errorf(\"unsupported scheme: %s\", scheme)\n\t\t}\n\t}\n\n\tclient := &Client{\n\t\tBaseURL: baseURL,\n\t\tDefaultHeaders: Headers{},\n\n\t\tclient: http.DefaultClient,\n\t}\n\n\tif opts.Headers != nil {\n\t\tclient.DefaultHeaders = opts.Headers\n\t}\n\tif opts.HTTPClient != nil {\n\t\tclient.client = opts.HTTPClient\n\t}\n\tif opts.Host != \"\" {\n\t\tclient.Host = opts.Host\n\t}\n\n\treturn client, nil\n}", "func NewClient() (c *Client) {\n\tvar (\n\t\tcookie *cookiejar.Jar\n\t)\n\n\tcookie, _ = cookiejar.New(nil)\n\n\tc = &Client{\n\t\tClient: &http.Client{\n\t\t\tJar: cookie,\n\t\t},\n\t\tUserAgent: \"Sbss-Client\",\n\t}\n\n\treturn\n}", "func New(sess *session.Session, endpoint string) Client {\n\treturn NewWithHTTPClient(sess, http.DefaultClient, endpoint)\n}", "func NewClient(endpoint string) *Client {\n\treturn &Client{\n\t\tendpoint: endpoint,\n\t}\n}", "func NewClient(c *Config) (*Client, error) {\n\tdef := DefaultConfig()\n\tif def == nil {\n\t\treturn nil, fmt.Errorf(\"could not create/read default configuration\")\n\t}\n\tif def.Error 
!= nil {\n\t\treturn nil, errwrap.Wrapf(\"error encountered setting up default configuration: {{err}}\", def.Error)\n\t}\n\n\tif c == nil {\n\t\tc = def\n\t}\n\n\tc.modifyLock.Lock()\n\tdefer c.modifyLock.Unlock()\n\n\tif c.MinRetryWait == 0 {\n\t\tc.MinRetryWait = def.MinRetryWait\n\t}\n\n\tif c.MaxRetryWait == 0 {\n\t\tc.MaxRetryWait = def.MaxRetryWait\n\t}\n\n\tif c.HttpClient == nil {\n\t\tc.HttpClient = def.HttpClient\n\t}\n\tif c.HttpClient.Transport == nil {\n\t\tc.HttpClient.Transport = def.HttpClient.Transport\n\t}\n\n\taddress := c.Address\n\tif c.AgentAddress != \"\" {\n\t\taddress = c.AgentAddress\n\t}\n\n\tu, err := c.ParseAddress(address)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclient := &Client{\n\t\taddr: u,\n\t\tconfig: c,\n\t\theaders: make(http.Header),\n\t}\n\n\tif c.ReadYourWrites {\n\t\tclient.replicationStateStore = &replicationStateStore{}\n\t}\n\n\t// Add the VaultRequest SSRF protection header\n\tclient.headers[RequestHeaderName] = []string{\"true\"}\n\n\tif token := os.Getenv(EnvVaultToken); token != \"\" {\n\t\tclient.token = token\n\t}\n\n\tif namespace := os.Getenv(EnvVaultNamespace); namespace != \"\" {\n\t\tclient.setNamespace(namespace)\n\t}\n\n\treturn client, nil\n}", "func (c *Config) NewClient() *APIClient {\n\tclient := api.NewClient(c.AccessToken, c.AccessTokenSecret, c.Zone)\n\n\tif c.AcceptLanguage != \"\" {\n\t\tclient.AcceptLanguage = c.AcceptLanguage\n\t}\n\tif c.APIRootURL != \"\" {\n\t\tapi.SakuraCloudAPIRoot = c.APIRootURL\n\t}\n\tif c.RetryMax > 0 {\n\t\tclient.RetryMax = c.RetryMax\n\t}\n\tif c.RetryInterval > 0 {\n\t\tclient.RetryInterval = time.Duration(c.RetryInterval) * time.Second\n\t}\n\tif c.TimeoutMinute > 0 {\n\t\tclient.DefaultTimeoutDuration = time.Duration(c.TimeoutMinute) * time.Minute\n\t}\n\n\thttpClient := &http.Client{}\n\tif c.APIRequestTimeout > 0 {\n\t\thttpClient.Timeout = time.Duration(c.APIRequestTimeout) * time.Second\n\t}\n\tif c.APIRequestRateLimit > 0 {\n\t\thttpClient.Transport = &api.RateLimitRoundTripper{RateLimitPerSec: c.APIRequestRateLimit}\n\t}\n\tclient.HTTPClient = httpClient\n\n\tif c.TraceMode {\n\t\tclient.TraceMode = true\n\t\tlog.SetPrefix(\"[DEBUG] \")\n\t}\n\tclient.UserAgent = \"Terraform for SakuraCloud/v\" + Version\n\n\treturn &APIClient{\n\t\tClient: client,\n\t}\n}", "func NewClient(resource string) (*Client, os.Error) {\n\tvar client = new(Client)\n\tvar err os.Error\n\n\t// setup host\n\tif client.resource, err = http.ParseURL(resource); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Setup conn\n\tvar tcpConn net.Conn\n\tif tcpConn, err = net.Dial(\"tcp\", client.resource.Host); err != nil {\n\t\treturn nil, err\n\t}\n\tclient.conn = http.NewClientConn(tcpConn, nil)\n\n\treturn client, nil\n}", "func NewClient() *Client {\n\tclient := &Client{\n\t\tEndpoint: \"http://192.168.1.1/JNAP/\",\n\t}\n\treturn client\n}", "func NewClient(config *ClientConfig) *Client {\n\tvar client *Client\n\n\thttpClient := &fasthttp.Client{\n\t\tName: \"Gocursive\",\n\t\tMaxConnsPerHost: 10240,\n\t}\n\n\tif !strings.HasSuffix(config.url.Path, \"/\") {\n\t\tconfig.url.Path += \"/\"\n\t}\n\n\tconfig.outputDir, _ = filepath.Abs(config.outputDir)\n\n\tclient = &Client{\n\t\tconfig: config,\n\t\thttpClient: httpClient,\n\t\tdirectories: []string{},\n\t\tfiles: []*url.URL{},\n\t\tbytesTotal: 0,\n\t\tbytesRecv: 0,\n\t}\n\n\treturn client\n}", "func NewClient(config *Config) (client *Client, err error) {\n\t// bootstrap the config\n\tdefConfig := DefaultConfig()\n\n\tif len(config.ApiAddress) == 0 
{\n\t\tconfig.ApiAddress = defConfig.ApiAddress\n\t}\n\n\tif len(config.Username) == 0 {\n\t\tconfig.Username = defConfig.Username\n\t}\n\n\tif len(config.Password) == 0 {\n\t\tconfig.Password = defConfig.Password\n\t}\n\n\tif len(config.Token) == 0 {\n\t\tconfig.Token = defConfig.Token\n\t}\n\n\tif len(config.UserAgent) == 0 {\n\t\tconfig.UserAgent = defConfig.UserAgent\n\t}\n\n\tif config.HttpClient == nil {\n\t\tconfig.HttpClient = defConfig.HttpClient\n\t}\n\n\tif config.HttpClient.Transport == nil {\n\t\tconfig.HttpClient.Transport = shallowDefaultTransport()\n\t}\n\n\tvar tp *http.Transport\n\n\tswitch t := config.HttpClient.Transport.(type) {\n\tcase *http.Transport:\n\t\ttp = t\n\tcase *oauth2.Transport:\n\t\tif bt, ok := t.Base.(*http.Transport); ok {\n\t\t\ttp = bt\n\t\t}\n\t}\n\n\tif tp != nil {\n\t\tif tp.TLSClientConfig == nil {\n\t\t\ttp.TLSClientConfig = &tls.Config{}\n\t\t}\n\t\ttp.TLSClientConfig.InsecureSkipVerify = config.SkipSslValidation\n\t}\n\n\tconfig.ApiAddress = strings.TrimRight(config.ApiAddress, \"/\")\n\n\tclient = &Client{\n\t\tConfig: *config,\n\t}\n\n\tif err := client.refreshEndpoint(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn client, nil\n}", "func NewClient(u string) *Client {\n\treturn &Client{URL: u}\n}", "func New(modemURI string) *Client {\n\treturn &Client{modemURI, http.DefaultClient}\n}", "func newClient(cfg upspin.Config, server, cache *upspin.Endpoint) (upspin.Config, upspin.Client) {\n\tcfg = setCertPool(cfg)\n\tcfg = config.SetStoreEndpoint(cfg, *server)\n\tcfg = config.SetDirEndpoint(cfg, *server)\n\tcfg = config.SetCacheEndpoint(cfg, *cache)\n\treturn cfg, client.New(cfg)\n}", "func NewClient(apiURL string, logger lager.Logger) Client {\n\treturn &client{\n\t\trequestGenerator: rata.NewRequestGenerator(apiURL, api.Routes),\n\t\tgivenHTTPClient: &http.Client{\n\t\t\tTransport: &http.Transport{\n\t\t\t\tDisableKeepAlives: false,\n\t\t\t\tResponseHeaderTimeout: 20 * time.Second,\n\t\t\t\tMaxIdleConns: 200,\n\t\t\t},\n\t\t},\n\t\tlogger: logger,\n\t}\n}", "func newBaseClient() *baseClient {\n\treturn &baseClient{\n\t\thttpClient: http.DefaultClient,\n\t\tmethod: \"GET\",\n\t\theader: make(http.Header),\n\t}\n}", "func NewClient(endpoint string) *Client {\n\tc := &Client{\n\t\tendpoint: endpoint,\n\t}\n\n\tif c.httpClient == nil {\n\t\tc.httpClient = http.DefaultClient\n\t}\n\treturn c\n}", "func New() Client {\n\treturn &client{}\n}", "func newClient() *sts.STS {\n\tsess := session.Must(session.NewSessionWithOptions(session.Options{\n\t\tSharedConfigState: session.SharedConfigEnable,\n\t}))\n\tconfig := aws.NewConfig()\n\tif debug {\n\t\tconfig.WithLogLevel(aws.LogDebugWithHTTPBody)\n\t}\n\treturn sts.New(sess, config)\n}", "func New() *Client {\n\treturn &Client{*http.DefaultClient, MaxSendAttempts}\n}", "func (h httpUtil) NewClientSimple() (*http.Client, error) {\n\treturn h.NewClient(\"\", false, nil)\n}", "func NewClient(endpointURL, soapActionBase string, cl *http.Client) Caller {\n\tif cl == nil {\n\t\tcl = http.DefaultClient\n\t}\n\tif cl.Transport == nil {\n\t\tcl.Transport = http.DefaultTransport\n\t}\n\tcl.Transport = soaptrip.New(cl.Transport)\n\treturn &soapClient{\n\t\tClient: cl,\n\t\tURL: endpointURL,\n\t\tSOAPActionBase: soapActionBase,\n\t\tbufpool: bp.New(1024),\n\t}\n}", "func GetNewClient(service string, httpFactory HttpClientFactory) (*OauthClient, error) {\n\tjar, err := cookiejar.New(nil)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Munge on the service a little bit, force it to have no trailing / and 
always start with https://\n\turl, err := url.Parse(service)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\turl.Scheme = \"https\"\n\turl.Path = \"\"\n\n\tclient := &OauthClient{}\n\tclient.Service = url.String()\n\tif httpFactory != nil {\n\t\tclient.Client = httpFactory()\n\t} else {\n\t\tclient.Client = &http.Client{}\n\t}\n\tclient.Client.Jar = jar\n\tclient.Headers = make(map[string]string)\n\tclient.SourceHeader = \"cloud-golang-sdk\"\n\treturn client, err\n}", "func New(logger log.Logger) *Client {\n\treturn &Client{\n\t\tlogger: logger,\n\n\t\tClient: http.Client{\n\t\t\tTransport: &nethttp.Transport{\n\t\t\t\tRoundTripper: &http.Transport{\n\t\t\t\t\tDialContext: (&net.Dialer{\n\t\t\t\t\t\tTimeout: 30 * time.Second,\n\t\t\t\t\t\tKeepAlive: 30 * time.Second,\n\t\t\t\t\t\tDualStack: true,\n\t\t\t\t\t}).DialContext,\n\t\t\t\t\tMaxIdleConns: 1,\n\t\t\t\t\tIdleConnTimeout: 10 * time.Millisecond,\n\t\t\t\t\tTLSHandshakeTimeout: 10 * time.Second,\n\t\t\t\t\tExpectContinueTimeout: 1 * time.Second,\n\t\t\t\t\tDisableKeepAlives: true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func New() *Client {\n return &Client{&API{}}\n}", "func NewClient(uri string, cli *http.Client) *Client {\n\treturn &Client{cli, strings.TrimSuffix(uri, \"/\")}\n}", "func NewClient(httpClient *http.Client, atlasSubdomain string) (*Client, error) {\n\tif httpClient == nil {\n\t\thttpClient = &http.Client{}\n\t}\n\n\tvar baseURLStr strings.Builder\n\tbaseURLStr.WriteString(\"https://\")\n\tbaseURLStr.WriteString(atlasSubdomain)\n\tbaseURLStr.WriteString(\".\")\n\tbaseURLStr.WriteString(defaultBaseURL)\n\n\tbaseURL, err := url.Parse(baseURLStr.String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc := &Client{client: httpClient, BaseURL: baseURL}\n\tc.common.client = c\n\tc.ApplicationRole = (*ApplicationRoleService)(&c.common)\n\tc.AuditRecords = (*AuditRecordsService)(&c.common)\n\tc.AvatarsService = (*AvatarsService)(&c.common)\n\treturn c, nil\n}", "func New() Client {\n\tc := http.DefaultClient\n\tc.Timeout = time.Second * 10\n\n\treturn &client{c: c}\n}", "func New(addr string, retryTimeout time.Duration, dialer rpc.DialerFn) *Client {\n\tsl := rpc.NewStreamLayer(nil, byte(api.ClientMessage), dialer)\n\treturn &Client{addr, sl, retryTimeout}\n}", "func New(opts ...Option) *Client {\n\tvar c Client\n\n\tfor _, opt := range opts {\n\t\topt(&c)\n\t}\n\n\tif c.client == nil {\n\t\tc.client = hystrix.NewClient()\n\t}\n\n\tht := httptransport.NewWithClient(\n\t\tc.url.Host,\n\t\tc.url.Path,\n\t\t[]string{c.url.Scheme},\n\t\t&http.Client{\n\t\t\tTransport: NewHystrixTransport(c.client),\n\t\t},\n\t)\n\n\tc.Client = *admin.New(ht, nil)\n\n\treturn &c\n}", "func New(subdomain string) *Client {\n\treturn &Client{\n\t\tScheme: \"https\",\n\t\tAPIScheme: \"https\",\n\t\tAPIHost: \"digto.org\",\n\t\tAPIHeaderHost: \"digto.org\",\n\t\tSubdomain: subdomain,\n\t\thttpClient: &http.Client{},\n\t\tLog: func(s ...interface{}) {},\n\t}\n}" ]
[ "0.7146831", "0.7028338", "0.701566", "0.69679964", "0.69676816", "0.6900288", "0.6884137", "0.68382514", "0.68368256", "0.68338436", "0.6792138", "0.6765314", "0.6765314", "0.67554337", "0.67366046", "0.67330617", "0.66954935", "0.6687802", "0.66775715", "0.66609675", "0.6654732", "0.6649712", "0.66400534", "0.66386694", "0.66258794", "0.6616877", "0.660816", "0.65964776", "0.6591136", "0.6570962", "0.65669274", "0.6551386", "0.6549308", "0.65447146", "0.65424585", "0.6542276", "0.6531506", "0.6522917", "0.6511447", "0.6507886", "0.64985615", "0.6496067", "0.6494125", "0.64932543", "0.64931035", "0.64786106", "0.64785075", "0.647278", "0.64682055", "0.64549524", "0.6452675", "0.6450131", "0.64402187", "0.6432521", "0.64323914", "0.6428235", "0.6427561", "0.64174473", "0.6417226", "0.6416747", "0.6416048", "0.6411225", "0.64112234", "0.6409109", "0.64084834", "0.64018023", "0.63989776", "0.6396736", "0.63954186", "0.6380339", "0.6379467", "0.63753617", "0.6373817", "0.6373443", "0.6369552", "0.63677436", "0.6367321", "0.6364113", "0.63617474", "0.6358665", "0.63522345", "0.63521796", "0.63501024", "0.6343645", "0.6343119", "0.6342644", "0.6342495", "0.6339196", "0.6330764", "0.63260174", "0.6324665", "0.63242346", "0.632377", "0.63218725", "0.6320585", "0.6314412", "0.63102126", "0.6307251", "0.6302534", "0.62926483", "0.62921834" ]
0.0
-1
Get ProtectedEntity for Persistent Volume referenced by this PVC ProtectedEntity. Candidates are IVD ProtectedEntity (nonGuestCluster) and ParaVirt ProtectedEntity (GuestCluster).
func (this PVCProtectedEntity) getProtectedEntityForPV(ctx context.Context, pv *core_v1.PersistentVolume) (astrolabe.ProtectedEntity, error) { if pv.Spec.CSI != nil { if pv.Spec.CSI.Driver == VSphereCSIProvisioner { if pv.Spec.AccessModes[0] == core_v1.ReadWriteOnce { var pvIDstr string if this.ppetm.isGuest { pvIDstr = pv.Name // use pv name rather than pv volume handle as the ID of paravirt PE, since it is easier to retrieve pv volume handle from pv name } else { pvIDstr = pv.Spec.CSI.VolumeHandle } pvPEType := this.getComponentPEType() pvPEID := astrolabe.NewProtectedEntityIDWithSnapshotID(pvPEType, pvIDstr, this.id.GetSnapshotID()) pvPE, err := this.ppetm.pem.GetProtectedEntity(ctx, pvPEID) if err != nil { return nil, errors.Wrapf(err, "Could not get Protected Entity for PV %s", pvPEID.String()) } return pvPE, nil } else { return nil, errors.Errorf("Unexpected access mode, %v, for Persistent Volume %s", pv.Spec.AccessModes[0], pv.Name) } } } return nil, errors.Errorf("Could not find PE for Persistent Volume %s", pv.Name) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *StackEbrc) GetVolume(ref string) (*abstract.Volume, fail.Error) {\n\tlogrus.Debug(\"ebrc.Client.GetVolume() called\")\n\tdefer logrus.Debug(\"ebrc.Client.GetVolume() done\")\n\n\tvar volume abstract.Volume\n\n\t_, vdc, err := s.getOrgVdc()\n\tif err != nil {\n\t\treturn nil, fail.Wrap(err, fmt.Sprintf(\"Error listing volumes\"))\n\t}\n\n\t// FIXME: Add data\n\tdr, err := vdc.QueryDisk(ref)\n\tif err == nil {\n\t\tthed, err := vdc.FindDiskByHREF(dr.Disk.HREF)\n\t\tif err == nil {\n\t\t\tvolume = abstract.Volume{\n\t\t\t\tName: thed.Disk.Name,\n\t\t\t\tSize: thed.Disk.Size,\n\t\t\t\tID: thed.Disk.Id,\n\t\t\t}\n\t\t}\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &volume, nil\n}", "func (client VolumesClient) Get(ctx context.Context, location string, storageSubSystem string, storagePool string, volume string) (result Volume, err error) {\n\treq, err := client.GetPreparer(ctx, location, storageSubSystem, storagePool, volume)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"fabric.VolumesClient\", \"Get\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.GetSender(req)\n\tif err != nil {\n\t\tresult.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"fabric.VolumesClient\", \"Get\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult, err = client.GetResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"fabric.VolumesClient\", \"Get\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}", "func (d *Data) GetVolume(v dvid.VersionID, vox *Labels, supervoxels bool, scale uint8, roiname dvid.InstanceName) ([]byte, error) {\n\tr, err := imageblk.GetROI(v, roiname, vox)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := d.GetLabels(v, supervoxels, scale, vox, r); err != nil {\n\t\treturn nil, err\n\t}\n\treturn vox.Data(), nil\n}", "func (x SecureCredentialEntity) GetNerdStorage() NerdStorageEntityScope {\n\treturn x.NerdStorage\n}", "func (me *PROTECTEDSOURCEVOLUMEINFO_IMPL) GetProtectedSourceVolumeInfo(\n\tSourceInfo *models.GetSourceVolumeInfoParams) (*models.ProtectedSourceVolumeInfo, error) {\n\t//the endpoint path uri\n\t_pathUrl := \"/protectedSourceVolumeInfo/{sourceId}\"\n\n\t//variable to hold errors\n\tvar err error = nil\n\t//process optional template parameters\n\t_pathUrl, err = apihelper.AppendUrlWithTemplateParameters(_pathUrl, map[string]interface{}{\n\t\t\"sourceId\": SourceInfo.SourceId,\n\t})\n\tif err != nil {\n\t\t//error in template param handling\n\t\treturn nil, err\n\t}\n\n\t//the base uri for api requests\n\t_queryBuilder := configuration.GetBaseURI(configuration.ENUM_DEFAULT, me.config)\n\n\t//prepare query string for API call\n\t_queryBuilder = _queryBuilder + _pathUrl\n\n\t//validate and preprocess url\n\t_queryBuilder, err = apihelper.CleanUrl(_queryBuilder)\n\tif err != nil {\n\t\t//error in url validation or cleaning\n\t\treturn nil, err\n\t}\n\t//prepare headers for the outgoing request\n\theaders := map[string]interface{}{\n\t\t\"user-agent\": \"app-Go-sdk-1.1.1\",\n\t\t\"accept\": \"application/json\",\n\t\t\"Authorization\": fmt.Sprintf(\"Bearer %s\", me.config.OAuthAccessToken()),\n\t}\n\n\t//prepare API request\n\t_request := unirest.Get(_queryBuilder, headers)\n\t//and invoke the API call request to fetch the response\n\t_response, err := unirest.AsString(_request, me.config.SkipSSL())\n\tif err != nil {\n\t\t//error in API invocation\n\t\treturn nil, err\n\t}\n\n\t//error handling using HTTP status 
codes\n\tif _response.Code == 401 {\n\t\terr = apihelper.NewAPIError(\"Unauthorized\", _response.Code, _response.RawBody)\n\t} else if _response.Code == 404 {\n\t\terr = apihelper.NewAPIError(\"Snapshot does not exist.\", _response.Code, _response.RawBody)\n\t} else if _response.Code == 500 {\n\t\terr = apihelper.NewAPIError(\"Unexpected error\", _response.Code, _response.RawBody)\n\t} else if _response.Code == 502 {\n\t\terr = apihelper.NewAPIError(\"Bad Gateway.\", _response.Code, _response.RawBody)\n\t} else if _response.Code == 504 {\n\t\terr = apihelper.NewAPIError(\"Gateway Timeout.\", _response.Code, _response.RawBody)\n\t} else if (_response.Code < 200) || (_response.Code > 206) { //[200,206] = HTTP OK\n\t\terr = apihelper.NewAPIError(\"HTTP Response Not OK\", _response.Code, _response.RawBody)\n\t}\n\tif err != nil {\n\t\t//error detected in status code validation\n\t\treturn nil, err\n\t}\n\n\t//returning the response\n\tvar retVal *models.ProtectedSourceVolumeInfo = &models.ProtectedSourceVolumeInfo{}\n\terr = json.Unmarshal(_response.RawBody, &retVal)\n\n\tif err != nil {\n\t\t//error in parsing\n\t\treturn nil, err\n\t}\n\treturn retVal, nil\n\n}", "func getBoundPV(client kubernetes.Interface, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) {\n\t// Get new copy of the claim\n\tclaim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Get the bound PV\n\tpv, err := client.CoreV1().PersistentVolumes().Get(context.TODO(), claim.Spec.VolumeName, metav1.GetOptions{})\n\treturn pv, err\n}", "func (a *Client) GetProtectedEntityInfo(params *GetProtectedEntityInfoParams, opts ...ClientOption) (*GetProtectedEntityInfoOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetProtectedEntityInfoParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"getProtectedEntityInfo\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/astrolabe/{service}/{protectedEntityID}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetProtectedEntityInfoReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*GetProtectedEntityInfoOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for getProtectedEntityInfo: API contract not enforced by server. 
Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (p *glusterBlockProvisioner) Provision(options controller.VolumeOptions) (*v1.PersistentVolume, error) {\n var err error\n\n glog.V(4).Infof(\"glusterblock: Provison VolumeOptions %v\", options)\n\t//scName := storageutil.GetClaimStorageClass(r.options.PVC)\n\tcfg, err := parseClassParameters(options.Parameters, p.client)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tp.provConfig = *cfg\n\n\tglog.V(4).Infof(\"glusterfs: creating volume with configuration %+v\", p.provConfig)\n // TODO: \n\tif options.PVC.Spec.Selector != nil {\n\t\treturn nil, fmt.Errorf(\"claim Selector is not supported\")\n\t}\n\tserver, path, err := p.createVolume(options.PVName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tglog.V(1).Infof(\"Server and path returned :%v %v\", server, path)\n\n\tpv := &v1.PersistentVolume{\n\t\tObjectMeta: v1.ObjectMeta{\n\t\t\tName: options.PVName,\n\t\t\tAnnotations: map[string]string{\n\t\t\t\t\"glusterBlockProvisionerIdentity\": string(p.identity),\n\t\t\t},\n\t\t},\n\t\tSpec: v1.PersistentVolumeSpec{\n\t\t\tPersistentVolumeReclaimPolicy: options.PersistentVolumeReclaimPolicy,\n\t\t\tAccessModes: options.PVC.Spec.AccessModes,\n\t\t\tCapacity: v1.ResourceList{\n\t\t\t\tv1.ResourceName(v1.ResourceStorage): options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)],\n\t\t\t},\n\t\t\tPersistentVolumeSource: v1.PersistentVolumeSource{\n\t\t\t\tISCSI: &v1.ISCSIVolumeSource{\n\t\t\t\t\tTargetPortal: server,\n\t\t\t\t\tIQN: path,\n\t\t\t\t\tLun: 0,\n\t\t\t\t\tFSType: \"ext3\",\n\t\t\t\t\tReadOnly: false,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\treturn pv, nil\n}", "func (p *Poloniex) GetVolume(ctx context.Context) (interface{}, error) {\n\tvar resp interface{}\n\tpath := \"/public?command=return24hVolume\"\n\n\treturn resp, p.SendHTTPRequest(ctx, exchange.RestSpot, path, &resp)\n}", "func (p *glusterBlockProvisioner) Delete(volume *v1.PersistentVolume) error {\n\tann, ok := volume.Annotations[\"glusterBlockProvisionerIdentity\"]\n\tif !ok {\n\t\treturn errors.New(\"identity annotation not found on PV\")\n\t}\n\tif ann != string(p.identity) {\n\t\treturn &controller.IgnoredError{\"identity annotation on PV does not match ours\"}\n\t}\n\n\treturn nil\n}", "func (e *Department) EntStorage() ent.Storage { return ent.GetStorage(e) }", "func (srv *VolumeService) Get(ref string) (*api.Volume, error) {\n\tvolumes, err := srv.List()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, volume := range volumes {\n\t\tif volume.ID == ref || volume.Name == ref {\n\t\t\treturn &volume, nil\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"Volume '%s' does not exists\", ref)\n}", "func (m *CommunicationsIdentitySet) GetGuest()(Identityable) {\n val, err := m.GetBackingStore().Get(\"guest\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(Identityable)\n }\n return nil\n}", "func (o *SparseCloudSnapshotAccount) GetProtected() (out bool) {\n\n\tif o.Protected == nil {\n\t\treturn\n\t}\n\n\treturn *o.Protected\n}", "func (r RepositoryImpl) GetVoteEntity(id int64) *VoteEntity {\n\tif v, ok := r.database[id]; ok {\n\t\treturn &v\n\t}\n\treturn nil\n}", "func (a *HyperflexApiService) GetHyperflexProtectedClusterByMoid(ctx context.Context, moid string) ApiGetHyperflexProtectedClusterByMoidRequest {\n\treturn ApiGetHyperflexProtectedClusterByMoidRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func (c *vmClient) Get(uid meta.UID) (*api.VM, error) {\n\tlog.Debugf(\"Client.Get; UID: 
%q, Kind: %s\", uid, api.KindVM)\n\tobject, err := c.storage.GetByID(api.KindVM, uid)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn object.(*api.VM), nil\n}", "func (o *VirtualizationVmwareVirtualMachineAllOf) GetProtectedVm() bool {\n\tif o == nil || o.ProtectedVm == nil {\n\t\tvar ret bool\n\t\treturn ret\n\t}\n\treturn *o.ProtectedVm\n}", "func (c *configuration) PersistentVolume(clientSet ClientSet) *PersistentVolume {\n\tif clientSet != nil {\n\t\treturn NewPersistentVolume(clientSet)\n\t}\n\treturn nil\n}", "func (s *persistentVolumeLister) Get(name string) (*corev1.PersistentVolume, error) {\n\treturn s.client.CoreV1().PersistentVolumes().Get(name, metav1.GetOptions{})\n}", "func (digitalocean DigitalOcean) GetVolume(id string) (*godo.Volume, error) {\n\tdoc, err := DigitalOceanClient()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvolume, _, err := doc.client.Storage.GetVolume(doc.context, id)\n\n\tif err != nil {\n\t\t//log.Fatal(err)\n\t}\n\n\treturn volume, err\n}", "func (p *cinderProvisioner) Delete(pv *v1.PersistentVolume) error {\n\tann, ok := pv.Annotations[provisionerIDAnn]\n\tif !ok {\n\t\treturn errors.New(\"identity annotation not found on PV\")\n\t}\n\tif ann != p.identity {\n\t\treturn &controller.IgnoredError{\n\t\t\tReason: \"identity annotation on PV does not match ours\",\n\t\t}\n\t}\n\t// TODO when beta is removed, have to check kube version and pick v1/beta\n\t// accordingly: maybe the controller lib should offer a function for that\n\n\tvolumeID, ok := pv.Annotations[cinderVolumeID]\n\tif !ok {\n\t\treturn errors.New(cinderVolumeID + \" annotation not found on PV\")\n\t}\n\n\tctx := deleteCtx{p, pv}\n\tmapper, err := newVolumeMapperFromPV(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmapper.AuthTeardown(ctx)\n\n\terr = disconnectCinderVolume(p, volumeID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = unreserveCinderVolume(p, volumeID)\n\tif err != nil {\n\t\t// TODO: Create placeholder PV?\n\t\tglog.Errorf(\"Failed to unreserve volume: %v\", err)\n\t\treturn err\n\t}\n\n\terr = deleteCinderVolume(p, volumeID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tglog.V(2).Infof(\"Successfully deleted cinder volume %s\", volumeID)\n\treturn nil\n}", "func getVirtualHardDisk(c *wssdcloudstorage.VirtualHardDisk, group string) *storage.VirtualHardDisk {\n\treturn &storage.VirtualHardDisk{\n\t\tName: &c.Name,\n\t\tID: &c.Id,\n\t\tVersion: &c.Status.Version.Number,\n\t\tVirtualHardDiskProperties: &storage.VirtualHardDiskProperties{\n\t\t\tStatuses: status.GetStatuses(c.GetStatus()),\n\t\t\tDiskSizeBytes: &c.Size,\n\t\t\tDynamic: &c.Dynamic,\n\t\t\tBlocksizebytes: &c.Blocksizebytes,\n\t\t\tLogicalsectorbytes: &c.Logicalsectorbytes,\n\t\t\tPhysicalsectorbytes: &c.Physicalsectorbytes,\n\t\t\tControllernumber: &c.Controllernumber,\n\t\t\tControllerlocation: &c.Controllerlocation,\n\t\t\tDisknumber: &c.Disknumber,\n\t\t\tVirtualMachineName: &c.VirtualmachineName,\n\t\t\tScsipath: &c.Scsipath,\n\t\t\tHyperVGeneration: c.HyperVGeneration,\n\t\t\tDiskFileFormat: c.DiskFileFormat,\n\t\t},\n\t\tTags: tags.ProtoToMap(c.Tags),\n\t}\n}", "func (a *Client) DeleteProtectedEntity(params *DeleteProtectedEntityParams, opts ...ClientOption) (*DeleteProtectedEntityOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewDeleteProtectedEntityParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"deleteProtectedEntity\",\n\t\tMethod: \"DELETE\",\n\t\tPathPattern: 
\"/astrolabe/{service}/{protectedEntityID}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &DeleteProtectedEntityReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*DeleteProtectedEntityOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for deleteProtectedEntity: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (x InfrastructureHostEntity) GetNerdStorage() NerdStorageEntityScope {\n\treturn x.NerdStorage\n}", "func getPersistentVolumeSpec(fcdID string, persistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy, labels map[string]string) *v1.PersistentVolume {\n\tvar (\n\t\tpvConfig framework.PersistentVolumeConfig\n\t\tpv *v1.PersistentVolume\n\t\tclaimRef *v1.ObjectReference\n\t)\n\tpvConfig = framework.PersistentVolumeConfig{\n\t\tNamePrefix: \"vspherepv-\",\n\t\tPVSource: v1.PersistentVolumeSource{\n\t\t\tCSI: &v1.CSIPersistentVolumeSource{\n\t\t\t\tDriver: e2evSphereCSIBlockDriverName,\n\t\t\t\tVolumeHandle: fcdID,\n\t\t\t\tReadOnly: false,\n\t\t\t\tFSType: \"ext4\",\n\t\t\t},\n\t\t},\n\t\tPrebind: nil,\n\t}\n\n\tpv = &v1.PersistentVolume{\n\t\tTypeMeta: metav1.TypeMeta{},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tGenerateName: pvConfig.NamePrefix,\n\t\t},\n\t\tSpec: v1.PersistentVolumeSpec{\n\t\t\tPersistentVolumeReclaimPolicy: persistentVolumeReclaimPolicy,\n\t\t\tCapacity: v1.ResourceList{\n\t\t\t\tv1.ResourceName(v1.ResourceStorage): resource.MustParse(\"2Gi\"),\n\t\t\t},\n\t\t\tPersistentVolumeSource: pvConfig.PVSource,\n\t\t\tAccessModes: []v1.PersistentVolumeAccessMode{\n\t\t\t\tv1.ReadWriteOnce,\n\t\t\t},\n\t\t\tClaimRef: claimRef,\n\t\t\tStorageClassName: \"\",\n\t\t},\n\t\tStatus: v1.PersistentVolumeStatus{},\n\t}\n\tif labels != nil {\n\t\tpv.Labels = labels\n\t}\n\t// Annotation needed to delete a statically created pv\n\tannotations := make(map[string]string)\n\tannotations[\"pv.kubernetes.io/provisioned-by\"] = e2evSphereCSIBlockDriverName\n\tpv.Annotations = annotations\n\treturn pv\n}", "func (a *Client) ListProtectedEntities(params *ListProtectedEntitiesParams, opts ...ClientOption) (*ListProtectedEntitiesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewListProtectedEntitiesParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"listProtectedEntities\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/astrolabe/{service}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &ListProtectedEntitiesReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*ListProtectedEntitiesOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, 
absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for listProtectedEntities: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (cl *Client) gceVolumeDetach(ctx context.Context, vda *csp.VolumeDetachArgs, vid string) (*csp.Volume, error) {\n\tif vda.Force {\n\t\tcl.csp.dbgF(\"ignoring unsupported force on detach [%s]\", vid)\n\t}\n\tcomputeService, err := cl.getComputeService(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\top, err := computeService.Instances().DetachDisk(cl.projectID, cl.attrs[AttrZone].Value, vda.NodeIdentifier, vid).Context(ctx).Do()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err = cl.waitForOperation(ctx, op); err != nil {\n\t\treturn nil, err\n\t}\n\tvol, err := cl.vr.gceVolumeGet(ctx, vid)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn vol, nil\n}", "func (cl *Client) gceVolumeGet(ctx context.Context, name string) (*csp.Volume, error) {\n\tdisk, err := cl.computeService.Disks().Get(cl.projectID, cl.attrs[AttrZone].Value, name).Context(ctx).Do()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn gceDiskToVolume(disk), nil\n}", "func (m *Drive) GetOwner()(IdentitySetable) {\n return m.owner\n}", "func (m *ProviderTerms) GetVolume() int64 {\n\tif m.Volume == 0 {\n\t\tmbps := (m.QoS.UploadMbps + m.QoS.DownloadMbps) / octet // mega bytes per second\n\t\tduration := float32(m.ExpiredAt - time.Now()) // duration in seconds\n\t\t// rounded of bytes per second multiplied by duration in seconds\n\t\tm.Volume = int64(mbps * duration)\n\t}\n\n\treturn m.Volume\n}", "func (o *SparseSSHAuthorizationPolicy) GetProtected() (out bool) {\n\n\tif o.Protected == nil {\n\t\treturn\n\t}\n\n\treturn *o.Protected\n}", "func (v *KubernetesVolume) GetVolume(name string) (corev1.Volume, error) {\n\tvolume := corev1.Volume{\n\t\tName: name,\n\t}\n\tif v.HostPathLegacy != nil {\n\t\treturn volume, errors.New(\"legacy host_path field is not supported anymore, please migrate to hostPath\")\n\t}\n\tif v.HostPath != nil {\n\t\tvolume.VolumeSource = corev1.VolumeSource{\n\t\t\tHostPath: v.HostPath,\n\t\t}\n\t\treturn volume, nil\n\t} else if v.EmptyDir != nil {\n\t\tvolume.VolumeSource = corev1.VolumeSource{\n\t\t\tEmptyDir: v.EmptyDir,\n\t\t}\n\t\treturn volume, nil\n\t} else if v.PersistentVolumeClaim != nil {\n\t\tvolume.VolumeSource = corev1.VolumeSource{\n\t\t\tPersistentVolumeClaim: &v.PersistentVolumeClaim.PersistentVolumeSource,\n\t\t}\n\t\treturn volume, nil\n\t}\n\t// return a default emptydir volume if none configured\n\tvolume.VolumeSource = corev1.VolumeSource{\n\t\tEmptyDir: &corev1.EmptyDirVolumeSource{},\n\t}\n\treturn volume, nil\n}", "func (o *HyperflexReplicationPlatDatastore) GetDatastoreEr() HyperflexEntityReference {\n\tif o == nil || o.DatastoreEr.Get() == nil {\n\t\tvar ret HyperflexEntityReference\n\t\treturn ret\n\t}\n\treturn *o.DatastoreEr.Get()\n}", "func Get(d Driver, vName string) (Volume, Mount, error) {\n\tlog.Debugf(\"Entering Get: name: %s\", vName)\n\td.GetLock().RLock()\n\tdefer d.GetLock().RUnlock()\n\treturn getVolumeMount(d, vName)\n}", "func (p *nfsProvisioner) Provision(options controller.ProvisionOptions) (*v1.PersistentVolume, error) {\n\tvolume, err := p.createVolume(options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tannotations := make(map[string]string)\n\tannotations[annCreatedBy] = createdBy\n\tannotations[annExportBlock] = 
volume.exportBlock\n\tannotations[annExportID] = strconv.FormatUint(uint64(volume.exportID), 10)\n\tannotations[annProjectBlock] = volume.projectBlock\n\tannotations[annProjectID] = strconv.FormatUint(uint64(volume.projectID), 10)\n\tif volume.supGroup != 0 {\n\t\tannotations[VolumeGidAnnotationKey] = strconv.FormatUint(volume.supGroup, 10)\n\t}\n\t// Only use legacy mount options annotation if StorageClass.MountOptions is empty\n\tif volume.mountOptions != \"\" && options.StorageClass.MountOptions == nil {\n\t\tannotations[MountOptionAnnotation] = volume.mountOptions\n\t}\n\tannotations[annProvisionerID] = string(p.identity)\n\n\tpv := &v1.PersistentVolume{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: options.PVName,\n\t\t\tLabels: map[string]string{},\n\t\t\tAnnotations: annotations,\n\t\t},\n\t\tSpec: v1.PersistentVolumeSpec{\n\t\t\tPersistentVolumeReclaimPolicy: *options.StorageClass.ReclaimPolicy,\n\t\t\tAccessModes: options.PVC.Spec.AccessModes,\n\t\t\tCapacity: v1.ResourceList{\n\t\t\t\tv1.ResourceName(v1.ResourceStorage): options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)],\n\t\t\t},\n\t\t\tPersistentVolumeSource: v1.PersistentVolumeSource{\n\t\t\t\tNFS: &v1.NFSVolumeSource{\n\t\t\t\t\tServer: volume.server,\n\t\t\t\t\tPath: volume.path,\n\t\t\t\t\tReadOnly: false,\n\t\t\t\t},\n\t\t\t},\n\t\t\tMountOptions: options.StorageClass.MountOptions,\n\t\t},\n\t}\n\n\treturn pv, nil\n}", "func (objectSet *VolumeObjectSet) GetObject(id string) (*model.Volume, error) {\r\n\tresponse, err := objectSet.Client.Get(volumePath, id, model.Volume{})\r\n\tif response == nil {\r\n\t\treturn nil, err\r\n\t}\r\n\treturn response.(*model.Volume), err\r\n}", "func (c *DenseSliceComponent) Get(entity Entity) interface{} {\n\tif 0 <= int(entity) && int(entity) < len(c.dataID) && c.dataID[entity] != -1 {\n\t\treturn c.data[c.dataID[entity]]\n\t}\n\treturn nil\n}", "func (l *Location) Volume() string {\n\treturn l.Authority.String()\n}", "func (o *QtreeCreateRequest) Volume() string {\n\tvar r string\n\tif o.VolumePtr == nil {\n\t\treturn r\n\t}\n\tr = *o.VolumePtr\n\treturn r\n}", "func (x ThirdPartyServiceEntity) GetNerdStorage() NerdStorageEntityScope {\n\treturn x.NerdStorage\n}", "func (a *HyperflexApiService) GetHyperflexVolumeByMoid(ctx context.Context, moid string) ApiGetHyperflexVolumeByMoidRequest {\n\treturn ApiGetHyperflexVolumeByMoidRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func (m *VirtualEndpoint) GetCrossCloudGovernmentOrganizationMapping()(CloudPcCrossCloudGovernmentOrganizationMappingable) {\n val, err := m.GetBackingStore().Get(\"crossCloudGovernmentOrganizationMapping\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(CloudPcCrossCloudGovernmentOrganizationMappingable)\n }\n return nil\n}", "func (s *Service) Get(ctx context.Context, name string) (*corev1.PersistentVolumeClaim, error) {\n\tn, err := s.client.CoreV1().PersistentVolumeClaims(defaultNamespaceName).Get(ctx, name, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn nil, xerrors.Errorf(\"get persistentVolumeClaim: %w\", err)\n\t}\n\treturn n, nil\n}", "func (cosigner *LocalCosigner) GetEphemeralSecretPart(req CosignerGetEphemeralSecretPartRequest) (CosignerGetEphemeralSecretPartResponse, error) {\n\tres := CosignerGetEphemeralSecretPartResponse{}\n\n\t// protects the meta map\n\tcosigner.lastSignStateMutex.Lock()\n\tdefer cosigner.lastSignStateMutex.Unlock()\n\n\thrsKey := HRSKey{\n\t\tHeight: req.Height,\n\t\tRound: req.Round,\n\t\tStep: 
req.Step,\n\t}\n\n\tmeta, ok := cosigner.hrsMeta[hrsKey]\n\t// generate metadata placeholder\n\tif !ok {\n\t\tsecret := make([]byte, 32)\n\t\trand.Read(secret)\n\n\t\tmeta = HrsMetadata{\n\t\t\tSecret: secret,\n\t\t\tPeers: make([]PeerMetadata, cosigner.total),\n\t\t}\n\n\t\t// split this secret with shamirs\n\t\t// !! dealt shares need to be saved because dealing produces different shares each time!\n\t\tmeta.DealtShares = tsed25519.DealShares(meta.Secret, cosigner.threshold, cosigner.total)\n\n\t\tcosigner.hrsMeta[hrsKey] = meta\n\t}\n\n\tourEphPublicKey := tsed25519.ScalarMultiplyBase(meta.Secret)\n\n\t// set our values\n\tmeta.Peers[cosigner.key.ID-1].Share = meta.DealtShares[cosigner.key.ID-1]\n\tmeta.Peers[cosigner.key.ID-1].EphemeralSecretPublicKey = ourEphPublicKey\n\n\t// grab the peer info for the ID being requested\n\tpeer, ok := cosigner.peers[req.ID]\n\tif !ok {\n\t\treturn res, errors.New(\"Unknown peer ID\")\n\t}\n\n\tsharePart := meta.DealtShares[req.ID-1]\n\n\t// use RSA public to encrypt user's share part\n\tencrypted, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, &peer.PublicKey, sharePart, nil)\n\tif err != nil {\n\t\treturn res, err\n\t}\n\n\tres.SourceID = cosigner.key.ID\n\tres.SourceEphemeralSecretPublicKey = ourEphPublicKey\n\tres.EncryptedSharePart = encrypted\n\n\t// sign the response payload with our private key\n\t// cosigners can verify the signature to confirm sender validity\n\t{\n\t\tjsonBytes, err := tmJson.Marshal(res)\n\n\t\tif err != nil {\n\t\t\treturn res, err\n\t\t}\n\n\t\tdigest := sha256.Sum256(jsonBytes)\n\t\tsignature, err := rsa.SignPSS(rand.Reader, &cosigner.rsaKey, crypto.SHA256, digest[:], nil)\n\t\tif err != nil {\n\t\t\treturn res, err\n\t\t}\n\n\t\tres.SourceSig = signature\n\t}\n\n\treturn res, nil\n}", "func (p *Provisioner) Delete(pv *v1.PersistentVolume) (err error) {\n\tdefer func() {\n\t\terr = errors.Wrapf(err, \"failed to delete volume %v\", pv.Name)\n\t}()\n\t//Initiate clean up only when reclaim policy is not retain.\n\tif pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimRetain {\n\t\t//TODO: Determine the type of PV\n\t\tpvType := GetLocalPVType(pv)\n\t\tsize := resource.Quantity{}\n\t\treqMap := pv.Spec.Capacity\n\t\tif reqMap != nil {\n\t\t\tsize = pv.Spec.Capacity[\"storage\"]\n\t\t}\n\n\t\tsendEventOrIgnore(pv.Name, size.String(), pvType, analytics.VolumeDeprovision)\n\t\tif pvType == \"local-device\" {\n\t\t\terr := p.DeleteBlockDevice(pv)\n\t\t\tif err != nil {\n\t\t\t\talertlog.Logger.Errorw(\"\",\n\t\t\t\t\t\"eventcode\", \"cstor.local.pv.delete.failure\",\n\t\t\t\t\t\"msg\", \"Failed to delete CStor Local PV\",\n\t\t\t\t\t\"rname\", pv.Name,\n\t\t\t\t\t\"reason\", \"failed to delete block device\",\n\t\t\t\t\t\"storagetype\", pvType,\n\t\t\t\t)\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\n\t\terr = p.DeleteHostPath(pv)\n\t\tif err != nil {\n\t\t\talertlog.Logger.Errorw(\"\",\n\t\t\t\t\"eventcode\", \"cstor.local.pv.delete.failure\",\n\t\t\t\t\"msg\", \"Failed to delete CStor Local PV\",\n\t\t\t\t\"rname\", pv.Name,\n\t\t\t\t\"reason\", \"failed to delete host path\",\n\t\t\t\t\"storagetype\", pvType,\n\t\t\t)\n\t\t}\n\t\treturn err\n\t}\n\tklog.Infof(\"Retained volume %v\", pv.Name)\n\talertlog.Logger.Infow(\"\",\n\t\t\"eventcode\", \"cstor.local.pv.delete.success\",\n\t\t\"msg\", \"Successfully deleted CStor Local PV\",\n\t\t\"rname\", pv.Name,\n\t)\n\treturn nil\n}", "func (r Virtual_Guest_Network_Component) GetGuest() (resp datatypes.Virtual_Guest, err error) {\n\terr = 
r.Session.DoRequest(\"SoftLayer_Virtual_Guest_Network_Component\", \"getGuest\", nil, &r.Options, &resp)\n\treturn\n}", "func (c *Client) Volume() (float64, error) {\n\treturn c.GetFloatProperty(\"volume\")\n}", "func (x GenericInfrastructureEntity) GetNerdStorage() NerdStorageEntityScope {\n\treturn x.NerdStorage\n}", "func (keyRing *KeyRing) GetSigningEntity() (*openpgp.Entity, error) {\n\tvar signEntity *openpgp.Entity\n\n\tfor _, e := range keyRing.entities {\n\t\t// Entity.PrivateKey must be a signing key\n\t\tif e.PrivateKey != nil {\n\t\t\tif !e.PrivateKey.Encrypted {\n\t\t\t\tsignEntity = e\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif signEntity == nil {\n\t\terr := errors.New(\"gopenpgp: cannot sign message, unable to unlock signer key\")\n\t\treturn signEntity, err\n\t}\n\n\treturn signEntity, nil\n}", "func (o FioSpecPtrOutput) Volume() FioSpecVolumePtrOutput {\n\treturn o.ApplyT(func(v *FioSpec) *FioSpecVolume {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Volume\n\t}).(FioSpecVolumePtrOutput)\n}", "func (client VolumesClient) GetResponder(resp *http.Response) (result Volume, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (o IopingSpecVolumeVolumeSourceGcePersistentDiskOutput) ReadOnly() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceGcePersistentDisk) *bool { return v.ReadOnly }).(pulumi.BoolPtrOutput)\n}", "func (p *EtcdClientV3) GetVolume(volName string) (*storage.VolumeExternal, error) {\n\tvolJSON, err := p.Read(config.VolumeURL + \"/\" + volName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvolExternal := &storage.VolumeExternal{}\n\terr = json.Unmarshal([]byte(volJSON), volExternal)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn volExternal, nil\n}", "func (r Virtual_ReservedCapacityGroup_Instance) GetGuest() (resp datatypes.Virtual_Guest, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Virtual_ReservedCapacityGroup_Instance\", \"getGuest\", nil, &r.Options, &resp)\n\treturn\n}", "func (m *Group) GetDrive()(Driveable) {\n return m.drive\n}", "func (c Investigate) DomainVolume(domain string, opts QueryOptions) (Volume, error) {\n\tu := &url.URL{\n\t\tPath: fmt.Sprintf(\"domains/volume/%s\", domain),\n\t\tRawQuery: url.Values(opts).Encode(),\n\t}\n\tvar out Volume\n\terr := c.Get(c.BaseURL.ResolveReference(u).String(), &out)\n\treturn out, err\n}", "func Get(client *gophercloud.ServiceClient, id string, bearer map[string]string) (r volumes.GetResult) {\n\t_, r.Err = client.Get(getURL(client, id), &r.Body, &gophercloud.RequestOpts{\n\t\tMoreHeaders: bearer,\n\t})\n\treturn\n}", "func (o *VolumeAttributesType) VolumeVserverDrProtectionAttributes() VolumeVserverDrProtectionAttributesType {\n\tr := *o.VolumeVserverDrProtectionAttributesPtr\n\treturn r\n}", "func (x GenericEntity) GetNerdStorage() NerdStorageEntityScope {\n\treturn x.NerdStorage\n}", "func (o LookupGroupVariableResultOutput) Protected() pulumi.BoolOutput {\n\treturn o.ApplyT(func(v LookupGroupVariableResult) bool { return v.Protected }).(pulumi.BoolOutput)\n}", "func (c *Client) Volume() uint8 {\n\tc.Lock()\n\tdefer c.Unlock()\n\treturn c.volume\n}", "func (e *Account) EntStorage() ent.Storage { return ent.GetStorage(e) }", "func (e *Account) EntStorage() ent.Storage { return ent.GetStorage(e) }", "func (p *cinderProvisioner) 
Provision(options controller.VolumeOptions) (*v1.PersistentVolume, error) {\n\tif options.PVC.Spec.Selector != nil {\n\t\treturn nil, fmt.Errorf(\"claim Selector is not supported\")\n\t}\n\n\tvolumeID, err := createCinderVolume(p, options)\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to create volume\")\n\t\treturn nil, err\n\t}\n\n\terr = waitForAvailableCinderVolume(p, volumeID)\n\tif err != nil {\n\t\tglog.Errorf(\"Volume did not become available\")\n\t\treturn nil, err\n\t}\n\n\terr = reserveCinderVolume(p, volumeID)\n\tif err != nil {\n\t\t// TODO: Create placeholder PV?\n\t\tglog.Errorf(\"Failed to reserve volume: %v\", err)\n\t\treturn nil, err\n\t}\n\n\tconnection, err := connectCinderVolume(p, volumeID)\n\tif err != nil {\n\t\t// TODO: Create placeholder PV?\n\t\tglog.Errorf(\"Failed to connect volume: %v\", err)\n\t\treturn nil, err\n\t}\n\n\tmapper, err := newVolumeMapperFromConnection(connection)\n\tif err != nil {\n\t\t// TODO: Create placeholder PV?\n\t\tglog.Errorf(\"Unable to create volume mapper: %f\", err)\n\t\treturn nil, err\n\t}\n\n\tctx := provisionCtx{p, options, connection}\n\terr = mapper.AuthSetup(ctx)\n\tif err != nil {\n\t\t// TODO: Create placeholder PV?\n\t\tglog.Errorf(\"Failed to prepare volume auth: %v\", err)\n\t\treturn nil, err\n\t}\n\n\tpv, err := buildPV(mapper, ctx, volumeID)\n\tif err != nil {\n\t\t// TODO: Create placeholder PV?\n\t\tglog.Errorf(\"Failed to build PV: %v\", err)\n\t\treturn nil, err\n\t}\n\n\treturn pv, nil\n}", "func (s *GenericStorage) Get(gvk schema.GroupVersionKind, uid runtime.UID) (runtime.Object, error) {\n\tstorageKey := KeyForUID(gvk, uid)\n\tcontent, err := s.raw.Read(storageKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s.decode(content, gvk)\n}", "func (x UnavailableEntity) GetNerdStorage() NerdStorageEntityScope {\n\treturn x.NerdStorage\n}", "func (client VolumesClient) GetPreparer(ctx context.Context, location string, storageSubSystem string, storagePool string, volume string) (*http.Request, error) {\n\tpathParameters := map[string]interface{}{\n\t\t\"location\": autorest.Encode(\"path\", location),\n\t\t\"storagePool\": autorest.Encode(\"path\", storagePool),\n\t\t\"storageSubSystem\": autorest.Encode(\"path\", storageSubSystem),\n\t\t\"subscriptionId\": autorest.Encode(\"path\", client.SubscriptionID),\n\t\t\"volume\": autorest.Encode(\"path\", volume),\n\t}\n\n\tconst APIVersion = \"2016-05-01\"\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": APIVersion,\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsGet(),\n\t\tautorest.WithBaseURL(client.BaseURI),\n\t\tautorest.WithPathParameters(\"/subscriptions/{subscriptionId}/resourceGroups/System.{location}/providers/Microsoft.Fabric.Admin/fabricLocations/{location}/storageSubSystems/{storageSubSystem}/storagePools/{storagePool}/volumes/{volume}\", pathParameters),\n\t\tautorest.WithQueryParameters(queryParameters))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}", "func (x SyntheticMonitorEntity) GetNerdStorage() NerdStorageEntityScope {\n\treturn x.NerdStorage\n}", "func (ce *ClientEncryption) GetKey(ctx context.Context, id primitive.Binary) *SingleResult {\n\tfilter := bsoncore.NewDocumentBuilder().AppendBinary(\"_id\", id.Subtype, id.Data).Build()\n\treturn ce.keyVaultColl.FindOne(ctx, filter)\n}", "func Get(c *golangsdk.ServiceClient, server_id string, volume_id string) (r GetResult) {\n\t_, r.Err = c.Get(getURL(c, server_id, volume_id), &r.Body, nil)\n\treturn\n}", "func ThisDV(dv 
*v1alpha1.DataVolume) func() (*v1alpha1.DataVolume, error) {\n\treturn func() (p *v1alpha1.DataVolume, err error) {\n\t\tvirtClient, err := kubecli.GetKubevirtClient()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tp, err = virtClient.CdiClient().CdiV1alpha1().DataVolumes(dv.Namespace).Get(context.Background(), dv.Name, k8smetav1.GetOptions{})\n\t\tif errors.IsNotFound(err) {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn\n\t}\n}", "func (o *CloudSnapshotAccount) GetProtected() bool {\n\n\treturn o.Protected\n}", "func (a *HyperflexApiService) GetHyperflexDataProtectionPeerByMoid(ctx context.Context, moid string) ApiGetHyperflexDataProtectionPeerByMoidRequest {\n\treturn ApiGetHyperflexDataProtectionPeerByMoidRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func (c *Core) GetVolume(id types.VolumeID) (*types.Volume, error) {\n\tc.lock.Lock(id.Name)\n\tdefer c.lock.Unlock(id.Name)\n\n\treturn c.getVolume(id)\n}", "func getPvFromClaim(client clientset.Interface, namespace string, claimName string) *v1.PersistentVolume {\n\tpvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{})\n\tgomega.Expect(err).NotTo(gomega.HaveOccurred())\n\tpv, err := client.CoreV1().PersistentVolumes().Get(pvclaim.Spec.VolumeName, metav1.GetOptions{})\n\tgomega.Expect(err).NotTo(gomega.HaveOccurred())\n\treturn pv\n}", "func (r Virtual_Guest) GetEvaultNetworkStorage() (resp []datatypes.Network_Storage, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Virtual_Guest\", \"getEvaultNetworkStorage\", nil, &r.Options, &resp)\n\treturn\n}", "func (o IopingSpecPtrOutput) Volume() IopingSpecVolumePtrOutput {\n\treturn o.ApplyT(func(v *IopingSpec) *IopingSpecVolume {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Volume\n\t}).(IopingSpecVolumePtrOutput)\n}", "func (m *User) GetDrive()(Driveable) {\n return m.drive\n}", "func (provider *NetconfServiceProvider) GetPrivate() interface{} {\n\treturn provider.Private\n}", "func (x ApmExternalServiceEntity) GetNerdStorage() NerdStorageEntityScope {\n\treturn x.NerdStorage\n}", "func (s *StackEbrc) GetVolumeAttachment(serverID, id string) (*abstract.VolumeAttachment, fail.Error) {\n\tlogrus.Debugf(\">>> stacks.ebrc::GetVolumeAttachment(%s)\", id)\n\tdefer logrus.Debugf(\"<<< stacks.ebrc::GetVolumeAttachment(%s)\", id)\n\n\tvats, err := s.ListVolumeAttachments(serverID)\n\tif err != nil {\n\t\treturn nil, fail.Wrap(err, fmt.Sprintf(\"Error getting attachment\"))\n\t}\n\n\tfor _, vat := range vats {\n\t\tif vat.ID == id && vat.ServerID == serverID {\n\t\t\treturn &vat, nil\n\t\t}\n\t}\n\n\treturn nil, fail.Errorf(fmt.Sprintf(\"Attachment [%s] to [%s] not found\", id, serverID), nil)\n}", "func (s *BoltState) Volume(name string) (*Volume, error) {\n\tif name == \"\" {\n\t\treturn nil, define.ErrEmptyID\n\t}\n\n\tif !s.valid {\n\t\treturn nil, define.ErrDBClosed\n\t}\n\n\tvolName := []byte(name)\n\n\tvolume := new(Volume)\n\tvolume.config = new(VolumeConfig)\n\tvolume.state = new(VolumeState)\n\n\tdb, err := s.getDBCon()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer s.deferredCloseDBCon(db)\n\n\terr = db.View(func(tx *bolt.Tx) error {\n\t\tvolBkt, err := getVolBucket(tx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn s.getVolumeFromDB(volName, volume, volBkt)\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn volume, nil\n}", "func (a *Client) CopyProtectedEntity(params *CopyProtectedEntityParams, opts ...ClientOption) (*CopyProtectedEntityAccepted, error) {\n\t// 
TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewCopyProtectedEntityParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"copyProtectedEntity\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/astrolabe/{service}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &CopyProtectedEntityReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*CopyProtectedEntityAccepted)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for copyProtectedEntity: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func NewPcloudPvminstancesVolumesGetUnauthorized() *PcloudPvminstancesVolumesGetUnauthorized {\n\treturn &PcloudPvminstancesVolumesGetUnauthorized{}\n}", "func (s *Session) findChildEntity(ctx context.Context, parent object.Reference, childName string) (object.Reference, error) {\n\tsi := object.NewSearchIndex(s.Client.VimClient())\n\tref, err := si.FindChild(ctx, parent, childName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif ref == nil {\n\t\t// SearchIndex returns nil when child name is not found\n\t\tlog.Error(fmt.Errorf(\"entity not found\"), \"child entity not found on vSphere\", \"name\", childName)\n\t\treturn nil, &find.NotFoundError{}\n\t}\n\n\t// We have found a child entity with the given name. 
Populate the inventory path before returning.\n\tchild, err := s.Finder.ObjectReference(ctx, ref.Reference())\n\tif err != nil {\n\t\tlog.Error(err, \"error when setting inventory path for the object\", \"moRef\", ref.Reference().Value)\n\t\treturn nil, err\n\t}\n\n\treturn child, nil\n}", "func GetVirtualGuestService(sess *session.Session) Virtual_Guest {\n\treturn Virtual_Guest{Session: sess}\n}", "func (x ExternalEntity) GetNerdStorage() NerdStorageEntityScope {\n\treturn x.NerdStorage\n}", "func (o IopingSpecOutput) Volume() IopingSpecVolumeOutput {\n\treturn o.ApplyT(func(v IopingSpec) IopingSpecVolume { return v.Volume }).(IopingSpecVolumeOutput)\n}", "func (cl *Client) VolumeDetach(ctx context.Context, vda *csp.VolumeDetachArgs) (*csp.Volume, error) {\n\tsvc, vid, _ := VolumeIdentifierParse(vda.VolumeIdentifier)\n\tswitch svc {\n\tcase ServiceGCE:\n\t\treturn cl.gceVolumeDetach(ctx, vda, vid)\n\t}\n\treturn nil, fmt.Errorf(\"storage type currently unsupported\")\n}", "func (x ApmDatabaseInstanceEntity) GetNerdStorage() NerdStorageEntityScope {\n\treturn x.NerdStorage\n}", "func (r Virtual_Guest) GetAllowedHost() (resp datatypes.Network_Storage_Allowed_Host, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Virtual_Guest\", \"getAllowedHost\", nil, &r.Options, &resp)\n\treturn\n}", "func (o FioSpecVolumeVolumeSourceGcePersistentDiskOutput) ReadOnly() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceGcePersistentDisk) *bool { return v.ReadOnly }).(pulumi.BoolPtrOutput)\n}", "func (d DobsClient) GetVolume(ctx Context, name string) (*APIVolume, error) {\n\n\tapiVolume, err := d.getVolumeByName(ctx, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvol := &APIVolume{\n\t\tName: apiVolume.Name,\n\t\tID: apiVolume.ID,\n\t\t// DropletID: apiVolume.DropletIDs[0],\n\t}\n\n\treturn vol, nil\n}", "func (keyRing *KeyRing) getSigningEntity() (*openpgp.Entity, error) {\n\tvar signEntity *openpgp.Entity\n\n\tfor _, e := range keyRing.entities {\n\t\t// Entity.PrivateKey must be a signing key\n\t\tif e.PrivateKey != nil {\n\t\t\tif !e.PrivateKey.Encrypted {\n\t\t\t\tsignEntity = e\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif signEntity == nil {\n\t\treturn nil, errors.New(\"gopenpgp: cannot sign message, unable to unlock signer key\")\n\t}\n\n\treturn signEntity, nil\n}", "func (r *ImageUpdateAutomationReconciler) getSigningEntity(ctx context.Context, auto imagev1.ImageUpdateAutomation) (*openpgp.Entity, error) {\n\t// get kubernetes secret\n\tsecretName := types.NamespacedName{\n\t\tNamespace: auto.GetNamespace(),\n\t\tName: auto.Spec.Commit.SigningKey.SecretRef.Name,\n\t}\n\tvar secret corev1.Secret\n\tif err := r.Get(ctx, secretName, &secret); err != nil {\n\t\treturn nil, fmt.Errorf(\"could not find signing key secret '%s': %w\", secretName, err)\n\t}\n\n\t// get data from secret\n\tdata, ok := secret.Data[signingSecretKey]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"signing key secret '%s' does not contain a 'git.asc' key\", secretName)\n\t}\n\n\t// read entity from secret value\n\tentities, err := openpgp.ReadArmoredKeyRing(bytes.NewReader(data))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not read signing key from secret '%s': %w\", secretName, err)\n\t}\n\tif len(entities) > 1 {\n\t\treturn nil, fmt.Errorf(\"multiple entities read from secret '%s', could not determine which signing key to use\", secretName)\n\t}\n\treturn entities[0], nil\n}", "func NewPcloudPvminstancesVolumesGetForbidden() *PcloudPvminstancesVolumesGetForbidden {\n\treturn 
&PcloudPvminstancesVolumesGetForbidden{}\n}", "func (s *BoltBackedService) GetDocument(ctx context.Context, id uuid.UUID, key []byte) (*Document, error) {\n\n\td, err := (*s.db).LoadDocument(id)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"id\": \"id\",\n\t\t}).Fatal(\"Failed to load document:\", err)\n\t\treturn d, err\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"id\": id,\n\t}).Debug(\"Loaded encrypted document\")\n\n\tif len(d.Contents) == 0 {\n\t\t// TODO Document has been deleted how to handle this better?\n\t\treturn d, nil\n\t}\n\n\tif err := d.DecryptInPlace(key); err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"id\": id,\n\t\t\t\"keyHash\": sha256.Sum256(key),\n\t\t}).Fatal(\"Failed to decrypt document\")\n\t\treturn nil, err\n\t}\n\n\treturn d, nil\n}", "func (r *Root) Get(name string) (volume.Volume, error) {\n\tr.m.Lock()\n\tv, exists := r.volumes[name]\n\tr.m.Unlock()\n\tif !exists {\n\t\treturn nil, fmt.Errorf(\"volume not found\")\n\n\t}\n\treturn v, nil\n}", "func (s *Dao) Get(ctx context.Context, req *model.GetKVRequest) (*model.KVDoc, error) {\n\tresp, err := etcdadpt.Get(ctx, key.KV(req.Domain, req.Project, req.ID))\n\tif err != nil {\n\t\topenlog.Error(err.Error())\n\t\treturn nil, err\n\t}\n\tif resp == nil {\n\t\treturn nil, datasource.ErrKeyNotExists\n\t}\n\tcurKV := &model.KVDoc{}\n\terr = json.Unmarshal(resp.Value, curKV)\n\tif err != nil {\n\t\topenlog.Error(\"decode error: \" + err.Error())\n\t\treturn nil, err\n\t}\n\treturn curKV, nil\n}" ]
[ "0.5145643", "0.49404353", "0.48711714", "0.47512802", "0.47413394", "0.47331426", "0.47056416", "0.46269906", "0.4609255", "0.46007136", "0.45736876", "0.45705947", "0.45601365", "0.45379475", "0.45265597", "0.4504617", "0.44908112", "0.44778052", "0.4476148", "0.44699076", "0.4469522", "0.44660643", "0.44532216", "0.4437926", "0.44225675", "0.4407047", "0.4367497", "0.43646473", "0.43617553", "0.43569675", "0.4351974", "0.43373135", "0.43340814", "0.43340042", "0.43249154", "0.43186054", "0.43156517", "0.4300669", "0.43001127", "0.42959565", "0.427462", "0.42715758", "0.426995", "0.42662957", "0.42620206", "0.4260337", "0.42591557", "0.42584082", "0.4257883", "0.4257429", "0.42458072", "0.42400432", "0.4236719", "0.4235463", "0.42344528", "0.4231535", "0.42295393", "0.4222178", "0.42195114", "0.42156252", "0.4211558", "0.41993168", "0.41916612", "0.41916612", "0.41901663", "0.41872108", "0.4186205", "0.41853812", "0.4181516", "0.4173527", "0.41717163", "0.4167272", "0.41641504", "0.4161432", "0.4157321", "0.41409093", "0.4128785", "0.4128306", "0.41240606", "0.4122259", "0.41148555", "0.4111821", "0.41053444", "0.41024446", "0.40982208", "0.40969583", "0.40941527", "0.4092131", "0.40866467", "0.40848666", "0.40831268", "0.40789202", "0.4078351", "0.40714064", "0.40670848", "0.40617874", "0.40593994", "0.40561086", "0.4052958", "0.40522093" ]
0.6989835
0
RunTests executes the scorecard tests as configured
func (o Scorecard) RunTests(ctx context.Context) (testOutput v1alpha3.Test, err error) { err = o.TestRunner.Initialize(ctx) if err != nil { return testOutput, err } tests := o.selectTests() if len(tests) == 0 { return testOutput, nil } for _, test := range tests { result, err := o.TestRunner.RunTest(ctx, test) if err != nil { result = convertErrorToStatus(test.Name, err) } testOutput.Status.Results = append(testOutput.Status.Results, result.Results...) } if !o.SkipCleanup { err = o.TestRunner.Cleanup(ctx) if err != nil { return testOutput, err } } return testOutput, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o Scorecard) RunTests() (testOutput v1alpha2.ScorecardOutput, err error) {\n\ttests := selectTests(o.Selector, o.Config.Tests)\n\tif len(tests) == 0 {\n\t\tfmt.Println(\"no tests selected\")\n\t\treturn testOutput, err\n\t}\n\n\tbundleData, err := getBundleData(o.BundlePath)\n\tif err != nil {\n\t\treturn testOutput, fmt.Errorf(\"error getting bundle data %w\", err)\n\t}\n\n\t// create a ConfigMap holding the bundle contents\n\to.bundleConfigMap, err = createConfigMap(o, bundleData)\n\tif err != nil {\n\t\treturn testOutput, fmt.Errorf(\"error creating ConfigMap %w\", err)\n\t}\n\n\tfor i, test := range tests {\n\t\tvar err error\n\t\ttests[i].TestPod, err = o.runTest(test)\n\t\tif err != nil {\n\t\t\treturn testOutput, fmt.Errorf(\"test %s failed %w\", test.Name, err)\n\t\t}\n\t}\n\n\tif !o.SkipCleanup {\n\t\tdefer deletePods(o.Client, tests)\n\t\tdefer deleteConfigMap(o.Client, o.bundleConfigMap)\n\t}\n\n\terr = o.waitForTestsToComplete(tests)\n\tif err != nil {\n\t\treturn testOutput, err\n\t}\n\n\ttestOutput = getTestResults(o.Client, tests)\n\n\treturn testOutput, err\n}", "func (o Scorecard) Run(ctx context.Context) (testOutput v1alpha3.TestList, err error) {\n\ttestOutput = v1alpha3.NewTestList()\n\n\tif err := o.TestRunner.Initialize(ctx); err != nil {\n\t\treturn testOutput, err\n\t}\n\n\tfor _, stage := range o.Config.Stages {\n\t\ttests := o.selectTests(stage)\n\t\tif len(tests) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\ttests = o.setTestDefaults(tests)\n\n\t\toutput := make(chan v1alpha3.Test, len(tests))\n\t\tif stage.Parallel {\n\t\t\to.runStageParallel(ctx, tests, output)\n\t\t} else {\n\t\t\to.runStageSequential(ctx, tests, output)\n\t\t}\n\t\tclose(output)\n\t\tfor o := range output {\n\t\t\ttestOutput.Items = append(testOutput.Items, o)\n\t\t}\n\t}\n\n\t// Get timeout error, if any, before calling Cleanup() so deletes don't cause a timeout.\n\tselect {\n\tcase <-ctx.Done():\n\t\terr = ctx.Err()\n\tdefault:\n\t}\n\n\tif !o.SkipCleanup {\n\t\t// Use a separate context for cleanup, which needs to run regardless of a prior timeout.\n\t\tclctx, cancel := context.WithTimeout(context.Background(), cleanupTimeout)\n\t\tdefer cancel()\n\t\tif err := o.TestRunner.Cleanup(clctx); err != nil {\n\t\t\treturn testOutput, err\n\t\t}\n\t}\n\n\treturn testOutput, err\n}", "func (ts *TestSuite) RunTests() {\n\n\tif len(ts.Tests) == 0 {\n\t\tout.Printf(\"No tests to run\\n\")\n\t\treturn\n\t}\n\n\tstartTime := time.Now()\n\n\t// setup search\n\ts := search.NewSearch()\n\tsl := search.NewSearchLimits()\n\tsl.MoveTime = ts.Time\n\tsl.Depth = ts.Depth\n\tif sl.MoveTime > 0 {\n\t\tsl.TimeControl = true\n\t}\n\n\tout.Printf(\"Running Test Suite\\n\")\n\tout.Printf(\"==================================================================\\n\")\n\tout.Printf(\"EPD File: %s\\n\", ts.FilePath)\n\tout.Printf(\"SearchTime: %d ms\\n\", ts.Time.Milliseconds())\n\tout.Printf(\"MaxDepth: %d\\n\", ts.Depth)\n\tout.Printf(\"Date: %s\\n\", time.Now().Local())\n\tout.Printf(\"No of tests: %d\\n\", len(ts.Tests))\n\tout.Println()\n\n\t// execute all tests and store results in the\n\t// test instance\n\tfor i, t := range ts.Tests {\n\t\tout.Printf(\"Test %d of %d\\nTest: %s -- Target Result %s\\n\", i+1, len(ts.Tests), t.line, t.targetMoves.StringUci())\n\t\tstartTime2 := time.Now()\n\t\trunSingleTest(s, sl, t)\n\t\telapsedTime := time.Since(startTime2)\n\t\tt.nodes = s.NodesVisited()\n\t\tt.time = s.LastSearchResult().SearchTime\n\t\tt.nps = util.Nps(s.NodesVisited(), 
s.LastSearchResult().SearchTime)\n\t\tout.Printf(\"Test finished in %d ms with result %s (%s) - nps: %d\\n\\n\",\n\t\t\telapsedTime.Milliseconds(), t.rType.String(), t.actual.StringUci(), t.nps)\n\t}\n\n\t// sum up result for report\n\ttr := &SuiteResult{}\n\tfor _, t := range ts.Tests {\n\t\ttr.Counter++\n\t\tswitch t.rType {\n\t\tcase NotTested:\n\t\t\ttr.NotTestedCounter++\n\t\tcase Skipped:\n\t\t\ttr.SkippedCounter++\n\t\tcase Failed:\n\t\t\ttr.FailedCounter++\n\t\tcase Success:\n\t\t\ttr.SuccessCounter++\n\t\t}\n\t\ttr.Nodes += t.nodes\n\t\ttr.Time += t.time\n\t}\n\tts.LastResult = tr\n\n\telapsed := time.Since(startTime)\n\n\t// print report\n\tout.Printf(\"Results for Test Suite\\n\", ts.FilePath)\n\tout.Printf(\"------------------------------------------------------------------------------------------------------------------------------------\\n\")\n\tout.Printf(\"EPD File: %s\\n\", ts.FilePath)\n\tout.Printf(\"SearchTime: %d ms\\n\", ts.Time.Milliseconds())\n\tout.Printf(\"MaxDepth: %d\\n\", ts.Depth)\n\tout.Printf(\"Date: %s\\n\", time.Now().Local())\n\tout.Printf(\"====================================================================================================================================\\n\")\n\tout.Printf(\" %-4s | %-10s | %-8s | %-8s | %-15s | %s | %s\\n\", \" Nr.\", \"Result\", \"Move\", \"Value\", \"Expected Result\", \"Fen\", \"Id\")\n\tout.Printf(\"====================================================================================================================================\\n\")\n\tfor i, t := range ts.Tests {\n\t\tif t.tType == DM {\n\t\t\tout.Printf(\" %-4d | %-10s | %-8s | %-8s | %s%-15d | %s | %s\\n\",\n\t\t\t\ti+1, t.rType.String(), t.actual.StringUci(), t.value.String(), \"dm \", t.mateDepth, t.fen, t.id)\n\t\t} else {\n\t\t\tout.Printf(\" %-4d | %-10s | %-8s | %-8s | %s %-15s | %s | %s\\n\",\n\t\t\t\ti+1, t.rType.String(), t.actual.StringUci(), t.value.String(), t.tType.String(), t.targetMoves.StringUci(), t.fen, t.id)\n\t\t}\n\t}\n\tout.Printf(\"====================================================================================================================================\\n\")\n\tout.Printf(\"Summary:\\n\")\n\tout.Printf(\"EPD File: %s\\n\", ts.FilePath)\n\tout.Printf(\"SearchTime: %d ms\\n\", ts.Time.Milliseconds())\n\tout.Printf(\"MaxDepth: %d\\n\", ts.Depth)\n\tout.Printf(\"Date: %s\\n\", time.Now().Local())\n\tout.Printf(\"Successful: %-3d (%d %%)\\n\", tr.SuccessCounter, 100*tr.SuccessCounter/tr.Counter)\n\tout.Printf(\"Failed: %-3d (%d %%)\\n\", tr.FailedCounter, 100*tr.FailedCounter/tr.Counter)\n\tout.Printf(\"Skipped: %-3d (%d %%)\\n\", tr.SkippedCounter, 100*tr.SkippedCounter/tr.Counter)\n\tout.Printf(\"Not tested: %-3d (%d %%)\\n\", tr.NotTestedCounter, 100*tr.NotTestedCounter/tr.Counter)\n\tout.Printf(\"Test time: %s\\n\", elapsed)\n\tout.Printf(\"Configuration: %s\\n\", config.Settings.String())\n}", "func RunSubtests(ctx *Context) {\n\tfor name, fn := range tests {\n\t\tctx.Run(name, fn)\n\t}\n}", "func RunTests(opts Options) {\n\tif opts.Cleanup {\n\t\terr := CleanupTests(opts.Driver, opts.DSN, opts.Verbose)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Cleanup failed: %s\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tos.Exit(0)\n\t}\n\t_ = flag.Set(\"test.run\", opts.Match)\n\tif opts.Verbose {\n\t\t_ = flag.Set(\"test.v\", \"true\")\n\t}\n\ttests := []testing.InternalTest{\n\t\t{\n\t\t\tName: \"MainTest\",\n\t\t\tF: func(t *testing.T) {\n\t\t\t\tTest(t, opts.Driver, opts.DSN, opts.Suites, 
opts.RW)\n\t\t\t},\n\t\t},\n\t}\n\n\tmainStart(tests)\n}", "func (runner TestSuiteRunner) RunTests(testNamesToRun map[string]bool, testParallelism uint) (allTestsPassed bool, executionErr error) {\n\tallTests := runner.testSuite.GetTests()\n\n\t// If the user doesn't specify any test names to run, run all of them\n\tif len(testNamesToRun) == 0 {\n\t\ttestNamesToRun = map[string]bool{}\n\t\tfor testName, _ := range allTests {\n\t\t\ttestNamesToRun[testName] = true\n\t\t}\n\t}\n\n\t// Validate all the requested tests exist\n\ttestsToRun := make(map[string]testsuite.Test)\n\tfor testName, _ := range testNamesToRun {\n\t\ttest, found := allTests[testName]\n\t\tif !found {\n\t\t\treturn false, stacktrace.NewError(\"No test registered with name '%v'\", testName)\n\t\t}\n\t\ttestsToRun[testName] = test\n\t}\n\n\texecutionInstanceId := uuid.Generate()\n\ttestParams, err := buildTestParams(executionInstanceId, testsToRun, runner.networkWidthBits)\n\tif err != nil {\n\t\treturn false, stacktrace.Propagate(err, \"An error occurred building the test params map\")\n\t}\n\n\t// Initialize a Docker client\n\tdockerClient, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())\n\tif err != nil {\n\t\treturn false, stacktrace.Propagate(err,\"Failed to initialize Docker client from environment.\")\n\t}\n\n\ttestExecutor := parallelism.NewTestExecutorParallelizer(\n\t\texecutionInstanceId,\n\t\tdockerClient,\n\t\trunner.testControllerImageName,\n\t\trunner.testControllerLogLevel,\n\t\trunner.customTestControllerEnvVars,\n\t\ttestParallelism)\n\n\tlogrus.Infof(\"Running %v tests with execution ID %v...\", len(testsToRun), executionInstanceId.String())\n\tallTestsPassed = testExecutor.RunInParallelAndPrintResults(testParams)\n\treturn allTestsPassed, nil\n}", "func main() {\n\ttest_plain_background()\n\ttest_cloud()\n\ttest_enemy()\n\ttest_move_background()\n\ttest_display_score()\n}", "func (t *TestRuntime) RunTests(m *testing.M) int {\n\treturn t.runTests(m, !testing.Verbose())\n}", "func executeTests(k8s *Kubernetes, testList []*TestCase) []*TestCase {\n\terr := bootstrap(k8s)\n\tfailOnError(err)\n\n\t//make a copy and append the tests with CIDRs\n\tcidrTests := []*TestCase{\n\t\t{\"IngressOverlapCIDRBlocks\", testIngressOverlapCIDRBlocks()},\n\t}\n\tmodifiedTestList := append(testList, cidrTests...)\n\n\tfor _, testCase := range modifiedTestList {\n\t\tlog.Infof(\"running test case %s\", testCase.Name)\n\t\tlog.Debugf(\"cleaning-up previous policies and sleeping for %v\", networkPolicyDelay)\n\t\terr = k8s.CleanNetworkPolicies(namespaces)\n\t\ttime.Sleep(networkPolicyDelay)\n\t\tfailOnError(err)\n\t\tfor _, step := range testCase.Steps {\n\t\t\tlog.Infof(\"running step %s of test case %s\", step.Name, testCase.Name)\n\t\t\treachability := step.Reachability\n\t\t\tpolicy := step.NetworkPolicy\n\t\t\tif policy != nil {\n\t\t\t\tlog.Debugf(\"creating policy and sleeping for %v\", networkPolicyDelay)\n\t\t\t\t_, err := k8s.CreateOrUpdateNetworkPolicy(policy.Namespace, policy)\n\t\t\t\tfailOnError(err)\n\t\t\t\ttime.Sleep(networkPolicyDelay)\n\t\t\t}\n\t\t\tstart := time.Now()\n\t\t\tvalidate(k8s, reachability, step.Port)\n\t\t\tstep.Duration = time.Now().Sub(start)\n\t\t\treachability.PrintSummary(true, true, true)\n\t\t}\n\t}\n\treturn modifiedTestList\n}", "func (ts ApisValidatableTestSuite) Run(t *testing.T) {\n\tt.Parallel()\n\tfor k, v := range ts {\n\t\tt.Run(k, v.Run)\n\t}\n}", "func executeTests(t *testing.T, tests ...testExecution) {\n\tctx := 
setupTestRequirements(t)\n\tdefer ctx.Cleanup()\n\n\tsetupComplianceOperatorCluster(t, ctx)\n\n\t// get global framework variables\n\tf := framework.Global\n\n\tns, err := ctx.GetNamespace()\n\tif err != nil {\n\t\tt.Fatalf(\"could not get namespace: %v\", err)\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.Name, func(t *testing.T) {\n\t\t\tif err := test.TestFn(t, f, ctx, ns); err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\t\t})\n\n\t}\n}", "func (suite FeatureTestSuite) Run(t *testing.T, buildFunc feature.BuildFunc) {\n\tfor _, test := range suite {\n\t\trunTest(t, test, buildFunc)\n\t}\n}", "func RunTests(t *testing.T, tests map[string]SubTest) {\n\tfor name, test := range tests {\n\t\tdomainKeeper, ctx, mocks := NewTestKeeper(t, true)\n\t\t// set default mock.Supply not to fail\n\t\tmocks.Supply.SetSendCoinsFromAccountToModule(func(ctx types.Context, addr types.AccAddress, moduleName string, coins types.Coins) error {\n\t\t\treturn nil\n\t\t})\n\t\t// set default fees\n\t\tsetFees := domainKeeper.ConfigurationKeeper.(ConfigurationSetter).SetFees\n\t\tfees := configuration.NewFees()\n\t\tfees.SetDefaults(\"testcoin\")\n\t\tsetFees(ctx, fees)\n\t\t// run sub SubTest\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\t// run before SubTest\n\t\t\tif test.BeforeTest != nil {\n\t\t\t\tif test.BeforeTestBlockTime != 0 {\n\t\t\t\t\tt := time.Unix(test.BeforeTestBlockTime, 0)\n\t\t\t\t\tctx = ctx.WithBlockTime(t)\n\t\t\t\t}\n\t\t\t\ttest.BeforeTest(t, domainKeeper, ctx, mocks)\n\t\t\t}\n\n\t\t\tif test.TestBlockTime != 0 {\n\t\t\t\tt := time.Unix(test.TestBlockTime, 0)\n\t\t\t\tctx = ctx.WithBlockTime(t)\n\t\t\t}\n\t\t\t// run actual SubTest\n\t\t\ttest.Test(t, domainKeeper, ctx, mocks)\n\n\t\t\t// run after SubTest\n\t\t\tif test.AfterTest != nil {\n\t\t\t\tif test.AfterTestBlockTime != 0 {\n\t\t\t\t\tt := time.Unix(test.AfterTestBlockTime, 0)\n\t\t\t\t\tctx = ctx.WithBlockTime(t)\n\t\t\t\t}\n\t\t\t\ttest.AfterTest(t, domainKeeper, ctx, mocks)\n\t\t\t}\n\t\t})\n\t}\n}", "func RunBuiltinTests(t *testing.T, resourceType string) {\n\t// Get a list of all test cases\n\tbox := packr.NewBox(\"./assets/\" + resourceType)\n\tfilesInBox := box.List()\n\tfor _, file := range filesInBox {\n\t\tif isTestCase(file) {\n\t\t\tabsolutePath, _ := filepath.Abs(\"./assets/\" + resourceType + \"/\" + file)\n\t\t\tts, err := loadTestSuite(absolutePath)\n\t\t\tif err != nil {\n\t\t\t\tassert.Nil(t, err, \"Cannot load test case\")\n\t\t\t}\n\t\t\trunTestSuite(t, ts)\n\t\t}\n\t}\n}", "func runTestSuite(t *testing.T, ts TestSuite) {\n\t// Load only the rule for this test suite\n\truleConfigPath := strings.Split(ts.RootPath, \"config-lint/cli/assets/\")[1] + \"/rule.yml\"\n\truleSet, err := loadBuiltInRuleSet(ruleConfigPath)\n\tif err != nil {\n\t\tassert.Nil(t, err, \"Cannot load built-in Terraform rule\")\n\t}\n\n\tfor _, tc := range ts.Tests {\n\t\toptions := linter.Options{\n\t\t\tRuleIDs: []string{tc.RuleId},\n\t\t}\n\t\tvs := assertion.StandardValueSource{}\n\n\t\t// validate the rule set\n\t\tif contains(tc.Tags, \"terraform11\") {\n\t\t\t// Load the test resources for this test suite\n\t\t\ttestResourceDirectory := strings.Split(ts.RootPath, \"config-lint/cli/\")[1] + \"/tests/terraform11/\"\n\t\t\ttestResources, err := getTestResources(testResourceDirectory)\n\t\t\tif err != nil {\n\t\t\t\tassert.Nil(t, err, \"Cannot load built-in Terraform 11 test resources\")\n\n\t\t\t}\n\t\t\t// Defining 'tf11' for the Parser type\n\t\t\tl, err := linter.NewLinter(ruleSet, vs, testResources, \"tf11\")\n\n\t\t\treport, err 
:= l.Validate(ruleSet, options)\n\t\t\tassert.Nil(t, err, \"Validate failed for file\")\n\n\t\t\twarningViolationsReported := getViolationsString(\"WARNING\", report.Violations)\n\t\t\twarningMessage := fmt.Sprintf(\"Expecting %d warnings for rule %s:\\n %s\", tc.Warnings, tc.RuleId, warningViolationsReported)\n\t\t\tassert.Equal(t, tc.Warnings, numberOfWarnings(report.Violations), warningMessage)\n\n\t\t\tfailureViolationsReported := getViolationsString(\"FAILURE\", report.Violations)\n\t\t\tfailureMessage := fmt.Sprintf(\"Expecting %d failures for rule %s:\\n %s\", tc.Failures, tc.RuleId, failureViolationsReported)\n\t\t\tassert.Equal(t, tc.Failures, numberOfFailures(report.Violations), failureMessage)\n\t\t}\n\n\t\tif contains(tc.Tags, \"terraform12\") {\n\t\t\t// Load the test resources for this test suite\n\t\t\ttestResourceDirectory := strings.Split(ts.RootPath, \"config-lint/cli/\")[1] + \"/tests/terraform12/\"\n\t\t\ttestResources, err := getTestResources(testResourceDirectory)\n\t\t\tif err != nil {\n\t\t\t\tassert.Nil(t, err, \"Cannot load built-in Terraform 12 test resources\")\n\n\t\t\t}\n\t\t\t// Defining 'tf11' for the Parser type\n\t\t\tl, err := linter.NewLinter(ruleSet, vs, testResources, \"tf12\")\n\n\t\t\treport, err := l.Validate(ruleSet, options)\n\t\t\tassert.Nil(t, err, \"Validate failed for file\")\n\n\t\t\twarningViolationsReported := getViolationsString(\"WARNING\", report.Violations)\n\t\t\twarningMessage := fmt.Sprintf(\"Expecting %d warnings for rule %s:\\n %s\", tc.Warnings, tc.RuleId, warningViolationsReported)\n\t\t\tassert.Equal(t, tc.Warnings, numberOfWarnings(report.Violations), warningMessage)\n\n\t\t\tfailureViolationsReported := getViolationsString(\"FAILURE\", report.Violations)\n\t\t\tfailureMessage := fmt.Sprintf(\"Expecting %d failures for rule %s:\\n %s\", tc.Failures, tc.RuleId, failureViolationsReported)\n\t\t\tassert.Equal(t, tc.Failures, numberOfFailures(report.Violations), failureMessage)\n\t\t}\n\t}\n}", "func (sfs *SuiteFS) RunTests(t *testing.T, userName string, stFuncs ...SuiteTestFunc) {\n\tvfs := sfs.vfsSetup\n\n\t_, _ = sfs.User(t, userName)\n\tdefer sfs.User(t, sfs.initUser.Name())\n\n\tfor _, stFunc := range stFuncs {\n\t\tfuncName := runtime.FuncForPC(reflect.ValueOf(stFunc).Pointer()).Name()\n\t\tfuncName = funcName[strings.LastIndex(funcName, \".\")+1 : strings.LastIndex(funcName, \"-\")]\n\t\ttestDir := vfs.Join(sfs.rootDir, funcName)\n\n\t\tsfs.CreateTestDir(t, testDir)\n\n\t\tt.Run(funcName, func(t *testing.T) {\n\t\t\tstFunc(t, testDir)\n\t\t})\n\n\t\tsfs.RemoveTestDir(t, testDir)\n\t}\n}", "func TestRun(t *testing.T) {\n\tsuite.Run(t, new(CategoryTestSuite))\n\tsuite.Run(t, new(ProductTestSuite))\n}", "func runTestScenarios(t *testing.T, tests []testCase) {\n\trunTestScenariosWithInput(t, tests, nil)\n}", "func TestRun(t *testing.T) {\n\tRun()\n}", "func TestRunTestAllReal(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping test in short mode.\")\n\t}\n\n\ttaskData := agent.TaskData{\n\t\tStringValues: map[string]string{\n\t\t\tCFG_TEST_TYPE: CFG_TYPE_ALL,\n\t\t\tCFG_SERVER_HOST: \"speedtest.nyc.rr.com:8080\",\n\t\t\tCFG_SERVER_ID: \"16976\",\n\t\t},\n\t\tIntValues: map[string]int{\n\t\t\tCFG_SERVER_ID: 16976,\n\t\t\tCFG_TIME_OUT: 5,\n\t\t},\n\t\tFloatValues: map[string]float64{CFG_MAX_SECONDS: 6},\n\t\tIntSlices: map[string][]int{\n\t\t\tCFG_DOWNLOAD_SIZES: {245388, 505544},\n\t\t\tCFG_UPLOAD_SIZES: {32768, 65536},\n\t\t},\n\t}\n\n\tspdTestRunner := SpeedTestRunner{}\n\n\tspTestResults, err := 
spdTestRunner.Run(taskData)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected Error: \\n%s\", err.Error())\n\t}\n\n\tresults := spTestResults.Latency.Seconds()\n\n\tif results <= 0 {\n\t\tt.Errorf(\"Error: Expected a positive Latency result, but got: %f\", results)\n\t} else {\n\t\tfmt.Printf(\"\\nLatency test results for server %d ... %f\\n\", taskData.IntValues[CFG_SERVER_ID], results)\n\t}\n\n\tresults = spTestResults.Download\n\tif results <= 0 {\n\t\tt.Errorf(\"Error: Expected a positive Download result, but got: %f\", results)\n\t} else {\n\t\tfmt.Printf(\"\\nDownload test results for server %d ... %f\\n\", taskData.IntValues[CFG_SERVER_ID], results)\n\t}\n\n\tresults = spTestResults.Upload\n\tif results <= 0 {\n\t\tt.Errorf(\"Error: Expected a positive Upload result, but got: %f\", results)\n\t} else {\n\t\tfmt.Printf(\"\\nUpload test results for server %d ... %f\\n\", taskData.IntValues[CFG_SERVER_ID], results)\n\t}\n}", "func RunTestCases(t *testing.T, testcases *[]TestCase, ctx *TestContext) {\n\tcontext := ctx\n\tif context == nil {\n\t\tcontext = &TestContext{\n\t\t\tNameIDMap: make(map[string]string),\n\t\t\tNameObjectMap: make(map[string]interface{}),\n\t\t}\n\t}\n\n\tfor _, tc := range *testcases {\n\t\tTestLog = t\n\n\t\tif true { //tc.Enabled){\n\n\t\t\tt.Logf(\"======Begin to run test case: %s \\n\", tc.Name)\n\n\t\t\t//If test case name is like sleep_500, then it would sleep 500ms\n\t\t\tmethod := strings.ToLower(tc.Method)\n\t\t\tif strings.Contains(method, \"sleep\") {\n\t\t\t\tsleep, _ := strconv.Atoi(method[6:])\n\t\t\t\tt.Logf(\"Sleep %d mill-secondes\", sleep)\n\t\t\t\ttime.Sleep(time.Duration(sleep) * time.Millisecond)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar runner TestExecuter\n\t\t\tif tc.Executer != nil {\n\t\t\t\trunner = tc.Executer()\n\t\t\t} else {\n\t\t\t\tt.Fatalf(\"No Executer specified in test cases\")\n\t\t\t}\n\t\t\tt.Run(tc.Name, testcaseFunc(runner, context, &tc))\n\n\t\t}\n\t}\n}", "func (test Test) Run(t *testing.T) {\n\tt.Logf(\"Starting test %v\", t.Name())\n\tt.Helper()\n\t// Double negative cannot be helped, this is intended to mitigate test failures where a global\n\t// resource is manipulated, e.g.: the default AWS security group.\n\tif !test.RunOptions.NoParallel {\n\t\tt.Parallel()\n\t}\n\tt.Run(\"Python\", func(t *testing.T) {\n\t\trunOpts := integration.ProgramTestOptions{}\n\t\tif test.RunOptions != nil {\n\t\t\trunOpts = *test.RunOptions\n\t\t}\n\t\tconvertOpts := test.Options\n\t\tif test.Python != nil {\n\t\t\tconvertOpts = convertOpts.With(*test.Python)\n\t\t}\n\n\t\ttargetTest := targetTest{\n\t\t\trunOpts: &runOpts,\n\t\t\tconvertOpts: &convertOpts,\n\t\t\tprojectName: test.ProjectName,\n\t\t\tlanguage: \"python\",\n\t\t\truntime: \"python\",\n\t\t}\n\t\ttargetTest.Run(t)\n\t})\n\tt.Run(\"TypeScript\", func(t *testing.T) {\n\t\trunOpts := integration.ProgramTestOptions{}\n\t\tif test.RunOptions != nil {\n\t\t\trunOpts = *test.RunOptions\n\t\t}\n\t\tconvertOpts := test.Options\n\t\tif test.TypeScript != nil {\n\t\t\tconvertOpts = convertOpts.With(*test.TypeScript)\n\t\t}\n\n\t\ttargetTest := targetTest{\n\t\t\trunOpts: &runOpts,\n\t\t\tconvertOpts: &convertOpts,\n\t\t\tprojectName: test.ProjectName,\n\t\t\tlanguage: \"typescript\",\n\t\t\truntime: \"nodejs\",\n\t\t}\n\t\ttargetTest.Run(t)\n\t})\n}", "func (s *Suite) RunRulesTest() {\n\ts.t.Helper()\n\n\tif s.RuleFile == \"\" {\n\t\ts.t.Error(\"testing with rules started with an empty rule\")\n\t\treturn\n\t}\n\n\trparser := rules.NewParser()\n\trset, err := rparser.Parse(\"<test>\", 
strings.NewReader(s.RuleFile))\n\tif err != nil {\n\t\ts.t.Fatalf(\"parse rules: %v\", err)\n\t}\n\n\ts.Config().Rules = rset\n\ts.IgnoreUndeclaredChecks()\n\n\truleNamesSet := make(map[string]struct{}, len(rset.Names))\n\tfor _, name := range rset.Names {\n\t\truleNamesSet[name] = struct{}{}\n\t}\n\n\tvar filtered []*linter.Report\n\tresult := s.RunLinter()\n\tfor _, r := range result.Reports {\n\t\tif _, ok := ruleNamesSet[r.CheckName]; ok {\n\t\t\tfiltered = append(filtered, r)\n\t\t}\n\t}\n\n\ts.Match(filtered)\n}", "func (runner *suiteRunner) run() *Result {\n if runner.tracker.result.RunError == nil && len(runner.tests) > 0 {\n runner.tracker.start()\n if runner.checkFixtureArgs() {\n if runner.runFixture(runner.setUpSuite) {\n for i := 0; i != len(runner.tests); i++ {\n c := runner.runTest(runner.tests[i])\n if c.status == fixturePanickedSt {\n runner.missTests(runner.tests[i+1:])\n break\n }\n }\n } else {\n runner.missTests(runner.tests)\n }\n runner.runFixture(runner.tearDownSuite)\n } else {\n runner.missTests(runner.tests)\n }\n runner.tracker.waitAndStop()\n runner.tempDir.removeAll()\n }\n return &runner.tracker.result\n}", "func (c *actionTests) actionRun(t *testing.T) {\n\te2e.EnsureImage(t, c.env)\n\n\ttests := []struct {\n\t\tname string\n\t\targv []string\n\t\texit int\n\t}{\n\t\t{\n\t\t\tname: \"NoCommand\",\n\t\t\targv: []string{c.env.ImagePath},\n\t\t\texit: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"True\",\n\t\t\targv: []string{c.env.ImagePath, \"true\"},\n\t\t\texit: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"False\",\n\t\t\targv: []string{c.env.ImagePath, \"false\"},\n\t\t\texit: 1,\n\t\t},\n\t\t{\n\t\t\tname: \"ScifTestAppGood\",\n\t\t\targv: []string{\"--app\", \"testapp\", c.env.ImagePath},\n\t\t\texit: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"ScifTestAppBad\",\n\t\t\targv: []string{\"--app\", \"fakeapp\", c.env.ImagePath},\n\t\t\texit: 1,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tc.env.RunSingularity(\n\t\t\tt,\n\t\t\te2e.AsSubtest(tt.name),\n\t\t\te2e.WithProfile(e2e.UserProfile),\n\t\t\te2e.WithCommand(\"run\"),\n\t\t\te2e.WithArgs(tt.argv...),\n\t\t\te2e.ExpectExit(tt.exit),\n\t\t)\n\t}\n}", "func runTests(inv *logstash.Invocation, tests []testcase.TestCaseSet, diffCommand []string, keptEnvVars []string) error {\n\tok := true\n\tfor _, t := range tests {\n\t\tfmt.Printf(\"Running tests in %s...\\n\", filepath.Base(t.File))\n\t\tp, err := logstash.NewProcess(inv, t.Codec, t.InputFields, keptEnvVars)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer p.Release()\n\t\tif err = p.Start(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, line := range t.InputLines {\n\t\t\t_, err = p.Input.Write([]byte(line + \"\\n\"))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif err = p.Input.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tresult, err := p.Wait()\n\t\tif err != nil || *logstashOutput {\n\t\t\tmessage := getLogstashOutputMessage(result.Output, result.Log)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Error running Logstash: %s.%s\", err, message)\n\t\t\t}\n\t\t\tuserError(\"%s\", message)\n\t\t}\n\t\tif err = t.Compare(result.Events, false, diffCommand); err != nil {\n\t\t\tuserError(\"Testcase failed, continuing with the rest: %s\", err)\n\t\t\tok = false\n\t\t}\n\t}\n\tif !ok {\n\t\treturn errors.New(\"one or more testcases failed\")\n\t}\n\treturn nil\n}", "func (b *HRPBoomer) Run(testcases ...ITestCase) {\n\tevent := sdk.EventTracking{\n\t\tCategory: \"RunLoadTests\",\n\t\tAction: \"hrp boom\",\n\t}\n\t// report start event\n\tgo 
sdk.SendEvent(event)\n\t// report execution timing event\n\tdefer sdk.SendEvent(event.StartTiming(\"execution\"))\n\n\t// quit all plugins\n\tdefer func() {\n\t\tpluginMap.Range(func(key, value interface{}) bool {\n\t\t\tif plugin, ok := value.(funplugin.IPlugin); ok {\n\t\t\t\tplugin.Quit()\n\t\t\t}\n\t\t\treturn true\n\t\t})\n\t}()\n\n\ttaskSlice := b.ConvertTestCasesToBoomerTasks(testcases...)\n\n\tb.Boomer.Run(taskSlice...)\n}", "func runAllTestCases(t *testing.T, checker resultsChecker) {\n\tt.Helper()\n\tchecker.resetTestCasesRun()\n\terr := filepath.Walk(checker.rootDir(),\n\t\tfunc(path string, info os.FileInfo, err error) error {\n\t\t\trequire.NoError(t, err)\n\t\t\tif info.IsDir() && checker.isTestDir(path) {\n\t\t\t\trunDirectoryTestCase(t, path, checker)\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\trequire.NoError(t, err)\n\trequire.NotZero(t, len(checker.TestCasesRun()), \"No complete test cases found in %s\", checker.rootDir())\n}", "func Run(config *Settings) {\n\tfileNames, err := config.reader.ReadExecutables()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tvar resMutex = &sync.Mutex{}\n\tres := make([]*google.TestResult, 0)\n\n\tvar tasksWg sync.WaitGroup\n\ttasksWg.Add(len(fileNames))\n\tworkerQueueLimit := make(chan bool, config.workersCount) \n\n\tfor _, line := range fileNames {\n\t\tworkerQueueLimit <- true // try get access to work\n\t\tgo func(file string) {\n\t\t\tdefer tasksWg.Done()\n\t\t\tdefer func() { <-workerQueueLimit }() // decrease working queue\n\n\t\t\tconfig.formatter.ShowSuiteStart(file)\n\n\t\t\tgr, output, err := runGoogleTest(file, config.workingDir)\n\t\t\tif err != nil {\n\t\t\t\tconfig.formatter.ShowSuiteFailure(file, output, err)\n\t\t\t} else {\n\t\t\t\tresMutex.Lock()\n\t\t\t\tdefer resMutex.Unlock()\n\t\t\t\tres = append(res, gr)\n\t\t\t\tconfig.formatter.ShowTests(gr, output)\n\t\t\t}\n\t\t}(line)\n\t}\n\n\ttasksWg.Wait()\n\n\tconfig.formatter.ShowStatistics(res)\n}", "func (s MockInputsBoolsHelper) RunTests(t testRunner, testSet []bool, testFunc func(t *testing.T, index int, f bool)) {\n\tif test, ok := t.(helper); ok {\n\t\ttest.Helper()\n\t}\n\n\ttest := internal.GetTest(t)\n\tif test == nil {\n\t\tt.Error(internal.ErrCanNotRunIfNotBuiltinTesting)\n\t\treturn\n\t}\n\n\tfor i, v := range testSet {\n\t\ttest.Run(fmt.Sprint(v), func(t *testing.T) {\n\t\t\tt.Helper()\n\n\t\t\ttestFunc(t, i, v)\n\t\t})\n\t}\n}", "func (t *Test) Run(tc *TestSuite) error {\n\n\tmqutil.Logger.Print(\"\\n--- \" + t.Name)\n\tfmt.Printf(\"\\nRunning test case: %s\\n\", t.Name)\n\terr := t.ResolveParameters(tc)\n\tif err != nil {\n\t\tfmt.Printf(\"... Fail\\n... 
%s\\n\", err.Error())\n\t\treturn err\n\t}\n\n\treq := resty.R()\n\tif len(tc.ApiToken) > 0 {\n\t\treq.SetAuthToken(tc.ApiToken)\n\t} else if len(tc.Username) > 0 {\n\t\treq.SetBasicAuth(tc.Username, tc.Password)\n\t}\n\n\tpath := GetBaseURL(t.db.Swagger) + t.SetRequestParameters(req)\n\tvar resp *resty.Response\n\n\tt.startTime = time.Now()\n\tswitch t.Method {\n\tcase mqswag.MethodGet:\n\t\tresp, err = req.Get(path)\n\tcase mqswag.MethodPost:\n\t\tresp, err = req.Post(path)\n\tcase mqswag.MethodPut:\n\t\tresp, err = req.Put(path)\n\tcase mqswag.MethodDelete:\n\t\tresp, err = req.Delete(path)\n\tcase mqswag.MethodPatch:\n\t\tresp, err = req.Patch(path)\n\tcase mqswag.MethodHead:\n\t\tresp, err = req.Head(path)\n\tcase mqswag.MethodOptions:\n\t\tresp, err = req.Options(path)\n\tdefault:\n\t\treturn mqutil.NewError(mqutil.ErrInvalid, fmt.Sprintf(\"Unknown method in test %s: %v\", t.Name, t.Method))\n\t}\n\tt.stopTime = time.Now()\n\tfmt.Printf(\"... call completed: %f seconds\\n\", t.stopTime.Sub(t.startTime).Seconds())\n\n\tif err != nil {\n\t\tt.err = mqutil.NewError(mqutil.ErrHttp, err.Error())\n\t} else {\n\t\tmqutil.Logger.Print(resp.Status())\n\t\tmqutil.Logger.Println(string(resp.Body()))\n\t}\n\terr = t.ProcessResult(resp)\n\treturn err\n}", "func RunTests(m *testing.M, version *int) {\n\tflag.IntVar(version, \"v\", 0, \"The anwork version that should be used with these tests\")\n\tflag.Parse()\n\n\tif *version == 0 {\n\t\tpanic(\"Version (-v) must be passed with a legitimate anwork version number\")\n\t}\n\n\tos.Exit(m.Run())\n}", "func RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool) {}", "func (cs *Suite) Run(ctx context.Context, metricsClient metrics.Client, imageData ImageData) []CheckResult {\n\tresults := make([]CheckResult, 0, len(cs.checks))\n\tresultsChan := make(chan CheckResult, len(cs.checks))\n\tdefer close(resultsChan)\n\n\tfor name, check := range cs.checks {\n\t\tgo runner(ctx, name, check, imageData, resultsChan, metricsClient)\n\t}\n\n\tfor range cs.checks {\n\t\tresults = append(results, <-resultsChan)\n\t}\n\n\treturn results\n}", "func runTests(c *C, overrider configOverrider, tests ...func(dbt *DBTest)) {\n\tdb, err := sql.Open(\"mysql\", getDSN(overrider))\n\tc.Assert(err, IsNil, Commentf(\"Error connecting\"))\n\tdefer db.Close()\n\n\tdb.Exec(\"DROP TABLE IF EXISTS test\")\n\n\tdbt := &DBTest{c, db}\n\tfor _, test := range tests {\n\t\ttest(dbt)\n\t\tdbt.db.Exec(\"DROP TABLE IF EXISTS test\")\n\t}\n}", "func (t *TestBehaviour) RunAllTests() {\n\tt.RunGetICLATemplateWatermark()\n\tt.RunGetCCLATemplateWatermark()\n}", "func Run(test *TestData, result chan *TestResult) {\n\n\t// tests which don't require loading of reaction data output\n\tnonDataParseTests := []string{\"DIFF_FILE_CONTENT\", \"FILE_MATCH_PATTERN\",\n\t\t\"CHECK_TRIGGERS\", \"CHECK_EXPRESSIONS\", \"CHECK_LEGACY_VOL_OUTPUT\",\n\t\t\"CHECK_EMPTY_FILE\", \"CHECK_ASCII_VIZ_OUTPUT\", \"CHECK_CHECKPOINT\"}\n\n\tfor _, c := range test.Checks {\n\n\t\tdataPaths, err := file.GetDataPaths(test.Path, c.DataFile, test.Run.Seed,\n\t\t\ttest.Run.NumSeeds)\n\t\tif err != nil {\n\t\t\tresult <- &TestResult{test.Path, false, c.TestType, fmt.Sprint(err)}\n\t\t\tcontinue\n\t\t}\n\n\t\t// load the data for test types which need it\n\t\tvar data []*file.Columns\n\t\tvar stringData []*file.StringColumns\n\t\t// NOTE: only attempt to parse data for the test cases which need it\n\t\tif c.DataFile != \"\" && !misc.ContainsString(nonDataParseTests, c.TestType) {\n\t\t\tdata, err = 
file.LoadData(dataPaths, c.HaveHeader, c.AverageData)\n\t\t\tif err != nil {\n\t\t\t\tresult <- &TestResult{test.Path, false, c.TestType, fmt.Sprint(err)}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t} else if c.TestType == \"CHECK_TRIGGERS\" {\n\t\t\tstringData, err = file.LoadStringData(dataPaths, c.HaveHeader)\n\t\t\tif err != nil {\n\t\t\t\tresult <- &TestResult{test.Path, false, c.TestType, fmt.Sprint(err)}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t// execute requested tests on data\n\t\tvar testErr error\n\t\tswitch c.TestType {\n\t\tcase \"CHECK_SUCCESS\":\n\t\t\tif test.SimStatus == nil {\n\t\t\t\tresult <- &TestResult{test.Path, false, \"CHECK_SUCCESS\",\n\t\t\t\t\t\"simulations did not run or return an exit status\"}\n\t\t\t\treturn // if simulation fails we won't continue testing\n\t\t\t}\n\n\t\t\t// in order to cut down on the amount of output (particularly in the case of\n\t\t\t// multiple seeds) we return failure if one or more of all runs within a test\n\t\t\t// fails and success otherwise\n\t\t\tfor _, testRun := range test.SimStatus {\n\t\t\t\tif !testRun.Success {\n\t\t\t\t\tmessage := strings.Join([]string{testRun.ExitMessage, testRun.StdErrContent}, \"\\n\")\n\t\t\t\t\tresult <- &TestResult{test.Path, false, \"CHECK_SUCCESS\", message}\n\t\t\t\t\treturn // if simulation fails we won't continue testing\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase \"CHECK_EXIT_CODE\":\n\t\t\tfor _, testRun := range test.SimStatus {\n\t\t\t\tif c.ExitCode != testRun.ExitCode {\n\t\t\t\t\ttestErr = fmt.Errorf(\"Expected exit code %d but got %d instead\",\n\t\t\t\t\t\tc.ExitCode, testRun.ExitCode)\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase \"CHECK_NONEMPTY_FILES\":\n\t\t\tif testErr = checkFilesEmpty(test, c, false); testErr != nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\tcase \"CHECK_EMPTY_FILES\":\n\t\t\tif testErr = checkFilesEmpty(test, c, true); testErr != nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\tcase \"CHECK_CHECKPOINT\":\n\t\t\tif testErr = checkCheckPoint(test.Path, c); testErr != nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\tcase \"CHECK_LEGACY_VOL_OUTPUT\":\n\t\t\tfor _, p := range dataPaths {\n\t\t\t\tif testErr = checkLegacyVolOutput(p, c); testErr != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase \"CHECK_ASCII_VIZ_OUTPUT\":\n\t\t\tfor _, p := range dataPaths {\n\t\t\t\tif testErr = checkASCIIVizOutput(p); testErr != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase \"DIFF_FILE_CONTENT\":\n\t\t\tfor _, p := range dataPaths {\n\t\t\t\tif testErr = diffFileContent(test.Path, p, c.TemplateFile,\n\t\t\t\t\tc.TemplateParameters); testErr != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase \"COUNT_CONSTRAINTS\":\n\t\t\tfor i, d := range data {\n\t\t\t\tif testErr = checkCountConstraints(d, dataPaths[i], c.MinTime, c.MaxTime,\n\t\t\t\t\tc.CountConstraints); testErr != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase \"COUNT_MINMAX\":\n\t\t\tfor i, d := range data {\n\t\t\t\tif testErr = checkCountMinmax(d, dataPaths[i], c.MinTime, c.MaxTime,\n\t\t\t\t\tc.CountMaximum, c.CountMinimum); testErr != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase \"FILE_MATCH_PATTERN\":\n\t\t\tfor _, dataPath := range dataPaths {\n\t\t\t\tif testErr = fileMatchPattern(dataPath, c.MatchPattern, c.NumMatches); testErr != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase \"CHECK_EXPRESSIONS\":\n\t\t\tfor _, dataPath := range dataPaths {\n\t\t\t\tif testErr = checkExpressions(dataPath); testErr != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase \"COMPARE_COUNTS\":\n\t\t\t// only one of absDeviation or relDeviation can be 
defined\n\t\t\tif (len(c.AbsDeviation) > 0) && (len(c.RelDeviation) > 0) {\n\t\t\t\ttestErr = fmt.Errorf(\"absDeviation and relDeviation are mutually exclusive\")\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\treferencePath := filepath.Join(test.Path, c.ReferenceFile)\n\t\t\trefData, err := file.ReadCounts(referencePath, c.HaveHeader)\n\t\t\tif err != nil {\n\t\t\t\ttestErr = err\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tfor i, d := range data {\n\t\t\t\tif testErr = compareCounts(d, refData, c.AbsDeviation, c.RelDeviation,\n\t\t\t\t\tdataPaths[i], c.MinTime, c.MaxTime); testErr != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase \"COUNT_EQUILIBRIUM\":\n\t\t\tfor i, d := range data {\n\t\t\t\tif testErr = checkCountEquilibrium(d, dataPaths[i], c.MinTime, c.MaxTime,\n\t\t\t\t\tc.Means, c.Tolerances); testErr != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase \"POSITIVE_COUNTS\":\n\t\t\tfor i, d := range data {\n\t\t\t\tif testErr = checkPositiveOrZeroCounts(d, dataPaths[i], c.MinTime,\n\t\t\t\t\tc.MaxTime, false); testErr != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase \"POSITIVE_OR_ZERO_COUNTS\":\n\t\t\tfor i, d := range data {\n\t\t\t\tif testErr = checkPositiveOrZeroCounts(d, dataPaths[i], c.MinTime,\n\t\t\t\t\tc.MaxTime, true); testErr != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase \"ZERO_COUNTS\":\n\t\t\tfor i, d := range data {\n\t\t\t\tif testErr = checkZeroCounts(d, dataPaths[i], c.MinTime,\n\t\t\t\t\tc.MaxTime); testErr != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase \"COUNT_RATES\":\n\t\t\tfor i, d := range data {\n\t\t\t\tif testErr = countRates(d, dataPaths[i], c.MinTime, c.MaxTime,\n\t\t\t\t\tc.BaseTime, c.Means, c.Tolerances); testErr != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase \"CHECK_TRIGGERS\":\n\t\t\tfor i, d := range stringData {\n\t\t\t\tif testErr = checkTriggers(d, dataPaths[i], c.MinTime, c.MaxTime,\n\t\t\t\t\tc.TriggerType, c.HaveExactTime, c.OutputTime, c.Xrange, c.Yrange,\n\t\t\t\t\tc.Zrange); testErr != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\tdefault:\n\t\t\ttestErr = fmt.Errorf(\"Unknown test type: %s\", c.TestType)\n\t\t\tbreak\n\t\t}\n\t\trecordResult(result, c.TestType, test.Path, testErr)\n\t}\n}", "func (d *Driver) runTests(ctx context.Context, bundle string, tests []*protocol.ResolvedEntity, dutInfos map[string]*protocol.DUTInfo, client *reporting.RPCClient, remoteDevservers []string) ([]*resultsjson.Result, error) {\n\n\targs := &runTestsArgs{\n\t\tDUTInfo: dutInfos,\n\t\tCounter: failfast.NewCounter(d.cfg.MaxTestFailures()),\n\t\tClient: client,\n\t\tRemoteDevservers: remoteDevservers,\n\t\tSwarmingTaskID: d.cfg.SwarmingTaskID(),\n\t\tBuildBucketID: d.cfg.BuildBucketID(),\n\t}\n\n\tif !ShouldRunTestsRecursively() {\n\t\tlocalTests, remoteTests, err := splitTests(tests)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t// Note: These methods can return non-nil results even on errors.\n\t\tlocalResults, err := d.runLocalTests(ctx, bundle, localTests, args)\n\t\tif err != nil {\n\t\t\treturn localResults, err\n\t\t}\n\t\tvar remoteTestNames []string\n\t\tfor _, t := range remoteTests {\n\t\t\tremoteTestNames = append(remoteTestNames, t.GetEntity().GetName())\n\t\t}\n\t\tremoteResults, err := d.runRemoteTests(ctx, bundle, remoteTestNames, args)\n\n\t\treturn append(localResults, remoteResults...), err\n\t}\n\tvar testNames []string\n\tfor _, t := range tests {\n\t\ttestNames = append(testNames, t.GetEntity().GetName())\n\t}\n\treturn d.runRemoteTests(ctx, bundle, testNames, args)\n}", "func RunTests(ctx 
context.Context, w io.Writer, path string) error {\n\tif path == \"\" || path == \".\" {\n\t\tvar err error\n\t\tpath, err = os.Getwd()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfiles, err := getTestFiles(ctx, path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor i := range files {\n\t\terr = runFile(ctx, &files[i])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\terr = Report(w, files)\n\treturn err\n}", "func (e *CachedTestExecutor) Run(ctx context.Context, actions ...chromedp.Action) error {\n\targs := e.Called(ctx, actions)\n\treturn args.Error(0)\n}", "func (i *InspecRunner) RunAllTests(path string) (verifiers.TestSuite, error) {\n\tv := inspec.InspecVerifier{}\n\terr := v.Setup(path)\n\n\tif err != nil {\n\t\tfmt.Println(\"error during inspec verifier setup\")\n\t\treturn verifiers.TestSuite{}, err\n\t}\n\n\tresult, err := v.Check(path)\n\n\tif err != nil {\n\t\tfmt.Println(\"error during inspec test execution\")\n\t\treturn result, err\n\t}\n\treturn result, nil\n\n}", "func (f *VRFTest) Run() error {\n\tif err := f.createChainlinkJobs(); err != nil {\n\t\treturn err\n\t}\n\tvar ctx context.Context\n\tvar testCtxCancel context.CancelFunc\n\tif f.TestOptions.TestDuration.Seconds() > 0 {\n\t\tctx, testCtxCancel = context.WithTimeout(context.Background(), f.TestOptions.TestDuration)\n\t} else {\n\t\tctx, testCtxCancel = context.WithCancel(context.Background())\n\t}\n\tdefer testCtxCancel()\n\tcancelPerfEvents := f.watchPerfEvents()\n\tcurrentRound := 0\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tlog.Info().Msg(\"Test finished\")\n\t\t\ttime.Sleep(f.TestOptions.GracefulStopDuration)\n\t\t\tcancelPerfEvents()\n\t\t\treturn nil\n\t\tdefault:\n\t\t\tlog.Info().Int(\"RoundID\", currentRound).Msg(\"New round\")\n\t\t\tif err := f.requestRandomness(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := f.waitRoundFulfilled(currentRound + 1); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif f.TestOptions.NumberOfRounds != 0 && currentRound >= f.TestOptions.NumberOfRounds {\n\t\t\t\tlog.Info().Msg(\"Final round is reached\")\n\t\t\t\ttestCtxCancel()\n\t\t\t}\n\t\t\tcurrentRound++\n\t\t}\n\t}\n}", "func (tc ScannerTestcase) Run(ctx context.Context) func(*testing.T) {\n\tsort.Slice(tc.Want, pkgSort(tc.Want))\n\treturn func(t *testing.T) {\n\t\tctx := zlog.Test(ctx, t)\n\t\td := tc.Digest()\n\t\tn, err := fetch.Layer(ctx, t, http.DefaultClient, tc.Domain, tc.Name, d)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer n.Close()\n\t\tl := &claircore.Layer{\n\t\t\tHash: d,\n\t\t}\n\t\tl.SetLocal(n.Name())\n\n\t\tgot, err := tc.Scanner.Scan(ctx, l)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tsort.Slice(got, pkgSort(got))\n\t\tt.Logf(\"found %d packages\", len(got))\n\t\tif !cmp.Equal(tc.Want, got) {\n\t\t\tt.Error(cmp.Diff(tc.Want, got))\n\t\t}\n\t}\n}", "func run(t *testing.T, formatter Formatter, suites ...TCatcher) {\n\tvar (\n\t\tbeforeAllFound, afterAllFound bool\n\t\tbeforeAll, afterAll, before, after reflect.Value\n\t\ttotalPassed, totalFailed, totalPending, totalNoAssertions int\n\t)\n\n\tflag.Parse()\n\n\tfor _, s := range suites {\n\t\tbeforeAll, afterAll, before, after = reflect.Value{}, reflect.Value{}, reflect.Value{}, reflect.Value{}\n\t\ts.SetT(t)\n\t\ts.Reset()\n\n\t\tiType := reflect.TypeOf(s)\n\n\t\tformatter.PrintSuiteName(strings.Split(iType.String(), \".\")[1])\n\n\t\t// search for Before and After methods\n\t\tfor i := 0; i < iType.NumMethod(); i++ {\n\t\t\tmethod := iType.Method(i)\n\t\t\tif ok, _ := 
regexp.MatchString(\"^BeforeAll\", method.Name); ok {\n\t\t\t\tif !beforeAllFound {\n\t\t\t\t\tbeforeAll = method.Func\n\t\t\t\t\tbeforeAllFound = true\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ok, _ := regexp.MatchString(\"^AfterAll\", method.Name); ok {\n\t\t\t\tif !afterAllFound {\n\t\t\t\t\tafterAll = method.Func\n\t\t\t\t\tafterAllFound = true\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ok, _ := regexp.MatchString(\"^Before\", method.Name); ok {\n\t\t\t\tbefore = method.Func\n\t\t\t}\n\t\t\tif ok, _ := regexp.MatchString(\"^After\", method.Name); ok {\n\t\t\t\tafter = method.Func\n\t\t\t}\n\t\t}\n\n\t\tif beforeAll.IsValid() {\n\t\t\tbeforeAll.Call([]reflect.Value{reflect.ValueOf(s)})\n\t\t}\n\n\t\tfor i := 0; i < iType.NumMethod(); i++ {\n\t\t\tmethod := iType.Method(i)\n\t\t\tif ok, _ := regexp.MatchString(*testToRun, method.Name); ok {\n\t\t\t\tif ok, _ := regexp.MatchString(formatter.AllowedMethodsPattern(), method.Name); ok {\n\n\t\t\t\t\ts.SetStatus(&Status{Code: STATUS_NO_ASSERTIONS})\n\n\t\t\t\t\tif before.IsValid() {\n\t\t\t\t\t\tbefore.Call([]reflect.Value{reflect.ValueOf(s)})\n\t\t\t\t\t}\n\n\t\t\t\t\tmethod.Func.Call([]reflect.Value{reflect.ValueOf(s)})\n\n\t\t\t\t\tif after.IsValid() {\n\t\t\t\t\t\tafter.Call([]reflect.Value{reflect.ValueOf(s)})\n\t\t\t\t\t}\n\n\t\t\t\t\tvar info *suiteInfo\n\t\t\t\t\tstatus := s.GetStatus()\n\n\t\t\t\t\tswitch status.Code {\n\t\t\t\t\tcase STATUS_PASS:\n\t\t\t\t\t\tinfo = s.GetInfo()\n\t\t\t\t\t\ttotalPassed++\n\t\t\t\t\tcase STATUS_FAIL:\n\t\t\t\t\t\tinfo = s.GetInfo()\n\t\t\t\t\t\tt.Error(status.ErrorMessage)\n\t\t\t\t\t\ttotalFailed++\n\t\t\t\t\tcase STATUS_PENDING:\n\t\t\t\t\t\tinfo = s.GetInfo()\n\t\t\t\t\t\tinfo.assertions = 0\n\t\t\t\t\t\ttotalPending++\n\t\t\t\t\tcase STATUS_NO_ASSERTIONS:\n\t\t\t\t\t\tinfo = &suiteInfo{0, method.Name}\n\t\t\t\t\t\ttotalNoAssertions++\n\t\t\t\t\t}\n\t\t\t\t\tformatter.PrintStatus(status, info)\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\n\t\tif afterAll.IsValid() {\n\t\t\tafterAll.Call([]reflect.Value{reflect.ValueOf(s)})\n\t\t}\n\t}\n\n\tformatter.PrintFinalReport(totalPassed, totalFailed, totalPending, totalNoAssertions)\n}", "func Run(t *testing.T, s suite.TestingSuite) {\n\tsuite.Run(t, s)\n}", "func (s *Service) RunTest(ctx context.Context, req *conformance.Request) (*conformance.Response, error) {\n\tvar config test_gen.ServiceMesh\n\n\tconfig = linkerdConfig\n\tswitch req.Mesh.Type {\n\tcase smp.ServiceMesh_LINKERD:\n\t\tconfig = linkerdConfig\n\t\treq.Mesh.Annotations[\"linkerd.io/inject\"] = \"enabled\"\n\tcase smp.ServiceMesh_APP_MESH:\n\t\tconfig = linkerdConfig\n\t\treq.Mesh.Labels[\"appmesh.k8s.aws/sidecarInjectorWebhook\"] = \"enabled\"\n\tcase smp.ServiceMesh_MAESH:\n\t\tconfig = maeshConfig\n\tcase smp.ServiceMesh_ISTIO:\n\t\tconfig = istioConfig\n\t\treq.Mesh.Labels[\"istio-injection\"] = \"enabled\"\n\tcase smp.ServiceMesh_OPEN_SERVICE_MESH:\n\t\tconfig = osmConfig\n\t\treq.Mesh.Labels[\"openservicemesh.io/monitored-by\"] = \"osm\"\n\tcase smp.ServiceMesh_KUMA:\n\t\treq.Mesh.Annotations[\"kuma.io/sidecar-injection\"] = \"enabled\"\n\tcase smp.ServiceMesh_NGINX_SERVICE_MESH:\n\t\treq.Mesh.Annotations[\"njector.nsm.nginx.com/auto-inject\"] = \"true\"\n\n\t}\n\n\tresult := test_gen.RunTest(config, req.Mesh.Annotations, req.Mesh.Labels)\n\ttotalSteps := 24\n\ttotalFailures := 0\n\tstepsCount := map[string]int{\n\t\t\"traffic-access\": 7,\n\t\t\"traffic-split\": 11,\n\t\t\"traffic-spec\": 6,\n\t}\n\tspecVersion := map[string]string{\n\t\t\"traffic-access\": 
\"v0.6.0/v1alpha3\",\n\t\t\"traffic-split\": \"v0.6.0/v1alpha4\",\n\t\t\"traffic-spec\": \"v0.6.0/v1alpha4\",\n\t}\n\n\tdetails := make([]*conformance.Detail, 0)\n\tfor _, res := range result.Testsuite[0].Testcase {\n\t\td := &conformance.Detail{\n\t\t\tSmispec: res.Name,\n\t\t\tSpecversion: specVersion[res.Name],\n\t\t\tAssertion: strconv.Itoa(stepsCount[res.Name]),\n\t\t\tDuration: res.Time,\n\t\t\tCapability: conformance.Capability_FULL,\n\t\t\tStatus: conformance.ResultStatus_PASSED,\n\t\t\tResult: &conformance.Result{\n\t\t\t\tResult: &conformance.Result_Message{\n\t\t\t\t\tMessage: \"All test passed\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tif len(res.Failure.Text) > 2 {\n\t\t\td.Result = &conformance.Result{\n\t\t\t\tResult: &conformance.Result_Error{\n\t\t\t\t\tError: &service.CommonError{\n\t\t\t\t\t\tCode: \"\",\n\t\t\t\t\t\tSeverity: \"\",\n\t\t\t\t\t\tShortDescription: res.Failure.Text,\n\t\t\t\t\t\tLongDescription: res.Failure.Message,\n\t\t\t\t\t\tProbableCause: \"\",\n\t\t\t\t\t\tSuggestedRemediation: \"\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\td.Status = conformance.ResultStatus_FAILED\n\t\t\td.Capability = conformance.Capability_NONE\n\n\t\t\t// A hacky way to see the testStep Failed, since KUDO only provides it in Failure.Message\n\t\t\tre := regexp.MustCompile(`[0-9]+`)\n\t\t\tif res.Failure.Message != \"\" {\n\t\t\t\tstepFailed := re.FindAllString(res.Failure.Message, 1)\n\t\t\t\tif len(stepFailed) != 0 {\n\t\t\t\t\tpassed, _ := strconv.Atoi(stepFailed[0])\n\t\t\t\t\tpassed = passed - 1\n\t\t\t\t\tfailures := stepsCount[res.Name] - passed\n\t\t\t\t\ttotalFailures += failures\n\t\t\t\t\tif (passed) >= (stepsCount[res.Name] / 2) {\n\t\t\t\t\t\td.Capability = conformance.Capability_HALF\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tdetails = append(details, d)\n\t}\n\n\treturn &conformance.Response{\n\t\tCasespassed: strconv.Itoa(totalSteps - totalFailures),\n\t\tPasspercent: strconv.FormatFloat(float64(totalSteps-totalFailures)/float64(totalSteps)*100, 'f', 2, 64),\n\t\tMesh: req.Mesh,\n\t\tDetails: details,\n\t}, nil\n}", "func (t TestCases) Run(fn func(string) (string, string), hideInput bool) {\n\tfor _, test := range t {\n\t\tpart1, part2 := fn(test.Input)\n\t\tpassedPart1 := part1 == test.ExpectedPart1 || test.ExpectedPart1 == \"\"\n\t\tpassedPart2 := part2 == test.ExpectedPart2 || test.ExpectedPart2 == \"\"\n\t\tpassed := passedPart1 && passedPart2\n\n\t\tif !passed && !hideInput {\n\t\t\tfmt.Println(\"Input \", test.Input)\n\t\t}\n\t\tif !passedPart1 {\n\t\t\tfmt.Println(\" - PART1: \", part1, \" but expected \", test.ExpectedPart1)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tif !passedPart2 {\n\t\t\tfmt.Println(\" - PART2: \", part2, \" but expected \", test.ExpectedPart2)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n}", "func (suite *AddCommandTestSuite) TestExecuteWhenMultipleTracksFound() {\n\n}", "func (Tests) Run(ctx context.Context) error {\n\targ := BuildDockerComposeArgs(ProjectName, ProjectType, \"test\", DockerComposeTestFile)\n\targ = append(arg, \"run\")\n\targ = append(arg,\n\t\t\"--rm\",\n\t\t\"--use-aliases\",\n\t)\n\targ = append(arg, \"app\", \"go\", \"test\", \"-mod=vendor\", \"-v\", \"-cover\")\n\tif err := Exec(ComposeBin, append(arg, \"./service\")...); err != nil {\n\t\treturn err\n\t}\n\tif err := Exec(ComposeBin, append(arg, \"./...\")...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (o Scorecard) runTest(test Test) (result *v1.Pod, err error) {\n\n\t// Create a Pod to run the test\n\tpodDef := getPodDefinition(test, o)\n\tresult, err = 
o.Client.CoreV1().Pods(o.Namespace).Create(context.TODO(), podDef, metav1.CreateOptions{})\n\treturn result, err\n}", "func runTests(t *testing.T, tests []test) {\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tresp := executeRequest(tt.method, tt.url, serialize(tt.req), tt.asAdmin)\n\t\t\tif resp.StatusCode != tt.want {\n\t\t\t\tt.Errorf(\"Unexpected status code %d\", resp.StatusCode)\n\t\t\t}\n\n\t\t\tif tt.body != \"\" {\n\t\t\t\tbodyBytes, err := ioutil.ReadAll(resp.Body)\n\t\t\t\tdefer resp.Body.Close()\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"Error loading body\")\n\t\t\t\t}\n\t\t\t\tif tt.body != string(bodyBytes) {\n\t\t\t\t\tt.Errorf(\"Unexpected body '%s', expected '%s'\", bodyBytes, tt.body)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}", "func (c *Command) Run(args []string) {\n\tflag.StringVar(&c.Filter, \"f\", \"\", \"regexp to filter tests by name\")\n\tflag.BoolVar(&c.Verbose, \"v\", false, \"print all test names\")\n\tcheck(flag.CommandLine.Parse(args))\n\targs = flag.Args()\n\n\tif len(args) == 0 {\n\t\targs = []string{\".\"}\n\t}\n\n\tokPath, err := util.OKPath()\n\tcheck(err)\n\n\tfor _, arg := range args {\n\t\tpackageName := util.PackageNameFromPath(okPath, arg)\n\t\tif arg == \".\" {\n\t\t\tpackageName = \".\"\n\t\t}\n\t\tanonFunctionName := 0\n\t\tf, _, errs := compiler.Compile(okPath, packageName, true,\n\t\t\t&anonFunctionName, false)\n\t\tutil.CheckErrorsWithExit(errs)\n\n\t\tm := vm.NewVM(\"no-package\")\n\t\tstartTime := time.Now()\n\t\tcheck(m.LoadFile(f))\n\t\terr := m.RunTests(c.Verbose, regexp.MustCompile(c.Filter), packageName)\n\t\telapsed := time.Since(startTime).Milliseconds()\n\t\tcheck(err)\n\n\t\tassertWord := pluralise(\"assert\", m.TotalAssertions)\n\t\tif m.TestsFailed > 0 {\n\t\t\tfmt.Printf(\"%s: %d failed %d passed %d %s (%d ms)\\n\",\n\t\t\t\tpackageName, m.TestsFailed, m.TestsPass,\n\t\t\t\tm.TotalAssertions, assertWord, elapsed)\n\t\t} else {\n\t\t\tfmt.Printf(\"%s: %d passed %d %s (%d ms)\\n\",\n\t\t\t\tpackageName, m.TestsPass,\n\t\t\t\tm.TotalAssertions, assertWord, elapsed)\n\t\t}\n\n\t\tif m.TestsFailed > 0 {\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n}", "func RulesUnitTest(queryOpts promql.LazyLoaderOpts, files ...string) int {\n\tfailed := false\n\n\tfor _, f := range files {\n\t\tif errs := ruleUnitTest(f, queryOpts); errs != nil {\n\t\t\tfmt.Fprintln(os.Stderr, \" FAILED:\")\n\t\t\tfor _, e := range errs {\n\t\t\t\tfmt.Fprintln(os.Stderr, e.Error())\n\t\t\t\tfmt.Println()\n\t\t\t}\n\t\t\tfailed = true\n\t\t} else {\n\t\t\tfmt.Println(\" SUCCESS\")\n\t\t}\n\t\tfmt.Println()\n\t}\n\tif failed {\n\t\treturn failureExitCode\n\t}\n\treturn successExitCode\n}", "func (d *Driver) RunTests(ctx context.Context,\n\ttests []*BundleEntity,\n\tdutInfos map[string]*protocol.DUTInfo,\n\tclient *reporting.RPCClient,\n\tremoteDevservers []string) ([]*resultsjson.Result, error) {\n\ttestsPerBundle := make(map[string][]*protocol.ResolvedEntity)\n\tfor _, t := range tests {\n\t\ttestsPerBundle[t.Bundle] = append(testsPerBundle[t.Bundle], t.Resolved)\n\t}\n\tbundles := make([]string, 0, len(testsPerBundle))\n\tfor b := range testsPerBundle {\n\t\tbundles = append(bundles, b)\n\t}\n\tsort.Strings(bundles)\n\tvar results []*resultsjson.Result\n\tfor _, bundle := range bundles {\n\t\tres, err := d.runTests(ctx, bundle, testsPerBundle[bundle], dutInfos, client, remoteDevservers)\n\t\tresults = append(results, res...)\n\t\tif err != nil {\n\t\t\treturn results, err\n\t\t}\n\t}\n\treturn results, nil\n}", "func RunTestScenarios(scenarioDir 
string, scenarioFileNames []string, t *originT.T) {\n\tnewT := testing.NewT(t)\n\n\t// Register default accounts configured into runtime key mapping\n\tRegisterDefaultAccountKeys()\n\n\tvar files []string\n\n\tscenarioDirectory := path.Join(FixtureTestOpts.BaseDirectory, scenarioDir)\n\terr := filepath.Walk(scenarioDirectory, func(path string, info os.FileInfo, err error) error {\n\t\tif filepath.Ext(path) != \".json\" {\n\t\t\treturn nil\n\t\t}\n\t\tscenarioName := strings.TrimSuffix(info.Name(), \".json\")\n\t\tt.Log(fmt.Sprintf(\"checking %s from %+v\", scenarioName, scenarioFileNames))\n\t\tif len(scenarioFileNames) != 0 && !inttest.Exists(scenarioFileNames, scenarioName) {\n\t\t\treturn nil\n\t\t}\n\t\tt.Log(\"added\", scenarioName)\n\t\tfiles = append(files, path)\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tt.Fatal(\"error walking through scenario directory\", err)\n\t}\n\tfor _, file := range files {\n\t\tt.Log(\"Registering work queues for scenario path=\", file)\n\t\tRunRegisterWorkQueuesForSingleFixture(file, &newT)\n\t}\n\n\tfor _, file := range files {\n\t\tt.Log(\"Running scenario path=\", file)\n\t\tRunSingleFixtureTest(file, &newT)\n\t}\n}", "func (l *List) Run() ([]*chaostest.ChaosTest, error) {\n\tif err := l.cfg.KubeClient.IsReachable(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// TODO: Implement filter\n\tresults, err := l.cfg.ChaosTests.List()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Conclude the status of chaos test\n\tfor _, res := range results {\n\t\tfor _, exp := range res.Experiments {\n\t\t\tif exp.Phase == \"\" {\n\t\t\t\tres.SetStatus(chaostest.StatusPending, fmt.Sprintf(\"experiment is starting\"))\n\t\t\t}\n\t\t\tif strings.ToLower(exp.Phase) == chaostest.StatusRunning.String() {\n\t\t\t\tres.SetStatus(chaostest.StatusRunning, fmt.Sprintf(\"experiment %q is currently running\", exp.Experiment))\n\t\t\t}\n\t\t\tif strings.ToLower(exp.Phase) == chaostest.StatusCompleted.String() {\n\t\t\t\tres.SetStatus(chaostest.StatusCompleted, \"all experiments have been completed\")\n\t\t\t}\n\t\t}\n\t}\n\treturn results, nil\n}", "func doTests(t *testing.T, tests []string) {\n\tdoTestsParam(t, tests, TestParams{\n\t\textensions: parser.CommonExtensions,\n\t})\n}", "func (controller TestController) RunTest() (setupErr error, testErr error) {\n\ttests := controller.testSuite.GetTests()\n\tlogrus.Debugf(\"Test configs: %v\", tests)\n\ttest, found := tests[controller.testName]\n\tif !found {\n\t\treturn stacktrace.NewError(\"Nonexistent test: %v\", controller.testName), nil\n\t}\n\n\tnetworkLoader, err := test.GetNetworkLoader()\n\tif err != nil {\n\t\treturn stacktrace.Propagate(err, \"Could not get network loader\"), nil\n\t}\n\n\tlogrus.Info(\"Connecting to Docker environment...\")\n\t// Initialize a Docker client\n\tdockerClient, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())\n\tif err != nil {\n\t\treturn stacktrace.Propagate(err,\"Failed to initialize Docker client from environment.\"), nil\n\t}\n\tdockerManager, err := docker.NewDockerManager(logrus.StandardLogger(), dockerClient)\n\tif err != nil {\n\t\treturn stacktrace.Propagate(err, \"An error occurred when constructing the Docker manager\"), nil\n\t}\n\tlogrus.Info(\"Connected to Docker environment\")\n\n\tlogrus.Infof(\"Configuring test network in Docker network %v...\", controller.networkId)\n\talreadyTakenIps := map[string]bool{\n\t\tcontroller.gatewayIp: true,\n\t\tcontroller.testControllerIp: true,\n\t}\n\tfreeIpTracker, err := 
networks.NewFreeIpAddrTracker(logrus.StandardLogger(), controller.subnetMask, alreadyTakenIps)\n\tif err != nil {\n\t\treturn stacktrace.Propagate(err, \"An error occurred creating the free IP address tracker\"), nil\n\t}\n\n\tbuilder := networks.NewServiceNetworkBuilder(\n\t\t\tdockerManager,\n\t\t\tcontroller.networkId,\n\t\t\tfreeIpTracker,\n\t\t\tcontroller.testVolumeName,\n\t\t\tcontroller.testVolumeFilepath)\n\tif err := networkLoader.ConfigureNetwork(builder); err != nil {\n\t\treturn stacktrace.Propagate(err, \"Could not configure test network in Docker network %v\", controller.networkId), nil\n\t}\n\tnetwork := builder.Build()\n\tdefer func() {\n\t\tlogrus.Info(\"Stopping test network...\")\n\t\terr := network.RemoveAll(CONTAINER_STOP_TIMEOUT)\n\t\tif err != nil {\n\t\t\tlogrus.Error(\"An error occurred stopping the network\")\n\t\t\tfmt.Fprintln(logrus.StandardLogger().Out, err)\n\t\t} else {\n\t\t\tlogrus.Info(\"Successfully stopped the test network\")\n\t\t}\n\t}()\n\tlogrus.Info(\"Test network configured\")\n\n\tlogrus.Info(\"Initializing test network...\")\n\tavailabilityCheckers, err := networkLoader.InitializeNetwork(network);\n\tif err != nil {\n\t\treturn stacktrace.Propagate(err, \"An error occurred initialized the network to its starting state\"), nil\n\t}\n\tlogrus.Info(\"Test network initialized\")\n\n\t// Second pass: wait for all services to come up\n\tlogrus.Info(\"Waiting for test network to become available...\")\n\tfor serviceId, availabilityChecker := range availabilityCheckers {\n\t\tlogrus.Debugf(\"Waiting for service %v to become available...\", serviceId)\n\t\tif err := availabilityChecker.WaitForStartup(); err != nil {\n\t\t\treturn stacktrace.Propagate(err, \"An error occurred waiting for service with ID %v to start up\", serviceId), nil\n\t\t}\n\t\tlogrus.Debugf(\"Service %v is available\", serviceId)\n\t}\n\tlogrus.Info(\"Test network is available\")\n\n\tlogrus.Info(\"Executing test...\")\n\tuntypedNetwork, err := networkLoader.WrapNetwork(network)\n\tif err != nil {\n\t\treturn stacktrace.Propagate(err, \"Error occurred wrapping network in user-defined network type\"), nil\n\t}\n\n\ttestResultChan := make(chan error)\n\n\tgo func() {\n\t\ttestResultChan <- runTest(test, untypedNetwork)\n\t}()\n\n\t// Time out the test so a poorly-written test doesn't run forever\n\ttestTimeout := test.GetExecutionTimeout()\n\tvar timedOut bool\n\tvar testResultErr error\n\tselect {\n\tcase testResultErr = <- testResultChan:\n\t\tlogrus.Tracef(\"Test returned result before timeout: %v\", testResultErr)\n\t\ttimedOut = false\n\tcase <- time.After(testTimeout):\n\t\tlogrus.Tracef(\"Hit timeout %v before getting a result from the test\", testTimeout)\n\t\ttimedOut = true\n\t}\n\n\tlogrus.Tracef(\"After running test w/timeout: resultErr: %v, timedOut: %v\", testResultErr, timedOut)\n\n\tif timedOut {\n\t\treturn nil, stacktrace.NewError(\"Timed out after %v waiting for test to complete\", testTimeout)\n\t}\n\n\tlogrus.Info(\"Test execution completed\")\n\n\tif testResultErr != nil {\n\t\treturn nil, stacktrace.Propagate(testResultErr, \"An error occurred when running the test\")\n\t}\n\n\treturn nil, nil\n}", "func (s *IntegrationSuite) TestRun(c *C) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\t// Execute e2e workflow\n\tlog.Info().Print(\"Running e2e integration test.\", field.M{\"app\": s.name, \"testName\": c.TestName()})\n\n\t// Check config\n\terr := s.app.Init(ctx)\n\tif err != nil {\n\t\tlog.Info().Print(\"Skipping integration 
test.\", field.M{\"app\": s.name, \"reason\": err.Error()})\n\t\ts.skip = true\n\t\tc.Skip(err.Error())\n\t}\n\n\t// Create namespace\n\terr = createNamespace(s.cli, s.namespace)\n\tc.Assert(err, IsNil)\n\n\t// Create profile\n\tif s.profile == nil {\n\t\tlog.Info().Print(\"Skipping integration test. Could not create profile. Please check if required credentials are set.\", field.M{\"app\": s.name})\n\t\ts.skip = true\n\t\tc.Skip(\"Could not create a Profile\")\n\t}\n\tprofileName := s.createProfile(c, ctx)\n\n\t// Install db\n\terr = s.app.Install(ctx, s.namespace)\n\tc.Assert(err, IsNil)\n\n\t// Check if ready\n\tok, err := s.app.IsReady(ctx)\n\tc.Assert(err, IsNil)\n\tc.Assert(ok, Equals, true)\n\n\t// Create blueprint\n\tbp := s.bp.Blueprint()\n\tc.Assert(bp, NotNil)\n\t_, err = s.crCli.Blueprints(kontroller.namespace).Create(ctx, bp, metav1.CreateOptions{})\n\tc.Assert(err, IsNil)\n\n\tvar configMaps, secrets map[string]crv1alpha1.ObjectReference\n\ttestEntries := 3\n\t// Add test entries to DB\n\tif a, ok := s.app.(app.DatabaseApp); ok {\n\t\t// wait for application to be actually ready\n\t\terr = pingAppAndWait(ctx, a)\n\t\tc.Assert(err, IsNil)\n\n\t\terr = a.Reset(ctx)\n\t\tc.Assert(err, IsNil)\n\n\t\terr = a.Initialize(ctx)\n\t\tc.Assert(err, IsNil)\n\n\t\t// Add few entries\n\t\tfor i := 0; i < testEntries; i++ {\n\t\t\tc.Assert(a.Insert(ctx), IsNil)\n\t\t}\n\n\t\tcount, err := a.Count(ctx)\n\t\tc.Assert(err, IsNil)\n\t\tc.Assert(count, Equals, testEntries)\n\t}\n\n\t// Get Secret and ConfigMap object references\n\tif a, ok := s.app.(app.ConfigApp); ok {\n\t\tconfigMaps = a.ConfigMaps()\n\t\tsecrets = a.Secrets()\n\t}\n\n\t// Validate Blueprint\n\tvalidateBlueprint(c, *bp, configMaps, secrets)\n\n\t// Create ActionSet specs\n\tas := newActionSet(bp.GetName(), profileName, kontroller.namespace, s.app.Object(), configMaps, secrets)\n\t// Take backup\n\tbackup := s.createActionset(ctx, c, as, \"backup\", nil)\n\tc.Assert(len(backup), Not(Equals), 0)\n\n\t// Save timestamp for PITR\n\tvar restoreOptions map[string]string\n\tif b, ok := s.bp.(app.PITRBlueprinter); ok {\n\t\tpitr := b.FormatPITR(time.Now())\n\t\tlog.Info().Print(\"Saving timestamp for PITR\", field.M{\"pitr\": pitr})\n\t\trestoreOptions = map[string]string{\n\t\t\t\"pitr\": pitr,\n\t\t}\n\t\t// Add few more entries with timestamp > pitr\n\t\ttime.Sleep(time.Second)\n\t\tif a, ok := s.app.(app.DatabaseApp); ok {\n\t\t\tc.Assert(a.Insert(ctx), IsNil)\n\t\t\tc.Assert(a.Insert(ctx), IsNil)\n\n\t\t\tcount, err := a.Count(ctx)\n\t\t\tc.Assert(err, IsNil)\n\t\t\tc.Assert(count, Equals, testEntries+2)\n\t\t}\n\t}\n\n\t// Reset DB\n\tif a, ok := s.app.(app.DatabaseApp); ok {\n\t\terr = a.Reset(ctx)\n\t\tc.Assert(err, IsNil)\n\t}\n\n\t// Restore backup\n\tpas, err := s.crCli.ActionSets(kontroller.namespace).Get(ctx, backup, metav1.GetOptions{})\n\tc.Assert(err, IsNil)\n\ts.createActionset(ctx, c, pas, \"restore\", restoreOptions)\n\n\t// Verify data\n\tif a, ok := s.app.(app.DatabaseApp); ok {\n\t\t// wait for application to be actually ready\n\t\terr = pingAppAndWait(ctx, a)\n\t\tc.Assert(err, IsNil)\n\n\t\tcount, err := a.Count(ctx)\n\t\tc.Assert(err, IsNil)\n\t\tc.Assert(count, Equals, testEntries)\n\t}\n\n\t// Delete snapshots\n\ts.createActionset(ctx, c, pas, \"delete\", nil)\n}", "func RunTest(flags *Flags) error {\n\tswitch flags.Mode {\n\tcase constants.ManagerMode:\n\t\treturn workermanager.New().RunTest()\n\tcase constants.WorkerMode:\n\t\tslackURL := flags.SlackURL\n\t\tvar slacks []string\n\t\tif len(slackURL) > 0 
{\n\t\t\tslacks = append(slacks, slackURL)\n\t\t}\n\t\treturn worker.NewWorker().RunTest(flags.Type, slacks)\n\t}\n\n\treturn nil\n}", "func (scenTest *GetStartedFunctionsScenarioTest) RunSubTest(stubber *testtools.AwsmStubber) {\n\tmockQuestioner := demotools.MockQuestioner{Answers: scenTest.Answers}\n\tscenario := NewGetStartedFunctionsScenario(*stubber.SdkConfig, &mockQuestioner, &scenTest.helper)\n\tscenario.isTestRun = true\n\tscenario.Run()\n}", "func TestAll(t *testing.T) {\n\tcfg := initializeTests(t)\n\ttestUser(t, cfg)\n\ttestHomeMessage(t, cfg)\n\ttestDepartment(t, cfg)\n\ttestCommunity(t, cfg)\n\ttestCity(t, cfg)\n\ttestCopro(t, cfg)\n\ttestBudgetAction(t, cfg)\n\ttestRenewProject(t, cfg)\n\ttestHousingType(t, cfg)\n\ttestHousing(t, cfg)\n\ttestCommitment(t, cfg)\n\ttestBeneficiary(t, cfg)\n\ttestPayment(t, cfg)\n\ttestBudgetSector(t, cfg)\n\ttestCommitmentLink(t, cfg)\n\ttestCommission(t, cfg)\n\ttestRenewProjectForecast(t, cfg)\n\ttestHousingForecast(t, cfg)\n\ttestCoproForecast(t, cfg)\n\ttestSettings(t, cfg)\n\ttestHome(t, cfg)\n\ttestBeneficiaryDatas(t, cfg)\n\ttestBeneficiaryPayments(t, cfg)\n\ttestPmtRatio(t, cfg)\n\ttestPmtForecasts(t, cfg)\n\ttestCmtForecasts(t, cfg)\n\ttestLinkCommitmentsHousings(t, cfg)\n\ttestCoproCommitmentLink(t, cfg)\n\ttestRPEventType(t, cfg)\n\ttestRPEvent(t, cfg)\n\ttestRenewProjectReport(t, cfg)\n\ttestRPPerCommunityReport(t, cfg)\n\ttestRPCmtCityJoin(t, cfg)\n\ttestDepartmentReport(t, cfg)\n\ttestCityReport(t, cfg)\n\ttestPreProg(t, cfg)\n\ttestProg(t, cfg)\n\ttestRPLS(t, cfg)\n\ttestSummaries(t, cfg)\n\ttestHousingSummary(t, cfg)\n\ttestCoproEventType(t, cfg)\n\ttestCoproEvent(t, cfg)\n\ttestCoproDoc(t, cfg)\n\ttestCoproReport(t, cfg)\n\ttestRPMultiAnnualReport(t, cfg)\n\ttestPaymentCredits(t, cfg)\n\ttestPaymentCreditJournals(t, cfg)\n\ttestPlacement(t, cfg)\n\ttestBeneficiaryGroup(t, cfg)\n\ttestBeneficiaryGroupDatas(t, cfg)\n\ttestHousingTypology(t, cfg)\n\ttestHousingConvention(t, cfg)\n\ttestHousingComment(t, cfg)\n\ttestHousingTransfer(t, cfg)\n\ttestConventionType(t, cfg)\n\ttestReservationFee(t, cfg)\n\ttestGetDifActionPaymentPrevisions(t, cfg)\n\ttestReservationReport(t, cfg)\n\ttestSoldCommitment(t, cfg)\n\ttestAvgPmtTime(t, cfg)\n\ttestPaymentDemands(t, cfg)\n\ttestPaymentDelays(t, cfg)\n}", "func RunTest(t *testing.T, name string, f Func, testCases []TestCase) {\n\tt.Run(name, func(t *testing.T) {\n\t\tfor _, test := range testCases {\n\t\t\tif actual := f(test.Input); actual != test.Expected {\n\t\t\t\tt.Errorf(\"\\nfor n=%d, expected: %t, actual: %t\", test.Input, test.Expected, actual)\n\t\t\t}\n\t\t}\n\t})\n}", "func TestAll() error {\n\tout, err := sh.Output(\"go\", \"test\", \"./...\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Print(out)\n\treturn nil\n}", "func TestRunMain(t *testing.T) {\n\tmain()\n}", "func Test(t *testing.T, command Runner, testCases []Case) {\n\tt.Helper()\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.Name, func(t *testing.T) {\n\t\t\tt.Helper() // TODO: make Helper working for subtests: issue #24128\n\n\t\t\tstdout := &bytes.Buffer{}\n\t\t\tstderr := &bytes.Buffer{}\n\n\t\t\tcommand.SetStdout(stdout)\n\t\t\tcommand.SetStderr(stderr)\n\n\t\t\tm := newMatch(t, tc.wantFail)\n\n\t\t\tif tc.WantFile != \"\" {\n\t\t\t\tif !m.removeFile(tc.WantFile) {\n\t\t\t\t\ttc.WantFile = \"\" // stop testing File match\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tvar gotErr string\n\t\t\tgotPanic := m.run(func() {\n\t\t\t\tif err := command.Run(tc.Args); err != nil {\n\t\t\t\t\tgotErr = 
err.Error()\n\t\t\t\t}\n\t\t\t})\n\n\t\t\tif tc.WantFile != \"\" {\n\t\t\t\tif gotFile, ext, ok := m.getFile(tc.WantFile); ok {\n\t\t\t\t\tm.match(\"File golden\"+ext, gotFile, \"golden\"+ext)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tm.match(\"WantStdout\", stdout.String(), tc.WantStdout)\n\t\t\tm.match(\"WantStderr\", stderr.String(), tc.WantStderr)\n\t\t\tm.match(\"WantPanic\", gotPanic, tc.WantPanic)\n\t\t\tm.match(\"WantErr\", gotErr, tc.WantErr)\n\t\t\tm.equal(\"WantExitCode\", command.ExitCode(), tc.WantExitCode)\n\n\t\t\tm.done()\n\t\t})\n\t}\n}", "func (t *Test) Run(ctx context.Context, opts ...TestOption) (*TestResult, error) {\n\tparsedOpts := &testOptions{\n\t\tvars: &starlark.Dict{},\n\t}\n\tfor _, opt := range opts {\n\t\topt.applyTest(parsedOpts)\n\t}\n\n\tthread := &starlark.Thread{\n\t\tPrint: skyPrint,\n\t}\n\tthread.SetLocal(\"context\", ctx)\n\n\tassertModule := assertmodule.AssertModule()\n\ttestCtx := &starlarkstruct.Module{\n\t\tName: \"skycfg_test_ctx\",\n\t\tMembers: starlark.StringDict(map[string]starlark.Value{\n\t\t\t\"vars\": parsedOpts.vars,\n\t\t\t\"assert\": assertModule,\n\t\t}),\n\t}\n\targs := starlark.Tuple([]starlark.Value{testCtx})\n\n\tresult := TestResult{\n\t\tTestName: t.Name(),\n\t}\n\n\tstartTime := time.Now()\n\t_, err := starlark.Call(thread, t.callable, args, nil)\n\tresult.Duration = time.Since(startTime)\n\tif err != nil {\n\t\t// if there is no assertion error, there was something wrong with the execution itself\n\t\tif len(assertModule.Failures) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// there should only be one failure, because each test run gets its own *TestContext\n\t\t// and each assertion failure halts execution.\n\t\tif len(assertModule.Failures) > 1 {\n\t\t\tpanic(\"A test run should only have one assertion failure. 
Something went wrong with the test infrastructure.\")\n\t\t}\n\t\tresult.Failure = assertModule.Failures[0]\n\t}\n\n\treturn &result, nil\n}", "func TestSolution(t *testing.T) {\n\tfor test, f := range tests {\n\t\tRunTest(t, test, f, SampleTestCases)\n\t}\n}", "func TestMain(m *testing.M) {\n\tprintln(\"do stuff before all tests\")\n\tm.Run()\n\tprintln(\"do stuff after all tests\")\n}", "func Test(cli *DockerClient, problemDir, fileName, solutionDir, ft string) (SubmissionStatus, error) {\n\tdefer cleanUpArtifacts(problemDir, fileName, ft)\n\n\tlogFile, err := os.Create(filepath.Join(problemDir, \"log.txt\"))\n\tif err != nil {\n\t\treturn RunnerError, err\n\t}\n\tdefer logFile.Close()\n\n\tlogFile.Write([]byte(\"Compile:\\n\"))\n\n\t// Compile the solution\n\terr = Compile(cli, problemDir, fileName, ft, logFile)\n\tif err == ErrExitStatusError {\n\t\tlogFile.Write([]byte(\"Status: Compile Error\\n\"))\n\t\treturn CompileError, nil\n\t} else if err != nil {\n\t\tlogFile.Write([]byte(\"Status: Runner error\\nError: Failed to run the compiler container: \" + err.Error() + \"\\n\"))\n\t\treturn RunnerError, err\n\t}\n\n\t// Run the submissions\n\ttests, err := ioutil.ReadDir(solutionDir)\n\tif err != nil {\n\t\tlogFile.Write([]byte(\"Status: Runner error\\nError: Failed to open solution directory: \" + err.Error() + \"\\n\"))\n\t\treturn RunnerError, err\n\t}\n\n\tproblemDef, err := loadProblem(problemDir)\n\tif err != nil {\n\t\tlogFile.Write([]byte(\"Failed to open solution definition (using defaults): \" + err.Error() + \"\\n\"))\n\t}\n\n\trunner, err := NewRunner(cli, problemDir, fileName, ft, time.Duration(problemDef.Time)*time.Second)\n\tif err != nil {\n\t\tlogFile.Write([]byte(\"Status: Runner error\\nError: Failed to create runner container: \" + err.Error() + \"\\n\"))\n\t\treturn RunnerError, err\n\t}\n\tdefer runner.Close()\n\n\tfor _, file := range tests {\n\t\tname, fileType := detectType(file.Name())\n\n\t\tif fileType != \"in\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tlogFile.Write([]byte(\"Running \" + file.Name() + \":\\n\"))\n\n\t\t// Get the inputs\n\t\tfileIn, err := os.Open(filepath.Join(solutionDir, file.Name()))\n\t\tif err != nil {\n\t\t\tlogFile.Write([]byte(\"Status: Runner error\\nError: Failed to load input file \" + file.Name() + \": \" + err.Error() + \"\\n\"))\n\t\t\treturn RunnerError, err\n\t\t}\n\t\tdefer fileIn.Close()\n\n\t\toutBuffer := bytes.NewBufferString(\"\")\n\n\t\t// Run the code\n\t\tif err := runner.Run(fileIn, outBuffer); err != nil {\n\t\t\tif err == ErrExitStatusError {\n\t\t\t\tlogFile.Write([]byte(\"Status: Exception\\n\"))\n\t\t\t\treturn Exception, nil\n\t\t\t} else if err == ErrTimeLimit {\n\t\t\t\tlogFile.Write([]byte(\"Status: Time Limit Exceeded\\n\"))\n\t\t\t\treturn TimeLimit, nil\n\t\t\t} else {\n\t\t\t\tlogFile.Write([]byte(\"Status: Runner error\\nError: Failed to run submission:\" + err.Error() + \"\\n\"))\n\t\t\t\treturn RunnerError, err\n\t\t\t}\n\t\t}\n\n\t\t// Verify the output of the submission\n\t\toutFile, err := ioutil.ReadFile(filepath.Join(solutionDir, name+\".out\"))\n\t\tif err != nil {\n\t\t\tlogFile.Write([]byte(\"Status: Runner error\\nError: Failed to load answer \" + name + \".out: \" + err.Error() + \"\\n\"))\n\t\t\treturn RunnerError, err\n\t\t}\n\n\t\texpectedOut := strings.Trim(string(outFile), \"\\r\\n\\t \")\n\t\tsolutionOut := strings.Trim(string(outBuffer.String()), \"\\r\\n\\t \")\n\n\t\tlogFile.Write([]byte(solutionOut + \"\\n\"))\n\n\t\tif expectedOut != solutionOut {\n\t\t\tlogFile.Write([]byte(\"Status: 
Wrong Answer\\n\"))\n\t\t\treturn Wrong, nil\n\t\t}\n\t}\n\n\tlogFile.Write([]byte(\"Status: Accepted\"))\n\treturn Ok, nil\n}", "func (b *KRMBlueprintTest) Test() {\n\tif b.ShouldSkip() {\n\t\tb.logger.Logf(b.t, \"Skipping test due to config %s\", b.BlueprintTestConfig.Path)\n\t\tb.t.SkipNow()\n\t\treturn\n\t}\n\ta := assert.New(b.t)\n\t// run stages\n\tutils.RunStage(\"init\", func() { b.Init(a) })\n\tdefer utils.RunStage(\"teardown\", func() { b.Teardown(a) })\n\tutils.RunStage(\"apply\", func() { b.Apply(a) })\n\tutils.RunStage(\"verify\", func() { b.Verify(a) })\n}", "func Run(opts ...Option) error {\n\tvar o options\n\tfor _, opt := range opts {\n\t\tif err := opt(&o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Now build the command to run with the realized options\n\t// struct.\n\targs := []string{\"test\", \"-v\", \"-json\"}\n\tif o.race {\n\t\targs = append(args, \"-race\")\n\t}\n\tif o.coverprofile != \"\" {\n\t\targs = append(args, \"-coverprofile=\"+o.coverprofile)\n\t}\n\tif o.coverpkg != \"\" {\n\t\targs = append(args, \"-coverpkg=\"+o.coverpkg)\n\t}\n\tif o.covermode != \"\" {\n\t\targs = append(args, \"-covermode=\"+o.covermode)\n\t}\n\tif o.p != 0 {\n\t\targs = append(args, \"-p=\"+strconv.Itoa(o.p))\n\t}\n\targs = append(args, \"./...\")\n\tcmd := exec.Command(\"go\", args...)\n\tcmd.Stderr = os.Stderr\n\n\tcmdStdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.Start(); err != nil {\n\t\treturn err\n\t}\n\tif err := parseGoTestJSONOutput(cmdStdout, newMultiResultAccepter(o.accepters...)); err != nil {\n\t\t_ = cmd.Process.Kill()\n\t\t_ = cmd.Wait()\n\t\treturn err\n\t}\n\tif err := cmd.Wait(); err != nil {\n\t\treturn fmt.Errorf(\"go test failed: %w\", err)\n\t}\n\n\treturn nil\n}", "func scoreboardRunFn(cmd *cobra.Command, args []string) {\n\tif err := run.Run(); err != nil {\n\t\tlogger.WithError(err).Fatal(\"runtime error\")\n\t}\n}", "func TestExecute(t *testing.T) {\n\tctx := context.Background()\n\n\t// Clear pre-existing golden files to avoid leaving stale ones around.\n\tif *updateGoldens {\n\t\tfiles, err := filepath.Glob(filepath.Join(*goldensDir, \"*.golden.json\"))\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tfor _, f := range files {\n\t\t\tif err := os.Remove(f); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}\n\t}\n\n\ttestCases := []struct {\n\t\tname string\n\t\tflags testsharderFlags\n\t\ttestSpecs []build.TestSpec\n\t\ttestDurations []build.TestDuration\n\t\ttestList []build.TestListEntry\n\t\tmodifiers []testsharder.TestModifier\n\t\tpackageRepos []build.PackageRepo\n\t\taffectedTests []string\n\t}{\n\t\t{\n\t\t\tname: \"no tests\",\n\t\t},\n\t\t{\n\t\t\tname: \"mixed device types\",\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"foo\"),\n\t\t\t\thostTestSpec(\"bar\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"multiply\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\ttargetDurationSecs: 5,\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"foo\"),\n\t\t\t\tfuchsiaTestSpec(\"bar\"),\n\t\t\t},\n\t\t\tmodifiers: []testsharder.TestModifier{\n\t\t\t\t{\n\t\t\t\t\tName: \"foo\",\n\t\t\t\t\tTotalRuns: 50,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"bar\",\n\t\t\t\t},\n\t\t\t},\n\t\t\ttestDurations: []build.TestDuration{\n\t\t\t\t{\n\t\t\t\t\tName: \"*\",\n\t\t\t\t\tMedianDuration: time.Millisecond,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"affected tests\",\n\t\t\ttestSpecs: 
[]build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"affected-hermetic\"),\n\t\t\t\tfuchsiaTestSpec(\"not-affected\"),\n\t\t\t},\n\t\t\ttestList: []build.TestListEntry{\n\t\t\t\ttestListEntry(\"affected-hermetic\", true),\n\t\t\t\ttestListEntry(\"not-affected\", false),\n\t\t\t},\n\t\t\taffectedTests: []string{\n\t\t\t\tpackageURL(\"affected-hermetic\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"affected nonhermetic tests\",\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"affected-nonhermetic\"),\n\t\t\t\tfuchsiaTestSpec(\"not-affected\"),\n\t\t\t},\n\t\t\taffectedTests: []string{\n\t\t\t\tpackageURL(\"affected-nonhermetic\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"target test count\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\ttargetTestCount: 2,\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"foo1\"),\n\t\t\t\tfuchsiaTestSpec(\"foo2\"),\n\t\t\t\tfuchsiaTestSpec(\"foo3\"),\n\t\t\t\tfuchsiaTestSpec(\"foo4\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"sharding by time\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\ttargetDurationSecs: int((4 * time.Minute).Seconds()),\n\t\t\t\tperTestTimeoutSecs: int((10 * time.Minute).Seconds()),\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"slow\"),\n\t\t\t\tfuchsiaTestSpec(\"fast1\"),\n\t\t\t\tfuchsiaTestSpec(\"fast2\"),\n\t\t\t\tfuchsiaTestSpec(\"fast3\"),\n\t\t\t},\n\t\t\ttestDurations: []build.TestDuration{\n\t\t\t\t{\n\t\t\t\t\tName: \"*\",\n\t\t\t\t\tMedianDuration: 2 * time.Second,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: packageURL(\"slow\"),\n\t\t\t\t\tMedianDuration: 5 * time.Minute,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"max shards per env\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\t// Given expected test durations of 4 minutes for each test it's\n\t\t\t\t// impossible to satisfy both the target shard duration and the\n\t\t\t\t// max shards per environment, so the target shard duration\n\t\t\t\t// should effectively be ignored.\n\t\t\t\ttargetDurationSecs: int((5 * time.Minute).Seconds()),\n\t\t\t\tmaxShardsPerEnvironment: 2,\n\t\t\t\tskipUnaffected: true,\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"affected1\"),\n\t\t\t\tfuchsiaTestSpec(\"affected2\"),\n\t\t\t\tfuchsiaTestSpec(\"affected3\"),\n\t\t\t\tfuchsiaTestSpec(\"affected4\"),\n\t\t\t\tfuchsiaTestSpec(\"unaffected1\"),\n\t\t\t\tfuchsiaTestSpec(\"unaffected2\"),\n\t\t\t\tfuchsiaTestSpec(\"nonhermetic1\"),\n\t\t\t\tfuchsiaTestSpec(\"nonhermetic2\"),\n\t\t\t},\n\t\t\ttestDurations: []build.TestDuration{\n\t\t\t\t{\n\t\t\t\t\tName: \"*\",\n\t\t\t\t\tMedianDuration: 4 * time.Minute,\n\t\t\t\t},\n\t\t\t},\n\t\t\taffectedTests: []string{\n\t\t\t\tpackageURL(\"affected1\"),\n\t\t\t\tpackageURL(\"affected2\"),\n\t\t\t\tpackageURL(\"affected3\"),\n\t\t\t\tpackageURL(\"affected4\"),\n\t\t\t},\n\t\t\ttestList: []build.TestListEntry{\n\t\t\t\ttestListEntry(\"affected1\", true),\n\t\t\t\ttestListEntry(\"affected2\", true),\n\t\t\t\ttestListEntry(\"affected3\", true),\n\t\t\t\ttestListEntry(\"affected4\", true),\n\t\t\t\ttestListEntry(\"unaffected1\", true),\n\t\t\t\ttestListEntry(\"unaffected2\", true),\n\t\t\t\ttestListEntry(\"nonhermetic1\", false),\n\t\t\t\ttestListEntry(\"nonhermetic2\", false),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"hermetic deps\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\thermeticDeps: true,\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"foo\"),\n\t\t\t\tfuchsiaTestSpec(\"bar\"),\n\t\t\t\tfuchsiaTestSpec(\"baz\"),\n\t\t\t},\n\t\t\tpackageRepos: 
[]build.PackageRepo{\n\t\t\t\t{\n\t\t\t\t\tPath: \"pkg_repo1\",\n\t\t\t\t\tBlobs: filepath.Join(\"pkg_repo1\", \"blobs\"),\n\t\t\t\t\tTargets: filepath.Join(\"pkg_repo1\", \"targets.json\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"ffx deps\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\tffxDeps: true,\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"foo\"),\n\t\t\t\tfuchsiaTestSpec(\"bar\"),\n\t\t\t\tfuchsiaTestSpec(\"baz\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"multiply affected test\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\taffectedTestsMultiplyThreshold: 3,\n\t\t\t\ttargetDurationSecs: int(2 * time.Minute.Seconds()),\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"multiplied-affected-test\"),\n\t\t\t\tfuchsiaTestSpec(\"affected-test\"),\n\t\t\t\tfuchsiaTestSpec(\"unaffected-test\"),\n\t\t\t},\n\t\t\ttestDurations: []build.TestDuration{\n\t\t\t\t{\n\t\t\t\t\tName: \"*\",\n\t\t\t\t\tMedianDuration: time.Second,\n\t\t\t\t},\n\t\t\t},\n\t\t\taffectedTests: []string{\n\t\t\t\tpackageURL(\"multiplied-affected-test\"),\n\t\t\t\tpackageURL(\"affected-test\"),\n\t\t\t},\n\t\t\tmodifiers: []testsharder.TestModifier{\n\t\t\t\t{\n\t\t\t\t\tName: \"multiplied-affected-test\",\n\t\t\t\t\tTotalRuns: 100,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"test list with tags\",\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"hermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"nonhermetic-test\"),\n\t\t\t},\n\t\t\ttestList: []build.TestListEntry{\n\t\t\t\ttestListEntry(\"hermetic-test\", true),\n\t\t\t\ttestListEntry(\"nonhermetic-test\", false),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"skip unaffected tests\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\tskipUnaffected: true,\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"affected-hermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"unaffected-hermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"affected-nonhermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"unaffected-nonhermetic-test\"),\n\t\t\t},\n\t\t\ttestList: []build.TestListEntry{\n\t\t\t\ttestListEntry(\"affected-hermetic-test\", true),\n\t\t\t\ttestListEntry(\"unaffected-hermetic-test\", true),\n\t\t\t\ttestListEntry(\"affected-nonhermetic-test\", false),\n\t\t\t\ttestListEntry(\"unaffected-nonhermetic-test\", false),\n\t\t\t},\n\t\t\taffectedTests: []string{\n\t\t\t\tfuchsiaTestSpec(\"affected-hermetic-test\").Name,\n\t\t\t\tfuchsiaTestSpec(\"affected-nonhermetic-test\").Name,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"run all tests if no affected tests\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\tskipUnaffected: true,\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"affected-hermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"unaffected-hermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"affected-nonhermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"unaffected-nonhermetic-test\"),\n\t\t\t},\n\t\t\ttestList: []build.TestListEntry{\n\t\t\t\ttestListEntry(\"affected-hermetic-test\", true),\n\t\t\t\ttestListEntry(\"unaffected-hermetic-test\", true),\n\t\t\t\ttestListEntry(\"affected-nonhermetic-test\", false),\n\t\t\t\ttestListEntry(\"unaffected-nonhermetic-test\", false),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"multiply unaffected hermetic tests\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\tskipUnaffected: true,\n\t\t\t},\n\t\t\ttestSpecs: 
[]build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"unaffected-hermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"affected-nonhermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"unaffected-hermetic-multiplied-test\"),\n\t\t\t},\n\t\t\ttestList: []build.TestListEntry{\n\t\t\t\ttestListEntry(\"unaffected-hermetic-test\", true),\n\t\t\t\ttestListEntry(\"affected-nonhermetic-test\", false),\n\t\t\t\ttestListEntry(\"unaffected-hermetic-multiplied-test\", true),\n\t\t\t},\n\t\t\taffectedTests: []string{\n\t\t\t\tfuchsiaTestSpec(\"affected-nonhermetic-test\").Name,\n\t\t\t},\n\t\t\tmodifiers: []testsharder.TestModifier{\n\t\t\t\t{\n\t\t\t\t\tName: \"unaffected-hermetic-multiplied-test\",\n\t\t\t\t\tTotalRuns: 100,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"various modifiers\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\ttargetDurationSecs: 5,\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"foo\"),\n\t\t\t\tfuchsiaTestSpec(\"bar\"),\n\t\t\t\tfuchsiaTestSpec(\"baz\"),\n\t\t\t},\n\t\t\tmodifiers: []testsharder.TestModifier{\n\t\t\t\t// default modifier\n\t\t\t\t{\n\t\t\t\t\tName: \"*\",\n\t\t\t\t\tTotalRuns: -1,\n\t\t\t\t\tMaxAttempts: 2,\n\t\t\t\t},\n\t\t\t\t// multiplier\n\t\t\t\t{\n\t\t\t\t\tName: \"foo\",\n\t\t\t\t\tMaxAttempts: 1,\n\t\t\t\t},\n\t\t\t\t// change maxAttempts (but multiplier takes precedence)\n\t\t\t\t{\n\t\t\t\t\tName: \"foo\",\n\t\t\t\t\tTotalRuns: -1,\n\t\t\t\t\tMaxAttempts: 1,\n\t\t\t\t},\n\t\t\t\t// change maxAttempts, set affected\n\t\t\t\t{\n\t\t\t\t\tName: \"bar\",\n\t\t\t\t\tAffected: true,\n\t\t\t\t\tTotalRuns: -1,\n\t\t\t\t\tMaxAttempts: 1,\n\t\t\t\t},\n\t\t\t},\n\t\t\ttestList: []build.TestListEntry{\n\t\t\t\ttestListEntry(\"foo\", false),\n\t\t\t\ttestListEntry(\"bar\", true),\n\t\t\t\ttestListEntry(\"baz\", false),\n\t\t\t},\n\t\t\ttestDurations: []build.TestDuration{\n\t\t\t\t{\n\t\t\t\t\tName: \"*\",\n\t\t\t\t\tMedianDuration: time.Millisecond,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tgoldenBasename := strings.ReplaceAll(tc.name, \" \", \"_\") + \".golden.json\"\n\t\t\tgoldenFile := filepath.Join(*goldensDir, goldenBasename)\n\n\t\t\tif *updateGoldens {\n\t\t\t\ttc.flags.outputFile = goldenFile\n\t\t\t} else {\n\t\t\t\ttc.flags.outputFile = filepath.Join(t.TempDir(), goldenBasename)\n\t\t\t}\n\n\t\t\ttc.flags.buildDir = t.TempDir()\n\t\t\tif len(tc.modifiers) > 0 {\n\t\t\t\ttc.flags.modifiersPath = writeTempJSONFile(t, tc.modifiers)\n\t\t\t}\n\t\t\tif len(tc.affectedTests) > 0 {\n\t\t\t\t// Add a newline to the end of the file to test that it still calculates the\n\t\t\t\t// correct number of affected tests even with extra whitespace.\n\t\t\t\ttc.flags.affectedTestsPath = writeTempFile(t, strings.Join(tc.affectedTests, \"\\n\")+\"\\n\")\n\t\t\t}\n\t\t\tif tc.flags.ffxDeps {\n\t\t\t\tsdkManifest := map[string]interface{}{\n\t\t\t\t\t\"atoms\": []interface{}{},\n\t\t\t\t}\n\t\t\t\tsdkManifestPath := filepath.Join(tc.flags.buildDir, \"sdk\", \"manifest\", \"core\")\n\t\t\t\tif err := os.MkdirAll(filepath.Dir(sdkManifestPath), os.ModePerm); err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t\tif err := jsonutil.WriteToFile(sdkManifestPath, sdkManifest); err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Write test-list.json.\n\t\t\tif err := jsonutil.WriteToFile(\n\t\t\t\tfilepath.Join(tc.flags.buildDir, testListPath),\n\t\t\t\tbuild.TestList{Data: tc.testList, SchemaID: \"experimental\"},\n\t\t\t); err != nil 
{\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\twriteDepFiles(t, tc.flags.buildDir, tc.testSpecs)\n\t\t\tfor _, repo := range tc.packageRepos {\n\t\t\t\tif err := os.MkdirAll(filepath.Join(tc.flags.buildDir, repo.Path), 0o700); err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tm := &fakeModules{\n\t\t\t\ttestSpecs: tc.testSpecs,\n\t\t\t\ttestDurations: tc.testDurations,\n\t\t\t\tpackageRepositories: tc.packageRepos,\n\t\t\t}\n\t\t\tif err := execute(ctx, tc.flags, m); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\tif !*updateGoldens {\n\t\t\t\twant := readShards(t, goldenFile)\n\t\t\t\tgot := readShards(t, tc.flags.outputFile)\n\t\t\t\tif diff := cmp.Diff(want, got); diff != \"\" {\n\t\t\t\t\tt.Errorf(strings.Join([]string{\n\t\t\t\t\t\t\"Golden file mismatch!\",\n\t\t\t\t\t\t\"To fix, run `tools/integration/testsharder/update_goldens.sh\",\n\t\t\t\t\t\tdiff,\n\t\t\t\t\t}, \"\\n\"))\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}", "func (envManager *TestEnvManager) RunTest(m runnable) (ret int) {\n\tdefer envManager.TearDown()\n\tif err := envManager.StartUp(); err != nil {\n\t\tlog.Printf(\"Failed to setup framework: %s\", err)\n\t\tret = 1\n\t} else {\n\t\tlog.Printf(\"\\nStart testing ......\")\n\t\tret = m.Run()\n\t}\n\treturn ret\n}", "func (ldx *LxdAudit) Run(args []string) int {\n\t// load audit tests fro benchmark folder\n\tauditTests := ldx.FileLoader.LoadAuditTests(ldx.FilesInfo)\n\t// filter tests by cmd criteria\n\tft := filteredAuditBenchTests(auditTests, ldx.PredicateChain, ldx.PredicateParams)\n\t//execute audit tests and show it in progress bar\n\tcompletedTest := executeTests(ft, ldx.runAuditTest, ldx.log)\n\t// generate output data\n\tui.PrintOutput(completedTest, ldx.OutputGenerator, ldx.log)\n\t// send test results to plugin\n\tsendResultToPlugin(ldx.PlChan, ldx.CompletedChan, completedTest)\n\treturn 0\n}", "func Run(ctx context.Context, s *testing.State) {\n\t// Reserve time for cleanup\n\tcloseCtx := ctx\n\tctx, cancel := ctxutil.Shorten(ctx, 2*time.Second)\n\tdefer cancel()\n\n\t// Perform initial test setup\n\tsetupVars, err := runSetup(ctx, s)\n\tif err != nil {\n\t\ts.Fatal(\"Failed to run setup: \", err)\n\t}\n\tdefer setupVars.closeBrowser(closeCtx)\n\tdefer setupVars.recorder.Close(closeCtx)\n\n\tif err := muteDevice(ctx, s); err != nil {\n\t\ts.Log(\"(non-error) Failed to mute device: \", err)\n\t}\n\n\t// Execute Test\n\tif err := setupVars.recorder.Run(ctx, func(ctx context.Context) error {\n\t\treturn testBody(ctx, setupVars)\n\t}); err != nil {\n\t\ts.Fatal(\"Failed to conduct the test scenario, or collect the histogram data: \", err)\n\t}\n\n\t// Write out values\n\tpv := perf.NewValues()\n\tif err := setupVars.recorder.Record(ctx, pv); err != nil {\n\t\ts.Fatal(\"Failed to report: \", err)\n\t}\n\tif err := pv.Save(s.OutDir()); err != nil {\n\t\ts.Error(\"Failed to store values: \", err)\n\t}\n}", "func runTest(ctx context.Context, c autotest.Config, a *autotest.AutoservArgs, w io.Writer) (*Result, error) {\n\tr, err := runTask(ctx, c, a, w)\n\tif !r.Started {\n\t\treturn r, err\n\t}\n\tp := filepath.Join(a.ResultsDir, autoservPidFile)\n\tif i, err2 := readTestsFailed(p); err2 != nil {\n\t\tif err == nil {\n\t\t\terr = err2\n\t\t}\n\t} else {\n\t\tr.TestsFailed = i\n\t}\n\tif err2 := appendJobFinished(a.ResultsDir); err == nil {\n\t\terr = err2\n\t}\n\treturn r, err\n}", "func (st *buildStatus) runTests(helpers <-chan buildlet.Client) (remoteErr, err error) {\n\ttestNames, remoteErr, err := st.distTestList()\n\tif remoteErr != nil {\n\t\treturn 
fmt.Errorf(\"distTestList remote: %v\", remoteErr), nil\n\t}\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"distTestList exec: %v\", err)\n\t}\n\ttestStats := getTestStats(st)\n\n\tset, err := st.newTestSet(testStats, testNames)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tst.LogEventTime(\"starting_tests\", fmt.Sprintf(\"%d tests\", len(set.items)))\n\tstartTime := time.Now()\n\n\tworkDir, err := st.bc.WorkDir(st.ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error discovering workdir for main buildlet, %s: %v\", st.bc.Name(), err)\n\t}\n\n\tmainBuildletGoroot := st.conf.FilePathJoin(workDir, \"go\")\n\tmainBuildletGopath := st.conf.FilePathJoin(workDir, \"gopath\")\n\n\t// We use our original buildlet to run the tests in order, to\n\t// make the streaming somewhat smooth and not incredibly\n\t// lumpy. The rest of the buildlets run the largest tests\n\t// first (critical path scheduling).\n\t// The buildletActivity WaitGroup is used to track when all\n\t// the buildlets are dead or done.\n\tvar buildletActivity sync.WaitGroup\n\tbuildletActivity.Add(2) // one per goroutine below (main + helper launcher goroutine)\n\tgo func() {\n\t\tdefer buildletActivity.Done() // for the per-goroutine Add(2) above\n\t\tfor !st.bc.IsBroken() {\n\t\t\ttis, ok := set.testsToRunInOrder()\n\t\t\tif !ok {\n\t\t\t\tselect {\n\t\t\t\tcase <-st.ctx.Done():\n\t\t\t\t\treturn\n\t\t\t\tcase <-time.After(5 * time.Second):\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tst.runTestsOnBuildlet(st.bc, tis, mainBuildletGoroot, mainBuildletGopath)\n\t\t}\n\t\tst.LogEventTime(\"main_buildlet_broken\", st.bc.Name())\n\t}()\n\tgo func() {\n\t\tdefer buildletActivity.Done() // for the per-goroutine Add(2) above\n\t\tfor helper := range helpers {\n\t\t\tbuildletActivity.Add(1)\n\t\t\tgo func(bc buildlet.Client) {\n\t\t\t\tdefer buildletActivity.Done() // for the per-helper Add(1) above\n\t\t\t\tdefer st.LogEventTime(\"closed_helper\", bc.Name())\n\t\t\t\tdefer bc.Close()\n\t\t\t\tif devPause {\n\t\t\t\t\tdefer time.Sleep(5 * time.Minute)\n\t\t\t\t\tdefer st.LogEventTime(\"DEV_HELPER_SLEEP\", bc.Name())\n\t\t\t\t}\n\t\t\t\tst.LogEventTime(\"got_empty_test_helper\", bc.String())\n\t\t\t\tif err := bc.PutTarFromURL(st.ctx, st.SnapshotURL(pool.NewGCEConfiguration().BuildEnv()), \"go\"); err != nil {\n\t\t\t\t\tlog.Printf(\"failed to extract snapshot for helper %s: %v\", bc.Name(), err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tworkDir, err := bc.WorkDir(st.ctx)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"error discovering workdir for helper %s: %v\", bc.Name(), err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tst.LogEventTime(\"test_helper_set_up\", bc.Name())\n\t\t\t\tgoroot := st.conf.FilePathJoin(workDir, \"go\")\n\t\t\t\tgopath := st.conf.FilePathJoin(workDir, \"gopath\")\n\t\t\t\tfor !bc.IsBroken() {\n\t\t\t\t\ttis, ok := set.testsToRunBiggestFirst()\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tst.LogEventTime(\"no_new_tests_remain\", bc.Name())\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tst.runTestsOnBuildlet(bc, tis, goroot, gopath)\n\t\t\t\t}\n\t\t\t\tst.LogEventTime(\"test_helper_is_broken\", bc.Name())\n\t\t\t}(helper)\n\t\t}\n\t}()\n\n\t// Convert a sync.WaitGroup into a channel.\n\t// Aside: https://groups.google.com/forum/#!topic/golang-dev/7fjGWuImu5k\n\tbuildletsGone := make(chan struct{})\n\tgo func() {\n\t\tbuildletActivity.Wait()\n\t\tclose(buildletsGone)\n\t}()\n\n\tvar lastMetadata string\n\tvar lastHeader string\n\tvar serialDuration time.Duration\n\tfor _, ti := range set.items {\n\tAwaitDone:\n\t\tfor {\n\t\t\ttimer 
:= time.NewTimer(30 * time.Second)\n\t\t\tselect {\n\t\t\tcase <-ti.done: // wait for success\n\t\t\t\ttimer.Stop()\n\t\t\t\tbreak AwaitDone\n\t\t\tcase <-timer.C:\n\t\t\t\tst.LogEventTime(\"still_waiting_on_test\", ti.name.Old)\n\t\t\tcase <-buildletsGone:\n\t\t\t\tset.cancelAll()\n\t\t\t\treturn nil, errBuildletsGone\n\t\t\t}\n\t\t}\n\n\t\tserialDuration += ti.execDuration\n\t\tif len(ti.output) > 0 {\n\t\t\tmetadata, header, out := parseOutputAndHeader(ti.output)\n\t\t\tprintHeader := false\n\t\t\tif metadata != lastMetadata {\n\t\t\t\tlastMetadata = metadata\n\t\t\t\tfmt.Fprintf(st, \"\\n%s\\n\", metadata)\n\t\t\t\t// Always include the test header after\n\t\t\t\t// metadata changes. This is a readability\n\t\t\t\t// optimization that ensures that tests are\n\t\t\t\t// always immediately preceded by their test\n\t\t\t\t// banner, even if it is duplicate banner\n\t\t\t\t// because the test metadata changed.\n\t\t\t\tprintHeader = true\n\t\t\t}\n\t\t\tif header != lastHeader {\n\t\t\t\tlastHeader = header\n\t\t\t\tprintHeader = true\n\t\t\t}\n\t\t\tif printHeader {\n\t\t\t\tfmt.Fprintf(st, \"\\n%s\\n\", header)\n\t\t\t}\n\t\t\tif pool.NewGCEConfiguration().InStaging() {\n\t\t\t\tout = bytes.TrimSuffix(out, nl)\n\t\t\t\tst.Write(out)\n\t\t\t\tfmt.Fprintf(st, \" (shard %s; par=%d)\\n\", ti.shardIPPort, ti.groupSize)\n\t\t\t} else {\n\t\t\t\tst.Write(out)\n\t\t\t}\n\t\t}\n\n\t\tif ti.remoteErr != nil {\n\t\t\tset.cancelAll()\n\t\t\treturn fmt.Errorf(\"dist test failed: %s: %v\", ti.name, ti.remoteErr), nil\n\t\t}\n\t}\n\telapsed := time.Since(startTime)\n\tvar msg string\n\tif st.conf.NumTestHelpers(st.isTry()) > 0 {\n\t\tmsg = fmt.Sprintf(\"took %v; aggregate %v; saved %v\", elapsed, serialDuration, serialDuration-elapsed)\n\t} else {\n\t\tmsg = fmt.Sprintf(\"took %v\", elapsed)\n\t}\n\tst.LogEventTime(\"tests_complete\", msg)\n\tfmt.Fprintf(st, \"\\nAll tests passed.\\n\")\n\treturn nil, nil\n}", "func (t *Test) Run() error {\n\tfor _, cmd := range t.cmds {\n\t\t// TODO(fabxc): aggregate command errors, yield diffs for result\n\t\t// comparison errors.\n\t\tif err := t.exec(cmd); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (mtc MetricsTestCases) Test(t *testing.T) {\n\tfor name, tc := range mtc {\n\t\tt.Run(name, func(tt *testing.T) {\n\t\t\tproblems, err := tc.Test()\n\t\t\tif err != nil {\n\t\t\t\ttt.Error(err)\n\t\t\t}\n\t\t\tfor _, problem := range problems {\n\t\t\t\tlog.Printf(\"non-standard metric '%s': %s\", problem.Metric, problem.Text)\n\t\t\t}\n\t\t})\n\t}\n}", "func Test(ctx context.Context) {\n\tmg.CtxDeps(ctx, Tests.Run)\n}", "func (t *SelfTester) RunSelfTest() ([]string, []string, map[string]*serializers.EventSerializer, error) {\n\tif err := t.BeginWaitingForEvent(); err != nil {\n\t\treturn nil, nil, nil, fmt.Errorf(\"failed to run self test: %w\", err)\n\t}\n\tdefer t.EndWaitingForEvent()\n\n\tt.lastTimestamp = time.Now()\n\n\t// launch the self tests\n\tvar success []string\n\tvar fails []string\n\ttestEvents := make(map[string]*serializers.EventSerializer)\n\n\tfor _, selftest := range FileSelfTests {\n\t\tdef := selftest.GetRuleDefinition(t.targetFilePath)\n\n\t\tpredicate, err := selftest.GenerateEvent(t.targetFilePath)\n\t\tif err != nil {\n\t\t\tfails = append(fails, def.ID)\n\t\t\tlog.Errorf(\"Self test failed: %s\", def.ID)\n\t\t\tcontinue\n\t\t}\n\t\tevent, err2 := t.expectEvent(predicate)\n\t\ttestEvents[def.ID] = event\n\t\tif err2 != nil {\n\t\t\tfails = append(fails, def.ID)\n\t\t\tlog.Errorf(\"Self test failed: %s\", def.ID)\n\t\t} 
else {\n\t\t\tsuccess = append(success, def.ID)\n\t\t}\n\t}\n\n\t// save the results for get status command\n\tt.success = success\n\tt.fails = fails\n\n\treturn success, fails, testEvents, nil\n}", "func (cs *Suite) RunAndAttest(ctx context.Context, metadataClient MetadataClient, metricsClient metrics.Client, imageData ImageData) []CheckResult {\n\tresults := cs.Run(ctx, metricsClient, imageData)\n\treturn cs.Attest(ctx, metricsClient, metadataClient, results)\n}", "func RunTests(t *testing.T, svctest ServiceTest) {\n\tt.Run(\"NewSite\", func(t *testing.T) { clearEnvWrapper(t, svctest.TestNewSite) })\n\tt.Run(\"DeleteSite\", func(t *testing.T) { clearEnvWrapper(t, svctest.TestDeleteSite) })\n\tt.Run(\"WritePost\", func(t *testing.T) { clearEnvWrapper(t, svctest.TestWritePost) })\n\tt.Run(\"RemovePost\", func(t *testing.T) { clearEnvWrapper(t, svctest.TestRemovePost) })\n\tt.Run(\"ReadPost\", func(t *testing.T) { clearEnvWrapper(t, svctest.TestReadPost) })\n\tt.Run(\"WriteConfig\", func(t *testing.T) { clearEnvWrapper(t, svctest.TestWriteConfig) })\n\tt.Run(\"ReadConfig\", func(t *testing.T) { clearEnvWrapper(t, svctest.TestReadConfig) })\n\tt.Run(\"UpdateAbout\", func(t *testing.T) { clearEnvWrapper(t, svctest.TestUpdateAbout) })\n\tt.Run(\"ReadAbout\", func(t *testing.T) { clearEnvWrapper(t, svctest.TestReadAbout) })\n\tt.Run(\"ChangeDefaultConfig\", func(t *testing.T) { clearEnvWrapper(t, svctest.TestChangeDefaultConfig) })\n}", "func RunTest(ctx context.Context, target, location string, nodeIDs []int, limit int, debug, outputJSON bool, runTest runFunc, runOutput runOutputFunc) error {\n\trunReq := &perfops.RunRequest{\n\t\tTarget: target,\n\t\tLocation: location,\n\t\tNodes: nodeIDs,\n\t\tLimit: limit,\n\t}\n\n\tf := NewFormatter(debug && !outputJSON)\n\tf.StartSpinner()\n\ttestID, err := runTest(ctx, runReq)\n\tf.StopSpinner()\n\tif err != nil {\n\t\treturn err\n\t}\n\tres := &RunOutputResult{}\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-time.After(200 * time.Millisecond):\n\t\t\t}\n\t\t\toutput, err := runOutput(ctx, testID)\n\t\t\tres.SetOutput(output, err)\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n\n\tif outputJSON {\n\t\tf.StartSpinner()\n\t}\n\tvar o *perfops.RunOutput\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(50 * time.Millisecond):\n\t\t}\n\t\tif o, err = res.Output(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !outputJSON && o != nil {\n\t\t\tPrintOutput(f, o)\n\t\t}\n\t\tif o != nil && o.IsFinished() {\n\t\t\tbreak\n\t\t}\n\t}\n\tif outputJSON {\n\t\tf.StopSpinner()\n\t\tPrintOutputJSON(o)\n\t}\n\treturn nil\n}", "func (t *Test) Run() error {\n\treturn t.Wrap(t.run)\n}", "func runTestScenariosWithInputAndProcessedOutput(t *testing.T, tests []testCase, input *os.File, processOutput processOutput) {\n\tfor _, tc := range tests {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tcmd := exec.Command(binaryPath, tc.args...)\n\n\t\t\tif input != nil {\n\t\t\t\tcmd.Stdin = input\n\t\t\t}\n\n\t\t\toutput, err := cmd.CombinedOutput()\n\t\t\tif (err != nil) != tc.wantErr {\n\t\t\t\tt.Fatalf(\"%s\\nexpected (err != nil) to be %v, but got %v. 
err: %v\", output, tc.wantErr, err != nil, err)\n\t\t\t}\n\t\t\tactual := string(output)\n\n\t\t\tif processOutput != nil {\n\t\t\t\tactual = processOutput(actual)\n\t\t\t}\n\n\t\t\tgolden := newGoldenFile(t, tc.golden)\n\n\t\t\tif *update {\n\t\t\t\tgolden.write(actual)\n\t\t\t}\n\t\t\texpected := golden.load()\n\t\t\tif !reflect.DeepEqual(expected, actual) {\n\t\t\t\tt.Fatalf(\"Expected: %v Actual: %v\", expected, actual)\n\t\t\t}\n\t\t})\n\t}\n}", "func RunTestSuite(t *testing.T, s storage.Storage) {\n\tt.Run(\"UpdateAuthRequest\", func(t *testing.T) { testUpdateAuthRequest(t, s) })\n\tt.Run(\"CreateRefresh\", func(t *testing.T) { testCreateRefresh(t, s) })\n}", "func runTest(waf *engine.Waf, profile testProfile) (bool, int, error){\n\tpassed := 0\n\tfor _, test := range profile.Tests{\n\t\ttn := time.Now().UnixNano()\n\t\tpass := true\n\t\tfor _, stage := range test.Stages{\n\t\t\ttx := waf.NewTransaction()\n\t\t\tif stage.Stage.Input.EncodedRequest != \"\"{\n\t\t\t\tsDec, _ := b64.StdEncoding.DecodeString(stage.Stage.Input.EncodedRequest)\n\t\t\t\tstage.Stage.Input.RawRequest = string(sDec)\n\t\t\t}\n\t\t\tif stage.Stage.Input.RawRequest != \"\"{\n\t\t\t\treq, err := requestFromString(stage.Stage.Input.RawRequest)\n\t\t\t\tif err != nil{\n\t\t\t\t\tfmt.Println(\"Error parsing HTTP request:\")\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\treturn false, 0, err\n\t\t\t\t}\n\t\t\t\trequestToTx(req, tx)\n\t\t\t}\n\t\t\t//Apply tx data\n\t\t\tif len(stage.Stage.Input.Headers) > 0{\n\t\t\t\tfor k, v := range stage.Stage.Input.Headers{\n\t\t\t\t\ttx.AddRequestHeader(k, v)\n\t\t\t\t}\n\t\t\t}\t\t\t\n\t\t\tmethod := \"GET\"\n\t\t\tif stage.Stage.Input.Method != \"\"{\n\t\t\t\tmethod = stage.Stage.Input.Method\n\t\t\t\ttx.SetRequestMethod(method)\n\t\t\t}\n\n\t\t\t//Request Line\n\t\t\thttpv := \"HTTP/1.1\"\n\t\t\tif stage.Stage.Input.Version != \"\"{\n\t\t\t\thttpv = stage.Stage.Input.Version\n\t\t\t}\n\n\t\t\tpath := \"/\"\n\t\t\tif stage.Stage.Input.Uri != \"\"{\n\t\t\t\tu, err := url.Parse(stage.Stage.Input.Uri)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif debug{\n\t\t\t\t\t\tfmt.Println(\"Invalid URL: \" + stage.Stage.Input.Uri)\n\t\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\t}\n\t\t\t\t}else{\n\t\t\t\t\ttx.SetUrl(u)\n\t\t\t\t\ttx.AddGetArgsFromUrl(u)\n\t\t\t\t\tpath = stage.Stage.Input.Uri//or unescaped?\t\n\t\t\t\t}\n\t\t\t\t\n\t\t\t}\n\t\t\ttx.SetRequestLine(method, httpv, path)\n\n\t\t\t//PHASE 1\n\t\t\ttx.ExecutePhase(1)\n\n\t\t\t// POST DATA\n\t\t\tif stage.Stage.Input.Data != \"\"{\n\t\t\t\tdata := \"\"\n\t\t\t\tv := reflect.ValueOf(stage.Stage.Input.Data)\n\t\t\t\tswitch v.Kind() {\n\t\t\t\tcase reflect.Slice:\n\t\t\t for i := 0; i < v.Len(); i++ {\n\t\t\t data += fmt.Sprintf(\"%s\\r\\n\", v.Index(i))\n\t\t\t }\n\t\t\t data += \"\\r\\n\"\n\t\t\t\tcase reflect.String:\n\t\t\t\t\tdata = stage.Stage.Input.Data.(string)\n\t\t\t\t}\n\t\t\t\trh := tx.GetCollection(\"request_headers\")\n\t\t\t\tct := rh.GetSimple(\"content-type\")\n\t\t\t\tctt := \"\"\n\t\t\t\tif len(ct) == 1{\n\t\t\t\t\tctt = ct[0]\n\t\t\t\t}\n\t\t\t\tmediaType, params, _ := mime.ParseMediaType(ctt)\n\t\t\t\tif method == \"GET\" || method == \"HEAD\" || method == \"OPTIONS\" {\n\t\t\t\t\tlength := strconv.Itoa(len(data))\n\t\t\t\t\tif len(rh.GetSimple(\"content-length\")) == 0{\n\t\t\t\t\t\trh.Set(\"content-length\", []string{length})\n\t\t\t\t\t}\n\t\t\t\t\t// Just for testing\n\t\t\t\t\ttx.GetCollection(\"request_body\").Set(\"\", []string{data})\n\t\t\t\t}else if strings.HasPrefix(mediaType, \"multipart/\") 
{\n\t\t\t\t\tparseMultipart(data, params[\"boundary\"], tx)\n\t\t\t\t}else {\n\t\t\t\t\ttx.SetRequestBody(data, int64(len(data)), mediaType)\n\t\t\t\t\tu, err := url.ParseQuery(data)\n\t\t\t\t\tif err == nil{\n\t\t\t\t\t\ttx.SetArgsPost(u)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor i := 2; i <= 5; i++{\n\t\t\t\ttx.ExecutePhase(i)\n\t\t\t}\n\t\t\tlog := \"\"\n\t\t\tfor _, mr := range tx.MatchedRules{\n\t\t\t\tlog += fmt.Sprintf(\" [id \\\"%d\\\"]\", mr.Id)\n\t\t\t}\n\t\t\t//now we evaluate tests\n\t\t\tif stage.Stage.Output.LogContains != \"\"{\n\t\t\t\tif !strings.Contains(log, stage.Stage.Output.LogContains){\n\t\t\t\t\tpass = false\n\t\t\t\t}\n\t\t\t}\n\t\t\tif stage.Stage.Output.NoLogContains != \"\"{\n\t\t\t\tif strings.Contains(log, stage.Stage.Output.NoLogContains){\n\t\t\t\t\tpass = false\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tresult := \"\\033[31mFailed\"\n\t\tif pass{\n\t\t\tresult = \"\\033[32mPassed\"\n\t\t\tpassed++\n\t\t\tif failonly{\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tfmt.Printf(\"%s: %s\\033[0m (%dus)\\n\", test.Title, result, time.Now().UnixNano()-tn)\n\t}\n\treturn len(profile.Tests) == passed, 0, nil\n}", "func (m *LoadManager) RunSuite() {\n\tm.HandleShutdownSignal()\n\n\tt := timeNow()\n\tstartTime := epochNowMillis(t)\n\thrStartTime := timeHumanReadable(t)\n\n\tfor _, step := range m.Steps {\n\t\tlog.Infof(\"running step: %s, execution mode: %s\", step.Name, step.ExecutionMode)\n\t\tswitch step.ExecutionMode {\n\t\tcase ParallelMode:\n\t\t\tvar wg sync.WaitGroup\n\t\t\twg.Add(len(step.Runners))\n\n\t\t\tfor _, r := range step.Runners {\n\t\t\t\tr.SetupHandleStore(m)\n\t\t\t\tgo r.Run(&wg, m)\n\t\t\t}\n\t\t\twg.Wait()\n\t\tcase SequenceMode:\n\t\t\tfor _, r := range step.Runners {\n\t\t\t\tr.SetupHandleStore(m)\n\t\t\t\tr.Run(nil, m)\n\t\t\t}\n\t\tcase SequenceValidateMode:\n\t\t\tfor _, r := range step.Runners {\n\t\t\t\tr.SetupHandleStore(m)\n\t\t\t\tr.Run(nil, m)\n\t\t\t\tr.SetValidationParams()\n\t\t\t\tr.Run(nil, m)\n\t\t\t}\n\t\tdefault:\n\t\t\tlog.Fatal(\"please set execution_mode, parallel, sequence or sequence_validate\")\n\t\t}\n\t}\n\tif m.GeneratorConfig.Grafana.URL != \"\" {\n\t\tt = timeNow()\n\t\tfinishTime := epochNowMillis(t)\n\t\thrFinishTime := timeHumanReadable(t)\n\n\t\tTimerangeUrl(startTime, finishTime)\n\t\tHumanReadableTestInterval(hrStartTime, hrFinishTime)\n\t}\n\tm.Shutdown()\n}", "func Run(\n\tdao DAO,\n\tstatements, setupStatements, teardownStatements, solutions []Statement,\n\tselectedQuestions []string,\n) ([]TestResult, error) {\n\tvar testResult = []TestResult{}\n\n\ti := 0\n\tresults, errs, err := dao.ExecuteStatements(setupStatements, teardownStatements, statements)\n\tsolutionResults, _, err := dao.ExecuteStatements(setupStatements, teardownStatements, solutions)\n\ttestcases := ConvertTablesToTestCases(solutionResults)\n\n\tif err != nil {\n\t\treturn testResult, err\n\t}\n\n\tfor _, expected := range testcases {\n\t\tif !stringInSlice(expected.Index, selectedQuestions) && len(selectedQuestions) > 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif i >= len(results) {\n\t\t\ttestResult = append(\n\t\t\t\ttestResult,\n\t\t\t\tTestResult{\n\t\t\t\t\tExpected: expected,\n\t\t\t\t\tActual: Result{},\n\t\t\t\t\tPass: false,\n\t\t\t\t},\n\t\t\t)\n\t\t\ti++\n\t\t\tcontinue\n\t\t}\n\t\ttable := results[i]\n\t\terr := errs[i]\n\t\t// Query has syntax error\n\t\tif err != nil {\n\t\t\ttestResult = append(\n\t\t\t\ttestResult,\n\t\t\t\tTestResult{\n\t\t\t\t\tExpected: expected,\n\t\t\t\t\tActual: table,\n\t\t\t\t\tPass: false,\n\t\t\t\t\tError: 
err,\n\t\t\t\t},\n\t\t\t)\n\t\t\ti++\n\t\t\tcontinue\n\t\t}\n\t\tif !reflect.DeepEqual(table.Content, expected.Content) {\n\t\t\ttestResult = append(\n\t\t\t\ttestResult,\n\t\t\t\tTestResult{\n\t\t\t\t\tExpected: expected,\n\t\t\t\t\tActual: table,\n\t\t\t\t\tPass: false,\n\t\t\t\t},\n\t\t\t)\n\t\t} else {\n\t\t\ttestResult = append(\n\t\t\t\ttestResult,\n\t\t\t\tTestResult{\n\t\t\t\t\tExpected: expected,\n\t\t\t\t\tActual: table,\n\t\t\t\t\tPass: true,\n\t\t\t\t},\n\t\t\t)\n\t\t}\n\t\ti++\n\t}\n\n\treturn testResult, nil\n}", "func testRun(t *testing.T, nfail, nnode, nclients, nthreads, naccesses int) {\n\n\tdesc := fmt.Sprintf(\"F=%v,N=%v,Clients=%v,Threads=%v,Accesses=%v\",\n\t\tnfail, nnode, nclients, nthreads, naccesses)\n\tt.Run(desc, func(t *testing.T) {\n\n\t\t// Create a cancelable context for the test run\n\t\tctx, cancel := context.WithCancel(context.Background())\n\n\t\t// Create an in-memory CAS register representing each node\n\t\tmembers := make([]cas.Store, nnode)\n\t\tmemhist := make([]test.History, nnode)\n\t\tfor i := range members {\n\t\t\tmembers[i] = &cas.Register{}\n\t\t}\n\n\t\t// Create a consensus group Store for each simulated client\n\t\tclients := make([]cas.Store, nclients)\n\t\tfor i := range clients {\n\n\t\t\t// Interpose checking wrappers on the CAS registers\n\t\t\tcheckers := make([]cas.Store, nnode)\n\t\t\tfor i := range checkers {\n\t\t\t\tcheckers[i] = test.Checked(t, &memhist[i],\n\t\t\t\t\tmembers[i])\n\t\t\t}\n\n\t\t\tclients[i] = (&Group{}).Start(ctx, checkers, nfail)\n\t\t}\n\n\t\t// Run a standard torture-test across all the clients\n\t\ttest.Stores(t, nthreads, naccesses, clients...)\n\n\t\t// Shut down all the clients by canceling the context\n\t\tcancel()\n\t})\n}", "func Test(t *testing.T, p prog.Program, cases ...Case) {\n\tt.Helper()\n\tfor _, c := range cases {\n\t\tt.Run(strings.Join(c.args, \" \"), func(t *testing.T) {\n\t\t\tt.Helper()\n\t\t\tr := run(p, c.args, c.stdin)\n\t\t\tif r.exitCode != c.want.exitCode {\n\t\t\t\tt.Errorf(\"got exit code %v, want %v\", r.exitCode, c.want.exitCode)\n\t\t\t}\n\t\t\tif !matchOutput(r.stdout, c.want.stdout) {\n\t\t\t\tt.Errorf(\"got stdout %v, want %v\", r.stdout, c.want.stdout)\n\t\t\t}\n\t\t\tif !matchOutput(r.stderr, c.want.stderr) {\n\t\t\t\tt.Errorf(\"got stderr %v, want %v\", r.stderr, c.want.stderr)\n\t\t\t}\n\t\t})\n\t}\n}", "func (o *Options) Run() error {\n\terr := o.Validate()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to validate\")\n\t}\n\n\ttest := &v1alpha1.TestRun{}\n\terr = o.PopulateTest(test)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to populate the TestRun resource\")\n\t}\n\n\to.TestRun, err = o.TestClient.JxtestV1alpha1().TestRuns(o.Namespace).Create(test)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to create the TestRun CRD\")\n\t}\n\treturn nil\n}", "func (t testCommand) Run() error {\n\tif t.shouldFail {\n\t\treturn errors.New(\"I AM ERROR\")\n\t}\n\treturn nil\n}", "func runScenarioTest(assert *asserts.Asserts, param scenarioParam) {\n\tapplog.Infof(param.String())\n\tmonitoring.Reset()\n\n\t// Prepare test.\n\tdone := make(chan bool)\n\tprovider := config.NewMapConfigurationProvider()\n\tconfig := config.New(provider)\n\n\tconfig.Set(\"backend\", \"single\")\n\n\tassert.Nil(ebus.Init(config), \"single node backend started\")\n\n\tStartShopAgent()\n\tStartWarehouseAgent()\n\tStartManufacturerAgent()\n\tStartDeliveryAgent()\n\tStartWaitAgent(done)\n\n\t// Run orders.\n\tfor on := 0; on < param.Orders; on++ {\n\t\torder := 
generateOrder(on)\n\t\terr := ebus.Emit(order, \"OrderReceived\")\n\t\tassert.Nil(err, \"order emitted\")\n\t}\n\n\tselect {\n\tcase <-done:\n\t\tapplog.Infof(\"order processing done\")\n\tcase <-time.After(param.Timeout):\n\t\tassert.Fail(\"timeout during wait for processed orders\")\n\t}\n\n\t// Finalize test.\n\terr := ebus.Stop()\n\tassert.Nil(err, \"stopped the bus\")\n\ttime.Sleep(time.Second)\n\tmonitoring.MeasuringPointsPrintAll()\n}", "func runTest(m *testing.M) int {\n\t// In order to get a Mongo session we need the name of the database we\n\t// are using. The web framework middleware is using this by convention.\n\tdbName, err := cfg.String(\"MONGO_DB\")\n\tif err != nil {\n\t\tfmt.Println(\"MongoDB is not configured\")\n\t\treturn 1\n\t}\n\n\tdb, err := db.NewMGO(\"context\", dbName)\n\tif err != nil {\n\t\tfmt.Println(\"Unable to get Mongo session\")\n\t\treturn 1\n\t}\n\n\tdefer db.CloseMGO(\"context\")\n\n\ttstdata.Generate(db)\n\tdefer tstdata.Drop(db)\n\n\tloadQuery(db, \"basic.json\")\n\tloadQuery(db, \"basic_var.json\")\n\tdefer qfix.Remove(db, \"QTEST_O\")\n\n\tloadScript(db, \"basic_script_pre.json\")\n\tloadScript(db, \"basic_script_pst.json\")\n\tdefer sfix.Remove(db, \"STEST_O\")\n\n\tloadMasks(db, \"basic.json\")\n\tdefer mfix.Remove(db, \"test_xenia_data\")\n\n\treturn m.Run()\n}", "func (c *TestClient) Run() {\n\tc.MetricsServer = metrics.StartMetricsServer(fmt.Sprintf(\":%d\", c.HostConfig.MetricsPort))\n\tmetrics.RegisterMetrics(metrics.PodIPAddressAssignedLatency, metrics.PodCreationReachabilityLatency)\n\tdefer utils.ShutDownMetricsServer(context.TODO(), c.MetricsServer)\n\n\tif err := c.measurePodCreation(); err != nil {\n\t\tklog.Errorf(\"Pod creation test failed, error: %v\", err)\n\t}\n\t// Prevent application from terminating and pod from restarting.\n\t// The test will run only once when the application is deployed.\n\t// The pod needs to be recreated to rerun the test.\n\tutils.EnterIdleState(c.MainStopChan)\n}", "func main() {\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\trootPath := path.Join(cwd, \"test262-parser-tests\")\n\tpassDone := make(chan bool)\n\tpassesDone := 0\n\n\tgo runFiles(path.Join(rootPath, \"pass\"), passDone, func(file string) bool {\n\t\tif shouldSkipPass(file) {\n\t\t\treturn true\n\t\t}\n\n\t\treturn assertFile(file, false)\n\t})\n\n\tgo runFiles(path.Join(rootPath, \"fail\"), passDone, func(file string) bool {\n\t\t//if shouldSkipPass(file) {\n\t\t//\treturn\n\t\t//}\n\n\t\treturn assertFile(file, true)\n\t})\n\n\tselect {\n\tcase <-passDone:\n\t\tpassesDone++\n\n\t\tif passesDone == 2 {\n\t\t\treturn\n\t\t}\n\t}\n}" ]
[ "0.7847949", "0.7178379", "0.66959757", "0.6382603", "0.63654983", "0.63125676", "0.62624186", "0.6155679", "0.6062077", "0.6012515", "0.59727854", "0.59608316", "0.59560037", "0.594106", "0.59397423", "0.5924008", "0.59166163", "0.5908898", "0.5906732", "0.5884082", "0.5835022", "0.5819863", "0.58040196", "0.5787261", "0.5767522", "0.5757496", "0.5756991", "0.5746309", "0.5739249", "0.5728429", "0.5727468", "0.57243484", "0.57229275", "0.57169163", "0.5701745", "0.56802756", "0.5664796", "0.5648111", "0.5634137", "0.562908", "0.5612909", "0.5598511", "0.5594199", "0.5593294", "0.5587887", "0.55741096", "0.5570326", "0.55667484", "0.5565281", "0.55494136", "0.55478054", "0.5543974", "0.5538321", "0.55252147", "0.5513565", "0.5501934", "0.5501336", "0.54974043", "0.54780364", "0.54718304", "0.545059", "0.54408044", "0.54352015", "0.54334074", "0.5430396", "0.5430304", "0.5425689", "0.54015094", "0.5392096", "0.53894526", "0.5381382", "0.5377796", "0.53657556", "0.5365078", "0.53637135", "0.53583115", "0.5356249", "0.53541523", "0.535263", "0.5336745", "0.53254765", "0.5308957", "0.52978516", "0.5293046", "0.528845", "0.52854896", "0.5285354", "0.5269669", "0.5267721", "0.52651864", "0.5264345", "0.52506506", "0.5245132", "0.52422416", "0.5231052", "0.52037764", "0.5197214", "0.5194757", "0.5194736", "0.5188712" ]
0.79646385
0
selectTests applies an optionally passed selector expression against the configured set of tests, returning the selected tests
func (o Scorecard) selectTests() []Test { selected := make([]Test, 0) for _, test := range o.Config.Tests { if o.Selector.String() == "" || o.Selector.Matches(labels.Set(test.Labels)) { // TODO olm manifests check selected = append(selected, test) } } return selected }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func selectTests(selector labels.Selector, tests []Test) []Test {\n\n\tselected := make([]Test, 0)\n\n\tfor _, test := range tests {\n\t\tif selector.String() == \"\" || selector.Matches(labels.Set(test.Labels)) {\n\t\t\t// TODO olm manifests check\n\t\t\tselected = append(selected, test)\n\t\t}\n\t}\n\treturn selected\n}", "func (o *Scorecard) selectTests(stage v1alpha3.StageConfiguration) []v1alpha3.TestConfiguration {\n\tselected := make([]v1alpha3.TestConfiguration, 0)\n\tfor _, test := range stage.Tests {\n\t\tif o.Selector == nil || o.Selector.String() == \"\" || o.Selector.Matches(labels.Set(test.Labels)) {\n\t\t\t// TODO olm manifests check\n\t\t\tselected = append(selected, test)\n\t\t}\n\t}\n\treturn selected\n}", "func SupportSelectors(flagSet *pflag.FlagSet, p *[]string) {\n\tflagSet.StringArrayVarP(p, \"selector\", \"l\", []string{}, \"filter results by a set of comma-separated label selectors\")\n}", "func (tse *taskSelectorEvaluator) evalSelector(s Selector) ([]string, error) {\n\t// keep a slice of results per criterion\n\tresults := []string{}\n\tif len(s) == 0 {\n\t\treturn nil, fmt.Errorf(\"cannot evaluate selector with no criteria\")\n\t}\n\tfor i, sc := range s {\n\t\ttaskNames, err := tse.evalCriterion(sc)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error evaluating '%v' selector: %v\", s, err)\n\t\t}\n\t\tif i == 0 {\n\t\t\tresults = taskNames\n\t\t} else {\n\t\t\t// intersect all evaluated criteria\n\t\t\tresults = util.StringSliceIntersection(results, taskNames)\n\t\t}\n\t}\n\tif len(results) == 0 {\n\t\treturn nil, fmt.Errorf(\"no tasks satisfy selector '%v'\", s)\n\t}\n\treturn results, nil\n}", "func (m mockTenantQuerier) Select(_ bool, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {\n\tlog, _ := spanlogger.New(m.ctx, \"mockTenantQuerier.select\")\n\tdefer log.Span.Finish()\n\tvar matrix model.Matrix\n\n\tfor _, s := range m.matrix() {\n\t\tif metricMatches(s.Metric, matchers) {\n\t\t\tmatrix = append(matrix, s)\n\t\t}\n\t}\n\n\treturn &mockSeriesSet{\n\t\tupstream: series.MatrixToSeriesSet(false, matrix),\n\t\twarnings: m.warnings,\n\t\tqueryErr: m.queryErr,\n\t}\n}", "func RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool) {}", "func (conn *Conn) SelectTestResults() []Metrics {\n\tdb := conn.db\n\tsqlStr := \"SELECT result_type, x_axis, y_axis FROM test_results\"\n\trows, err := db.Query(sqlStr)\n\tif err != nil {\n\t\tlog.Printf(\"Error query: %v\\n\", err)\n\t}\n\n\tvar resultType string\n\tvar x float64\n\tvar y float64\n\tresult := make([]Metrics, 0)\n\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\terr = rows.Scan(&resultType, &x, &y)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error scanning row: %v\\n\", err)\n\t\t\treturn nil\n\t\t}\n\t\tresult = append(result, Metrics{resultType, x, y})\n\t}\n\treturn result\n}", "func Select(ctx context.Context, args ...core.Value) (core.Value, error) {\n\terr := core.ValidateArgs(args, 2, 4)\n\n\tif err != nil {\n\t\treturn values.None, err\n\t}\n\n\targ1 := args[0]\n\terr = core.ValidateType(arg1, drivers.HTMLPageType, drivers.HTMLDocumentType, drivers.HTMLElementType)\n\n\tif err != nil {\n\t\treturn values.False, err\n\t}\n\n\tif arg1.Type() == drivers.HTMLPageType || arg1.Type() == drivers.HTMLDocumentType {\n\t\tdoc, err := drivers.ToDocument(arg1)\n\n\t\tif err != nil {\n\t\t\treturn values.None, err\n\t\t}\n\n\t\t// selector\n\t\targ2 := args[1]\n\t\terr = core.ValidateType(arg2, types.String)\n\n\t\tif err != nil {\n\t\t\treturn 
values.False, err\n\t\t}\n\n\t\targ3 := args[2]\n\t\terr = core.ValidateType(arg3, types.Array)\n\n\t\tif err != nil {\n\t\t\treturn values.False, err\n\t\t}\n\n\t\treturn doc.SelectBySelector(ctx, arg2.(values.String), arg3.(*values.Array))\n\t}\n\n\tel := arg1.(drivers.HTMLElement)\n\targ2 := args[1]\n\n\terr = core.ValidateType(arg2, types.Array)\n\n\tif err != nil {\n\t\treturn values.False, err\n\t}\n\n\treturn el.Select(ctx, arg2.(*values.Array))\n}", "func filterTests(tests []string, patterns []string) ([]string, error) {\n\tr := []string{}\n\tfor _, test := range tests {\n\t\tif matches, err := kola.MatchesPatterns(test, patterns); err != nil {\n\t\t\treturn nil, err\n\t\t} else if matches {\n\t\t\tr = append(r, test)\n\t\t}\n\t}\n\treturn r, nil\n}", "func (r *Render) selectFiles(withRE, withoutRE *regexp.Regexp) []*File {\n\tselected := []*File{}\n\tfor _, f := range r.files {\n\t\tif withRE == nil && withoutRE == nil {\n\t\t\tselected = append(selected, f)\n\t\t\tcontinue\n\t\t}\n\n\t\tname := f.Name()\n\t\tif withRE != nil && !withRE.MatchString(name) {\n\t\t\tr.logger.Debugf(\"Skipping file '%s' by not matching with clause ('%s')\",\n\t\t\t\tname, withRE.String())\n\t\t\tcontinue\n\t\t} else if withoutRE != nil && withoutRE.MatchString(name) {\n\t\t\tr.logger.Debugf(\"Skipping file '%s' by matching without clause ('%s')\",\n\t\t\t\tname, withoutRE.String())\n\t\t\tcontinue\n\t\t}\n\t\tselected = append(selected, f)\n\t}\n\treturn selected\n}", "func (e *Evaluator) Select(expr string) ([]ast.Node, error) {\n\tn := e.n.Copy()\n\t_expr, err := xpath.Compile(expr)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"expr cannot compile: %w\", err)\n\t}\n\n\treturn nodes(_expr.Select(n)), nil\n}", "func filterBySelector(ns Nodes, selector string, negate bool) Nodes {\n\tsel := parseSelector(selector)\n\tif len(sel) != 1 {\n\t\treturn Nodes{}\n\t}\n\treturn filterByFunc(ns,\n\t\tfunc(index int, e *Node) bool {\n\t\t\treturn satisfiesSel(e, sel[0])\n\t\t}, negate)\n}", "func (m *MockUI) Select(arg0 string, arg1 []string) (int, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Select\", arg0, arg1)\n\tret0, _ := ret[0].(int)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func Run(\n\tdao DAO,\n\tstatements, setupStatements, teardownStatements, solutions []Statement,\n\tselectedQuestions []string,\n) ([]TestResult, error) {\n\tvar testResult = []TestResult{}\n\n\ti := 0\n\tresults, errs, err := dao.ExecuteStatements(setupStatements, teardownStatements, statements)\n\tsolutionResults, _, err := dao.ExecuteStatements(setupStatements, teardownStatements, solutions)\n\ttestcases := ConvertTablesToTestCases(solutionResults)\n\n\tif err != nil {\n\t\treturn testResult, err\n\t}\n\n\tfor _, expected := range testcases {\n\t\tif !stringInSlice(expected.Index, selectedQuestions) && len(selectedQuestions) > 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif i >= len(results) {\n\t\t\ttestResult = append(\n\t\t\t\ttestResult,\n\t\t\t\tTestResult{\n\t\t\t\t\tExpected: expected,\n\t\t\t\t\tActual: Result{},\n\t\t\t\t\tPass: false,\n\t\t\t\t},\n\t\t\t)\n\t\t\ti++\n\t\t\tcontinue\n\t\t}\n\t\ttable := results[i]\n\t\terr := errs[i]\n\t\t// Query has syntax error\n\t\tif err != nil {\n\t\t\ttestResult = append(\n\t\t\t\ttestResult,\n\t\t\t\tTestResult{\n\t\t\t\t\tExpected: expected,\n\t\t\t\t\tActual: table,\n\t\t\t\t\tPass: false,\n\t\t\t\t\tError: err,\n\t\t\t\t},\n\t\t\t)\n\t\t\ti++\n\t\t\tcontinue\n\t\t}\n\t\tif !reflect.DeepEqual(table.Content, expected.Content) {\n\t\t\ttestResult = 
append(\n\t\t\t\ttestResult,\n\t\t\t\tTestResult{\n\t\t\t\t\tExpected: expected,\n\t\t\t\t\tActual: table,\n\t\t\t\t\tPass: false,\n\t\t\t\t},\n\t\t\t)\n\t\t} else {\n\t\t\ttestResult = append(\n\t\t\t\ttestResult,\n\t\t\t\tTestResult{\n\t\t\t\t\tExpected: expected,\n\t\t\t\t\tActual: table,\n\t\t\t\t\tPass: true,\n\t\t\t\t},\n\t\t\t)\n\t\t}\n\t\ti++\n\t}\n\n\treturn testResult, nil\n}", "func (c *Configuration) TestsForSuites(names ...string) <-chan JobWithError {\n\toutput := make(chan JobWithError)\n\tgo func() {\n\t\tc.mutex.RLock()\n\t\tdefer c.mutex.RUnlock()\n\n\t\tseen := make(map[string]struct{})\n\t\tfor _, suite := range names {\n\t\t\ttests, ok := c.suites[suite]\n\t\t\tif !ok {\n\t\t\t\toutput <- JobWithError{\n\t\t\t\t\tJob: nil,\n\t\t\t\t\tErr: errors.Errorf(\"suite named '%s' does not exist\", suite),\n\t\t\t\t}\n\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor _, test := range tests {\n\t\t\t\tj, ok := c.tests[test]\n\n\t\t\t\tvar err error\n\t\t\t\tif !ok {\n\t\t\t\t\terr = errors.Errorf(\"test name %s is specified in suite %s\"+\n\t\t\t\t\t\t\"but does not exist\", test, suite)\n\t\t\t\t}\n\n\t\t\t\tif _, ok := seen[test]; ok {\n\t\t\t\t\t// this means a test is specified in more than one suite,\n\t\t\t\t\t// and we only want to dispatch it once.\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tseen[test] = struct{}{}\n\n\t\t\t\tif err != nil {\n\t\t\t\t\toutput <- JobWithError{Job: nil, Err: err}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\toutput <- JobWithError{Job: j, Err: nil}\n\t\t\t}\n\t\t}\n\n\t\tclose(output)\n\t}()\n\n\treturn output\n}", "func (c *Configuration) GetAllTests(tests, suites []string) <-chan JobWithError {\n\toutput := make(chan JobWithError)\n\tgo func() {\n\t\tfor check := range c.TestsByName(tests...) {\n\t\t\toutput <- check\n\t\t}\n\n\t\tfor check := range c.TestsForSuites(suites...) 
{\n\t\t\toutput <- check\n\t\t}\n\t\tclose(output)\n\t}()\n\n\treturn output\n}", "func (m *MockFullNode) MpoolSelects(arg0 context.Context, arg1 types0.TipSetKey, arg2 []float64) ([][]*types.SignedMessage, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"MpoolSelects\", arg0, arg1, arg2)\n\tret0, _ := ret[0].([][]*types.SignedMessage)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (o Scorecard) RunTests(ctx context.Context) (testOutput v1alpha3.Test, err error) {\n\n\terr = o.TestRunner.Initialize(ctx)\n\tif err != nil {\n\t\treturn testOutput, err\n\t}\n\n\ttests := o.selectTests()\n\tif len(tests) == 0 {\n\t\treturn testOutput, nil\n\t}\n\n\tfor _, test := range tests {\n\t\tresult, err := o.TestRunner.RunTest(ctx, test)\n\t\tif err != nil {\n\t\t\tresult = convertErrorToStatus(test.Name, err)\n\t\t}\n\t\ttestOutput.Status.Results = append(testOutput.Status.Results, result.Results...)\n\t}\n\n\tif !o.SkipCleanup {\n\t\terr = o.TestRunner.Cleanup(ctx)\n\t\tif err != nil {\n\t\t\treturn testOutput, err\n\t\t}\n\t}\n\treturn testOutput, nil\n}", "func getNodesWithSelector(f *framework.Framework, labelselector map[string]string) []corev1.Node {\n\tvar nodes corev1.NodeList\n\tlo := &client.ListOptions{\n\t\tLabelSelector: labels.SelectorFromSet(labelselector),\n\t}\n\tf.Client.List(goctx.TODO(), &nodes, lo)\n\treturn nodes.Items\n}", "func Selector(sel string) (NodeFunc, error) {\n\tSel, err := cascadia.Parse(sel)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn NodeFunc(func(node *html.Node) bool {\n\t\treturn Sel.Match(node)\n\t}), nil\n}", "func (n *NodeManager) GetBySelector(stype, selector, target string, hours int64) ([]OsqueryNode, error) {\n\tvar nodes []OsqueryNode\n\tvar s string\n\tswitch stype {\n\tcase \"environment\":\n\t\ts = \"environment\"\n\tcase \"platform\":\n\t\ts = \"platform\"\n\t}\n\tswitch target {\n\tcase \"all\":\n\t\tif err := n.DB.Where(s+\" = ?\", selector).Find(&nodes).Error; err != nil {\n\t\t\treturn nodes, err\n\t\t}\n\tcase \"active\":\n\t\t//if err := n.DB.Where(s+\" = ?\", selector).Where(\"updated_at > ?\", time.Now().AddDate(0, 0, -3)).Find(&nodes).Error; err != nil {\n\t\tif err := n.DB.Where(s+\" = ?\", selector).Where(\"updated_at > ?\", time.Now().Add(time.Duration(hours)*time.Hour)).Find(&nodes).Error; err != nil {\n\t\t\treturn nodes, err\n\t\t}\n\tcase \"inactive\":\n\t\t//if err := n.DB.Where(s+\" = ?\", selector).Where(\"updated_at < ?\", time.Now().AddDate(0, 0, -3)).Find(&nodes).Error; err != nil {\n\t\tif err := n.DB.Where(s+\" = ?\", selector).Where(\"updated_at < ?\", time.Now().Add(time.Duration(hours)*time.Hour)).Find(&nodes).Error; err != nil {\n\t\t\treturn nodes, err\n\t\t}\n\t}\n\treturn nodes, nil\n}", "func doTests(t *testing.T, tests []string) {\n\tdoTestsParam(t, tests, TestParams{\n\t\textensions: parser.CommonExtensions,\n\t})\n}", "func (wla *attestor) Attest(ctx context.Context, pid int32) []*common.Selector {\n\ttLabels := []telemetry.Label{{workloadPid, string(pid)}}\n\tdefer wla.c.M.MeasureSinceWithLabels([]string{workloadApi, workloadAttDur}, time.Now(), tLabels)\n\n\tplugins := wla.c.Catalog.WorkloadAttestors()\n\tsChan := make(chan []*common.Selector)\n\terrChan := make(chan error)\n\n\tfor _, p := range plugins {\n\t\tgo func(p *catalog.ManagedWorkloadAttestor) {\n\t\t\tif selectors, err := wla.invokeAttestor(ctx, p, pid); err == nil {\n\t\t\t\tsChan <- selectors\n\t\t\t} else {\n\t\t\t\terrChan <- err\n\t\t\t}\n\t\t}(p)\n\t}\n\n\t// Collect the results\n\tselectors := 
[]*common.Selector{}\n\tfor i := 0; i < len(plugins); i++ {\n\t\tselect {\n\t\tcase s := <-sChan:\n\t\t\tselectors = append(selectors, s...)\n\t\tcase err := <-errChan:\n\t\t\twla.c.L.Errorf(\"Failed to collect all selectors for PID %v: %v\", pid, err)\n\t\t}\n\t}\n\n\twla.c.M.AddSampleWithLabels([]string{workloadApi, \"discovered_selectors\"}, float32(len(selectors)), tLabels)\n\twla.c.L.Debugf(\"PID %v attested to have selectors %v\", pid, selectors)\n\treturn selectors\n}", "func Select_(children ...HTML) HTML {\n return Select(nil, children...)\n}", "func listTests(ctx context.Context, cfg *config.Config,\n\tdrv *driver.Driver,\n\tdutInfos map[string]*protocol.DUTInfo) ([]*resultsjson.Result, error) {\n\tCompanionFeatures := make(map[string]*frameworkprotocol.DUTFeatures)\n\tfor role, dutInfo := range dutInfos {\n\t\tif role != \"\" {\n\t\t\tCompanionFeatures[role] = dutInfo.GetFeatures()\n\t\t}\n\t}\n\n\tvar dutFeature *frameworkprotocol.DUTFeatures\n\tif _, ok := dutInfos[\"\"]; ok {\n\t\tdutFeature = dutInfos[\"\"].GetFeatures()\n\t}\n\n\ttests, err := drv.ListMatchedTests(ctx, cfg.Features(dutFeature, CompanionFeatures))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar shard *sharding.Shard\n\tif cfg.ShardMethod() == \"hash\" {\n\t\tshard = sharding.ComputeHash(tests, cfg.ShardIndex(), cfg.TotalShards())\n\t} else {\n\t\tshard = sharding.ComputeAlpha(tests, cfg.ShardIndex(), cfg.TotalShards())\n\t}\n\n\tvar testsToPrint []*driver.BundleEntity\n\tif cfg.ExcludeSkipped() {\n\t\ttestsToPrint, _ = removeSkippedTestsFromBundle(shard.Included)\n\t} else {\n\t\ttestsToPrint = shard.Included\n\t}\n\n\t// Convert driver.BundleEntity to resultsjson.Result.\n\tresults := make([]*resultsjson.Result, len(testsToPrint))\n\tfor i, re := range testsToPrint {\n\t\ttest, err := resultsjson.NewTest(re.Resolved.GetEntity())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresults[i] = &resultsjson.Result{\n\t\t\tTest: *test,\n\t\t\tSkipReason: strings.Join(re.Resolved.GetSkip().GetReasons(), \", \"),\n\t\t}\n\t}\n\treturn results, nil\n}", "func tests() []testcase {\n\titems := [...]testcase{\n\t\t{0, 0},\n\t\t{1, 1},\n\t\t{2, 1},\n\t\t{3, 2},\n\t\t{4, 3},\n\t\t{5, 5},\n\t\t{6, 8},\n\t\t{7, 13},\n\t\t{8, 21},\n\t\t{9, 34},\n\t\t{10, 55},\n\t\t{11, 89},\n\t\t{12, 144},\n\t\t{13, 233},\n\t\t{14, 377},\n\t\t{15, 610},\n\t\t{16, 987},\n\t\t{17, 1597},\n\t\t{18, 2584},\n\t\t{19, 4181},\n\t\t{20, 6765},\n\t}\n\treturn items[:]\n}", "func TestSubQuerySelect(t *testing.T) {\n\tassert := assert.New(t)\n\tsubQuery := NewQuery(\"Products\").Where(\"Quantity > ?\", 2).Select(\"UserID\")\n\tquery, params := Build(NewQuery(\"Users\").Where(\"ID IN ?\", subQuery).Select())\n\tassertEqual(assert, \"SELECT * FROM `Users` WHERE ID IN (SELECT `UserID` FROM `Products` WHERE Quantity > ?)\", query)\n\tassertParams(assert, []interface{}{2}, params)\n}", "func (runner TestSuiteRunner) RunTests(testNamesToRun map[string]bool, testParallelism uint) (allTestsPassed bool, executionErr error) {\n\tallTests := runner.testSuite.GetTests()\n\n\t// If the user doesn't specify any test names to run, run all of them\n\tif len(testNamesToRun) == 0 {\n\t\ttestNamesToRun = map[string]bool{}\n\t\tfor testName, _ := range allTests {\n\t\t\ttestNamesToRun[testName] = true\n\t\t}\n\t}\n\n\t// Validate all the requested tests exist\n\ttestsToRun := make(map[string]testsuite.Test)\n\tfor testName, _ := range testNamesToRun {\n\t\ttest, found := allTests[testName]\n\t\tif !found {\n\t\t\treturn false, stacktrace.NewError(\"No test 
registered with name '%v'\", testName)\n\t\t}\n\t\ttestsToRun[testName] = test\n\t}\n\n\texecutionInstanceId := uuid.Generate()\n\ttestParams, err := buildTestParams(executionInstanceId, testsToRun, runner.networkWidthBits)\n\tif err != nil {\n\t\treturn false, stacktrace.Propagate(err, \"An error occurred building the test params map\")\n\t}\n\n\t// Initialize a Docker client\n\tdockerClient, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())\n\tif err != nil {\n\t\treturn false, stacktrace.Propagate(err,\"Failed to initialize Docker client from environment.\")\n\t}\n\n\ttestExecutor := parallelism.NewTestExecutorParallelizer(\n\t\texecutionInstanceId,\n\t\tdockerClient,\n\t\trunner.testControllerImageName,\n\t\trunner.testControllerLogLevel,\n\t\trunner.customTestControllerEnvVars,\n\t\ttestParallelism)\n\n\tlogrus.Infof(\"Running %v tests with execution ID %v...\", len(testsToRun), executionInstanceId.String())\n\tallTestsPassed = testExecutor.RunInParallelAndPrintResults(testParams)\n\treturn allTestsPassed, nil\n}", "func Select(root *html.Node, sel string) []*html.Node {\n\tSel, err := cascadia.Parse(sel)\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn cascadia.QueryAll(root, Sel)\n}", "func (o QperfSpecOutput) Tests() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v QperfSpec) []string { return v.Tests }).(pulumi.StringArrayOutput)\n}", "func SelectorMetadataBySelectValue(optionTemplates []OptionTemplate, selector string) ([]PropertyMetadata, error) {\n\treturn selectorMetadataByFunc(\n\t\toptionTemplates,\n\t\tselector,\n\t\tfunc(optionTemplate OptionTemplate) string {\n\t\t\treturn optionTemplate.SelectValue\n\t\t})\n}", "func (self *TextMatcher) IsSelected() *TextMatcher {\n\tself.appendRule(matcherRule[string]{\n\t\tname: IS_SELECTED_RULE_NAME,\n\t\ttestFn: func(value string) (bool, string) {\n\t\t\tpanic(\"Special IsSelected matcher is not supposed to have its testFn method called. 
This rule should only be used within the .Lines() and .TopLines() method on a ViewAsserter.\")\n\t\t},\n\t})\n\n\treturn self\n}", "func ParseSelector(s string) Selector {\n\tvar criteria []selectCriterion\n\t// read the white-space delimited criteria\n\tcritStrings := strings.Fields(s)\n\tfor _, c := range critStrings {\n\t\tcriteria = append(criteria, stringToCriterion(c))\n\t}\n\treturn criteria\n}", "func TestSimpleSelect(t *testing.T) {\n\tqc := start_ds()\n\tqccs := start_cs()\n\n\tr, _, err := Run(qc, \"select 1 + 1\", Namespace_FS)\n\tif err != nil || len(r) == 0 {\n\t\tt.Errorf(\"did not expect err %s\", err.Error())\n\t}\n\n\trcs, _, errcs := Run(qccs, \"select * from system:keyspaces\", Namespace_CBS)\n\tif errcs != nil || len(rcs) == 0 {\n\t\tt.Errorf(\"did not expect err %s\", errcs.Error())\n\t}\n\n\tr, _, err = Run(qc, \"select * from customer\", Namespace_FS)\n\tif err != nil || len(r) == 0 {\n\t\tt.Errorf(\"did not expect err %s\", err.Error())\n\t}\n\n\tfileInfos, _ := ioutil.ReadDir(\"../../data/sampledb/dimestore/customer\")\n\tif len(r) != len(fileInfos) {\n\t\tfmt.Printf(\"num results : %#v, fileInfos: %#v\\n\", len(r), len(fileInfos))\n\t\tt.Errorf(\"expected # of results to match directory listing\")\n\t}\n\n}", "func (s SelectorGroup) Match(n *html.Node) bool {\n\tfor _, sel := range s {\n\t\tif sel.Match(n) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (tse *taskSelectorEvaluator) evalCriterion(sc selectCriterion) ([]string, error) {\n\tswitch {\n\tcase sc.Validate() != nil:\n\t\treturn nil, fmt.Errorf(\"criterion '%v' is invalid: %v\", sc, sc.Validate())\n\n\tcase sc.name == SelectAll: // special \"All Tasks\" case\n\t\tnames := []string{}\n\t\tfor _, task := range tse.tasks {\n\t\t\tnames = append(names, task.Name)\n\t\t}\n\t\treturn names, nil\n\n\tcase !sc.tagged && !sc.negated: // just a regular name\n\t\ttask := tse.byName[sc.name]\n\t\tif task == nil {\n\t\t\treturn nil, fmt.Errorf(\"no task named '%v'\", sc.name)\n\t\t}\n\t\treturn []string{task.Name}, nil\n\n\tcase sc.tagged && !sc.negated: // expand a tag\n\t\ttasks := tse.byTag[sc.name]\n\t\tif len(tasks) == 0 {\n\t\t\treturn nil, fmt.Errorf(\"no tasks have the tag '%v'\", sc.name)\n\t\t}\n\t\tnames := []string{}\n\t\tfor _, task := range tasks {\n\t\t\tnames = append(names, task.Name)\n\t\t}\n\t\treturn names, nil\n\n\tcase !sc.tagged && sc.negated: // everything *but* a specific task\n\t\tif tse.byName[sc.name] == nil {\n\t\t\t// we want to treat this as an error for better usability\n\t\t\treturn nil, fmt.Errorf(\"no task named '%v'\", sc.name)\n\t\t}\n\t\tnames := []string{}\n\t\tfor _, task := range tse.tasks {\n\t\t\tif task.Name != sc.name {\n\t\t\t\tnames = append(names, task.Name)\n\t\t\t}\n\t\t}\n\t\treturn names, nil\n\n\tcase sc.tagged && sc.negated: // everything *but* a tag\n\t\ttasks := tse.byTag[sc.name]\n\t\tif len(tasks) == 0 {\n\t\t\t// we want to treat this as an error for better usability\n\t\t\treturn nil, fmt.Errorf(\"no tasks have the tag '%v'\", sc.name)\n\t\t}\n\t\t// compare tasks by address to avoid the ones with a negated tag\n\t\tillegalTasks := map[*ProjectTask]bool{}\n\t\tfor _, taskPtr := range tasks {\n\t\t\tillegalTasks[taskPtr] = true\n\t\t}\n\t\tnames := []string{}\n\t\tfor _, taskPtr := range tse.byName {\n\t\t\tif !illegalTasks[taskPtr] {\n\t\t\t\tnames = append(names, taskPtr.Name)\n\t\t\t}\n\t\t}\n\t\treturn names, nil\n\n\tdefault:\n\t\t// protection for if we edit this switch block later\n\t\tpanic(\"this should not be reachable\")\n\t}\n}", "func 
(q *mockStoreQuerier) Select(_ bool, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {\n\tuserID, err := tenant.TenantID(q.ctx)\n\tif err != nil {\n\t\treturn storage.ErrSeriesSet(err)\n\t}\n\n\tminT, maxT := q.mint, q.maxt\n\tif sp != nil {\n\t\tminT, maxT = sp.Start, sp.End\n\t}\n\n\t// We will hit this for /series lookup when -querier.query-store-for-labels-enabled is set.\n\t// If we don't skip here, it'll make /series lookups extremely slow as all the chunks will be loaded.\n\t// That flag is only to be set with blocks storage engine, and this is a protective measure.\n\tif sp != nil && sp.Func == \"series\" {\n\t\treturn storage.EmptySeriesSet()\n\t}\n\n\tchunks, err := q.store.Get(q.ctx, userID, model.Time(minT), model.Time(maxT), matchers...)\n\tif err != nil {\n\t\treturn storage.ErrSeriesSet(err)\n\t}\n\n\treturn partitionChunks(chunks, q.mint, q.maxt, q.chunkIteratorFunc)\n}", "func RunTests(opts Options) {\n\tif opts.Cleanup {\n\t\terr := CleanupTests(opts.Driver, opts.DSN, opts.Verbose)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Cleanup failed: %s\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tos.Exit(0)\n\t}\n\t_ = flag.Set(\"test.run\", opts.Match)\n\tif opts.Verbose {\n\t\t_ = flag.Set(\"test.v\", \"true\")\n\t}\n\ttests := []testing.InternalTest{\n\t\t{\n\t\t\tName: \"MainTest\",\n\t\t\tF: func(t *testing.T) {\n\t\t\t\tTest(t, opts.Driver, opts.DSN, opts.Suites, opts.RW)\n\t\t\t},\n\t\t},\n\t}\n\n\tmainStart(tests)\n}", "func NodeSelectorRequirementsAsSelector(nsm []corev1.NodeSelectorRequirement) (labels.Selector, error) {\n\tif len(nsm) == 0 {\n\t\treturn labels.Nothing(), nil\n\t}\n\tselector := labels.NewSelector()\n\tfor _, expr := range nsm {\n\t\tvar op selection.Operator\n\t\tswitch expr.Operator {\n\t\tcase corev1.NodeSelectorOpIn:\n\t\t\top = selection.In\n\t\tcase corev1.NodeSelectorOpNotIn:\n\t\t\top = selection.NotIn\n\t\tcase corev1.NodeSelectorOpExists:\n\t\t\top = selection.Exists\n\t\tcase corev1.NodeSelectorOpDoesNotExist:\n\t\t\top = selection.DoesNotExist\n\t\tcase corev1.NodeSelectorOpGt:\n\t\t\top = selection.GreaterThan\n\t\tcase corev1.NodeSelectorOpLt:\n\t\t\top = selection.LessThan\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"%q is not a valid node selector operator\", expr.Operator)\n\t\t}\n\t\tr, err := labels.NewRequirement(expr.Key, op, expr.Values)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tselector = selector.Add(*r)\n\t}\n\treturn selector, nil\n}", "func extendSelector(selector labels.Selector, requirements ...func() (*labels.Requirement, error)) (labels.Selector, error) {\n\tif selector == nil {\n\t\tselector = labels.Everything()\n\t}\n\n\tfor _, fn := range requirements {\n\t\tr, err := fn()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tselector = selector.Add(*r)\n\t}\n\n\treturn selector, nil\n}", "func ExtractTests(args []string) ([]int, error) {\n\tvalues := make([]int, 0)\n\n\tfor _, arg := range args {\n\t\tvalue, err := strconv.Atoi(arg)\n\t\tif err != nil {\n\t\t\t//nolint:wrapcheck\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvalues = append(values, value)\n\t}\n\n\treturn values, nil\n}", "func RunSubtests(ctx *Context) {\n\tfor name, fn := range tests {\n\t\tctx.Run(name, fn)\n\t}\n}", "func (q *Query) selectClause() (columns []string, parser func(*spanner.Row) (*pb.TestResult, error)) {\n\tcolumns = []string{\n\t\t\"InvocationId\",\n\t\t\"TestId\",\n\t\t\"ResultId\",\n\t\t\"IsUnexpected\",\n\t\t\"Status\",\n\t\t\"StartTime\",\n\t\t\"RunDurationUsec\",\n\t}\n\n\t// Select extra columns 
depending on the mask.\n\tvar extraColumns []string\n\treadMask := q.Mask\n\tif readMask.IsEmpty() {\n\t\treadMask = defaultListMask\n\t}\n\tselectIfIncluded := func(column, field string) {\n\t\tswitch inc, err := readMask.Includes(field); {\n\t\tcase err != nil:\n\t\t\tpanic(err)\n\t\tcase inc != mask.Exclude:\n\t\t\textraColumns = append(extraColumns, column)\n\t\t\tcolumns = append(columns, column)\n\t\t}\n\t}\n\tselectIfIncluded(\"SummaryHtml\", \"summary_html\")\n\tselectIfIncluded(\"Tags\", \"tags\")\n\tselectIfIncluded(\"TestMetadata\", \"test_metadata\")\n\tselectIfIncluded(\"Variant\", \"variant\")\n\tselectIfIncluded(\"VariantHash\", \"variant_hash\")\n\tselectIfIncluded(\"FailureReason\", \"failure_reason\")\n\tselectIfIncluded(\"Properties\", \"properties\")\n\n\t// Build a parser function.\n\tvar b spanutil.Buffer\n\tvar summaryHTML spanutil.Compressed\n\tvar tmd spanutil.Compressed\n\tvar fr spanutil.Compressed\n\tvar properties spanutil.Compressed\n\tparser = func(row *spanner.Row) (*pb.TestResult, error) {\n\t\tvar invID invocations.ID\n\t\tvar maybeUnexpected spanner.NullBool\n\t\tvar micros spanner.NullInt64\n\t\ttr := &pb.TestResult{}\n\n\t\tptrs := []any{\n\t\t\t&invID,\n\t\t\t&tr.TestId,\n\t\t\t&tr.ResultId,\n\t\t\t&maybeUnexpected,\n\t\t\t&tr.Status,\n\t\t\t&tr.StartTime,\n\t\t\t&micros,\n\t\t}\n\n\t\tfor _, v := range extraColumns {\n\t\t\tswitch v {\n\t\t\tcase \"SummaryHtml\":\n\t\t\t\tptrs = append(ptrs, &summaryHTML)\n\t\t\tcase \"Tags\":\n\t\t\t\tptrs = append(ptrs, &tr.Tags)\n\t\t\tcase \"TestMetadata\":\n\t\t\t\tptrs = append(ptrs, &tmd)\n\t\t\tcase \"Variant\":\n\t\t\t\tptrs = append(ptrs, &tr.Variant)\n\t\t\tcase \"VariantHash\":\n\t\t\t\tptrs = append(ptrs, &tr.VariantHash)\n\t\t\tcase \"FailureReason\":\n\t\t\t\tptrs = append(ptrs, &fr)\n\t\t\tcase \"Properties\":\n\t\t\t\tptrs = append(ptrs, &properties)\n\t\t\tdefault:\n\t\t\t\tpanic(\"impossible\")\n\t\t\t}\n\t\t}\n\n\t\terr := b.FromSpanner(row, ptrs...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Generate test result name now in case tr.TestId and tr.ResultId become\n\t\t// empty after q.Mask.Trim(tr).\n\t\ttrName := pbutil.TestResultName(string(invID), tr.TestId, tr.ResultId)\n\t\ttr.SummaryHtml = string(summaryHTML)\n\t\tPopulateExpectedField(tr, maybeUnexpected)\n\t\tPopulateDurationField(tr, micros)\n\t\tif err := populateTestMetadata(tr, tmd); err != nil {\n\t\t\treturn nil, errors.Annotate(err, \"error unmarshalling test_metadata for %s\", trName).Err()\n\t\t}\n\t\tif err := populateFailureReason(tr, fr); err != nil {\n\t\t\treturn nil, errors.Annotate(err, \"error unmarshalling failure_reason for %s\", trName).Err()\n\t\t}\n\t\tif err := populateProperties(tr, properties); err != nil {\n\t\t\treturn nil, errors.Annotate(err, \"failed to unmarshal properties\").Err()\n\t\t}\n\t\tif err := q.Mask.Trim(tr); err != nil {\n\t\t\treturn nil, errors.Annotate(err, \"error trimming fields for %s\", trName).Err()\n\t\t}\n\t\t// Always include name in tr because name is needed to calculate\n\t\t// page token.\n\t\ttr.Name = trName\n\t\treturn tr, nil\n\t}\n\treturn\n}", "func (q queryCtrl) QueryAllSelector(root *Markup, sel *Selector) []*Markup {\n\tvar found []*Markup\n\n\tfor _, child := range root.children {\n\t\tif !q.queryOne(child, sel) {\n\n\t\t\tfor _, kid := range child.children {\n\t\t\t\tif q.queryOne(kid, sel) {\n\t\t\t\t\tfound = append(found, kid)\n\t\t\t\t}\n\n\t\t\t\tfound = append(found, q.QueryAllSelector(kid, sel)...)\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\tif 
sel.Children == nil {\n\t\t\tfound = append(found, child)\n\n\t\t\tfor _, kid := range child.children {\n\t\t\t\tif q.queryOne(kid, sel) {\n\t\t\t\t\tfound = append(found, kid)\n\t\t\t\t}\n\n\t\t\t\tfound = append(found, q.QueryAllSelector(kid, sel)...)\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\tkid := child\n\t\tfor _, kidSel := range sel.Children {\n\t\t\tkid = q.QuerySelector(kid, kidSel)\n\t\t\tif kid == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tfor _, mkid := range child.children {\n\t\t\tif q.queryOne(mkid, sel) {\n\t\t\t\tfound = append(found, mkid)\n\t\t\t}\n\n\t\t\tfound = append(found, q.QueryAllSelector(mkid, sel)...)\n\t\t}\n\n\t\tfound = append(found, kid)\n\t}\n\n\treturn found\n}", "func (ie *CommonIE) Tests() []SDict {\n\tif len(ie.TEST) > 0 {\n\t\treturn []SDict{ie.TEST}\n\t}\n\treturn ie.TESTS\n}", "func (s *ServiceManager) buildSelectCases() []reflect.SelectCase {\n\tcases := make([]reflect.SelectCase, len(s.signalers)+1)\n\tfor i, value := range s.signalers {\n\t\tcases[i] = reflect.SelectCase{\n\t\t\tDir: reflect.SelectRecv,\n\t\t\tChan: reflect.ValueOf(value.Select()),\n\t\t}\n\t}\n\t// add the waitForIterationDone\n\tcases[len(cases)-1].Chan = reflect.ValueOf(s.waitForIteratorDone)\n\tcases[len(cases)-1].Dir = reflect.SelectRecv\n\treturn cases\n}", "func NewSelector() Selector {\n\treturn internalSelector(nil)\n}", "func (q queryCtrl) QuerySelector(root *Markup, sel *Selector) *Markup {\n\tvar filtered *Markup\n\nchildloop:\n\tfor _, child := range root.children {\n\t\tif q.queryOne(child, sel) {\n\t\t\tfiltered = child\n\t\t\tbreak childloop\n\t\t}\n\n\t\tfor _, kid := range child.children {\n\t\t\tif q.queryOne(kid, sel) {\n\t\t\t\tfiltered = kid\n\t\t\t\tbreak childloop\n\t\t\t}\n\n\t\t\tif item := q.QuerySelector(kid, sel); item != nil {\n\t\t\t\tfiltered = item\n\t\t\t\tbreak childloop\n\t\t\t}\n\t\t}\n\t}\n\n\tif sel.Children == nil {\n\t\treturn filtered\n\t}\n\n\tfor _, child := range sel.Children {\n\t\tfiltered = q.QuerySelector(filtered, child)\n\t\tif filtered == nil {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn filtered\n}", "func NodeSelectorRequirementsAsSelector(nsm []v1.NodeSelectorRequirement) (labels.Selector, error) {\n\tif len(nsm) == 0 {\n\t\treturn labels.Nothing(), nil\n\t}\n\tselector := labels.NewSelector()\n\tfor _, expr := range nsm {\n\t\tvar op selection.Operator\n\t\tswitch expr.Operator {\n\t\tcase v1.NodeSelectorOpIn:\n\t\t\top = selection.In\n\t\tcase v1.NodeSelectorOpNotIn:\n\t\t\top = selection.NotIn\n\t\tcase v1.NodeSelectorOpExists:\n\t\t\top = selection.Exists\n\t\tcase v1.NodeSelectorOpDoesNotExist:\n\t\t\top = selection.DoesNotExist\n\t\tcase v1.NodeSelectorOpGt:\n\t\t\top = selection.GreaterThan\n\t\tcase v1.NodeSelectorOpLt:\n\t\t\top = selection.LessThan\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"%q is not a valid node selector operator\", expr.Operator)\n\t\t}\n\t\tr, err := labels.NewRequirement(expr.Key, op, expr.Values)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tselector = selector.Add(*r)\n\t}\n\treturn selector, nil\n}", "func (q queryCtrl) QueryAll(root *Markup, sel string) []*Markup {\n\tsels := q.ParseSelector(sel)\n\tif sels == nil {\n\t\treturn nil\n\t}\n\n\treturn q.QueryAllSelector(root, sels[0])\n}", "func (wq *WorkQueue) selectWorkunits(workunits WorkList, policy string, available int64, count int) (selected []*Workunit, err error) {\n\tlogger.Debug(3, \"starting selectWorkunits\")\n\n\tif policy == \"FCFS\" {\n\t\tsort.Sort(byFCFS{workunits})\n\t}\n\tadded := 0\n\tfor _, work := range 
workunits {\n\t\tif added == count {\n\t\t\tbreak\n\t\t}\n\n\t\tinputSize := int64(0)\n\t\tfor _, input := range work.Inputs {\n\t\t\tinputSize = inputSize + input.Size\n\t\t}\n\t\t// skip work that is too large for client\n\t\tif (available < 0) || (available > inputSize) {\n\t\t\tselected = append(selected, work)\n\t\t\tadded = added + 1\n\t\t}\n\n\t}\n\n\tif len(selected) == 0 {\n\t\terr = errors.New(e.NoEligibleWorkunitFound)\n\t\treturn\n\t}\n\n\tlogger.Debug(3, \"done with selectWorkunits\")\n\treturn\n}", "func ListTests() {\n\tfmt.Printf(\"Available test suites:\\n\\tauto\\n\")\n\tfor _, suite := range AllSuites {\n\t\tfmt.Printf(\"\\t%s\\n\", suite)\n\t}\n}", "func selectAny(parsers []parser) parser {\n\treturn func(in parserInput) parserOutput {\n\t\tfor _, p := range parsers {\n\t\t\tif out := p(in); out.result != nil {\n\t\t\t\treturn out\n\t\t\t}\n\t\t}\n\t\treturn fail() // all parsers failed\n\t}\n}", "func Query(sel string) (Selector, error) {\n\tp := &parser{s: sel}\n\tcompiled, err := p.parseSelectorGroup()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif p.i < len(sel) {\n\t\treturn nil, fmt.Errorf(\"parsing %q: %d bytes left over\", sel, len(sel)-p.i)\n\t}\n\n\treturn compiled, nil\n}", "func Test(t *testing.T, driver, dsn string, testSuites []string, rw bool) {\n\tclients, err := ConnectClients(t, driver, dsn, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to connect to %s (%s driver): %s\\n\", dsn, driver, err)\n\t}\n\tclients.RW = rw\n\ttests := make(map[string]struct{})\n\tfor _, test := range testSuites {\n\t\ttests[test] = struct{}{}\n\t}\n\tif _, ok := tests[SuiteAuto]; ok {\n\t\tt.Log(\"Detecting target service compatibility...\")\n\t\tsuites, err := detectCompatibility(clients.Admin)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Unable to determine server suite compatibility: %s\\n\", err)\n\t\t}\n\t\ttests = make(map[string]struct{})\n\t\tfor _, suite := range suites {\n\t\t\ttests[suite] = struct{}{}\n\t\t}\n\t}\n\ttestSuites = make([]string, 0, len(tests))\n\tfor test := range tests {\n\t\ttestSuites = append(testSuites, test)\n\t}\n\tt.Logf(\"Running the following test suites: %s\\n\", strings.Join(testSuites, \", \"))\n\tfor _, suite := range testSuites {\n\t\tRunTestsInternal(clients, suite)\n\t}\n}", "func (s *TestService) FindTests() ([]models.Test, error) {\n\tvar tests = []models.Test{}\n\tsession := s.DBSession.Copy()\n\tdefer session.Close()\n\tc := session.DB(conf.DBName).C(\"test\")\n\terr := c.Find(nil).All(&tests)\n\treturn tests, err\n}", "func AllSelector() datamodel.Node {\n\tssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any)\n\treturn ssb.ExploreRecursive(selector.RecursionLimitNone(),\n\t\tssb.ExploreAll(ssb.ExploreRecursiveEdge())).Node()\n}", "func (q queryCtrl) ParseSelector(sel string) []*Selector {\n\tvar sels []*Selector\n\titems := []byte(sel)\n\titemsLen := len(items)\n\n\tvar index int\n\tvar doChildren bool\n\tvar seenSpace bool\n\tvar seenComa bool\n\n\tvar child *Selector\n\tsels = append(sels, &Selector{})\n\n\t{\n\n\tparseLoop:\n\t\tfor {\n\t\t\tif index >= itemsLen {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\titem := items[index]\n\t\t\tif seenSpace && item == space {\n\t\t\t\tindex++\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif seenSpace && item != space {\n\t\t\t\tseenSpace = false\n\t\t\t}\n\n\t\t\tcsel := sels[len(sels)-1]\n\n\t\t\tswitch item {\n\t\t\tcase space:\n\t\t\t\tif seenComa {\n\t\t\t\t\tdoChildren = false\n\t\t\t\t}\n\n\t\t\t\tif !seenComa {\n\t\t\t\t\tdoChildren = true\n\t\t\t\t}\n\n\t\t\t\tif !seenComa 
{\n\t\t\t\t\tdoChildren = true\n\t\t\t\t}\n\n\t\t\t\tif !seenSpace && !seenComa {\n\t\t\t\t\tcsel.Children = append(csel.Children, &Selector{})\n\t\t\t\t}\n\n\t\t\t\tindex++\n\t\t\t\tseenSpace = true\n\t\t\t\tcontinue parseLoop\n\n\t\t\tcase coma:\n\t\t\t\tseenComa = true\n\n\t\t\t\tsels = append(sels, &Selector{})\n\n\t\t\t\tindex++\n\t\t\t\tcontinue parseLoop\n\n\t\t\tcase dot:\n\t\t\t\tif doChildren {\n\t\t\t\t\tchild = csel.Children[len(csel.Children)-1]\n\t\t\t\t}\n\n\t\t\t\t{\n\t\t\t\t\tvar blk []byte\n\n\t\t\t\t\tfor {\n\t\t\t\t\t\tindex++\n\n\t\t\t\t\t\tif index >= itemsLen {\n\t\t\t\t\t\t\tif len(blk) != 0 {\n\t\t\t\t\t\t\t\tif doChildren {\n\t\t\t\t\t\t\t\t\tchild.Classes = append(child.Classes, string(blk))\n\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\tcsel.Classes = append(csel.Classes, string(blk))\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\tblk = nil\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\titem = items[index]\n\n\t\t\t\t\t\tif item == dot {\n\t\t\t\t\t\t\tif doChildren {\n\t\t\t\t\t\t\t\tchild.Classes = append(child.Classes, string(blk))\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tcsel.Classes = append(csel.Classes, string(blk))\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tblk = nil\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif item == space || item == coma || item == hash {\n\t\t\t\t\t\t\tif doChildren {\n\t\t\t\t\t\t\t\tchild.Classes = append(child.Classes, string(blk))\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tcsel.Classes = append(csel.Classes, string(blk))\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tblk = nil\n\t\t\t\t\t\t\tcontinue parseLoop\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tblk = append(blk, item)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tcontinue parseLoop\n\n\t\t\tcase hash:\n\t\t\t\tif doChildren {\n\t\t\t\t\tchild = csel.Children[len(csel.Children)-1]\n\t\t\t\t}\n\n\t\t\t\t{\n\t\t\t\t\tvar blk []byte\n\n\t\t\t\t\tfor {\n\t\t\t\t\t\tindex++\n\n\t\t\t\t\t\tif index >= itemsLen {\n\t\t\t\t\t\t\tif doChildren {\n\t\t\t\t\t\t\t\tchild.Id = string(blk)\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tcsel.Id = string(blk)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tblk = nil\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\titem = items[index]\n\t\t\t\t\t\tif item == dot || item == space || item == coma {\n\n\t\t\t\t\t\t\tif doChildren {\n\t\t\t\t\t\t\t\tchild.Id = string(blk)\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tcsel.Id = string(blk)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tblk = nil\n\t\t\t\t\t\t\tcontinue parseLoop\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tblk = append(blk, item)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\tcase bracket:\n\t\t\t\tif doChildren {\n\t\t\t\t\tchild = csel.Children[len(csel.Children)-1]\n\t\t\t\t}\n\n\t\t\t\t{\n\t\t\t\t\tvar blk []byte\n\t\t\t\t\tblk = append(blk, item)\n\n\t\t\t\t\tfor {\n\t\t\t\t\t\tindex++\n\n\t\t\t\t\t\tif index >= itemsLen {\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\titem := items[index]\n\t\t\t\t\t\tif item == endbracket {\n\t\t\t\t\t\t\tblk = append(blk, item)\n\n\t\t\t\t\t\t\tattr, val, op := q.splitBracketSelector(string(blk))\n\t\t\t\t\t\t\tval = strings.Replace(val, \"'\", \"\", -1)\n\t\t\t\t\t\t\tval = strings.Replace(val, \"\\\"\", \"\", -1)\n\n\t\t\t\t\t\t\tif doChildren {\n\t\t\t\t\t\t\t\tchild.AttrOp = op\n\t\t\t\t\t\t\t\tchild.AttrName = attr\n\t\t\t\t\t\t\t\tchild.AttrValue = val\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tcsel.AttrOp = op\n\t\t\t\t\t\t\t\tcsel.AttrName = attr\n\t\t\t\t\t\t\t\tcsel.AttrValue = val\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tindex++\n\t\t\t\t\t\t\tcontinue parseLoop\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tblk = append(blk, 
item)\n\t\t\t\t\t}\n\n\t\t\t\t\tcontinue parseLoop\n\t\t\t\t}\n\n\t\t\tdefault:\n\t\t\t\tif doChildren {\n\t\t\t\t\tchild = csel.Children[len(csel.Children)-1]\n\t\t\t\t}\n\n\t\t\t\t{\n\t\t\t\t\tvar blk []byte\n\t\t\t\t\tblk = append(blk, item)\n\n\t\t\t\t\tfor {\n\t\t\t\t\t\tindex++\n\n\t\t\t\t\t\tif index >= itemsLen {\n\t\t\t\t\t\t\tif len(blk) != 0 {\n\t\t\t\t\t\t\t\tif doChildren {\n\t\t\t\t\t\t\t\t\tchild.Tag = string(blk)\n\t\t\t\t\t\t\t\t\tif psud := strings.Index(child.Tag, \":\"); psud != -1 {\n\t\t\t\t\t\t\t\t\t\tpsuedo := child.Tag[psud:]\n\t\t\t\t\t\t\t\t\t\tchild.Tag = child.Tag[:psud]\n\t\t\t\t\t\t\t\t\t\tchild.Psuedo = psuedo\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\tcsel.Tag = string(blk)\n\t\t\t\t\t\t\t\t\tif psud := strings.Index(csel.Tag, \":\"); psud != -1 {\n\t\t\t\t\t\t\t\t\t\tpsuedo := csel.Tag[psud:]\n\t\t\t\t\t\t\t\t\t\tcsel.Tag = csel.Tag[:psud]\n\t\t\t\t\t\t\t\t\t\tcsel.Psuedo = psuedo\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\titem := items[index]\n\t\t\t\t\t\tif item == space || item == coma || item == hash || item == dot || item == bracket || item == endbracket {\n\t\t\t\t\t\t\tif doChildren {\n\t\t\t\t\t\t\t\tchild.Tag = string(blk)\n\t\t\t\t\t\t\t\tif psud := strings.Index(child.Tag, \":\"); psud != -1 {\n\t\t\t\t\t\t\t\t\tpsuedo := child.Tag[psud:]\n\t\t\t\t\t\t\t\t\tchild.Tag = child.Tag[:psud]\n\t\t\t\t\t\t\t\t\tchild.Psuedo = psuedo\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tcsel.Tag = string(blk)\n\t\t\t\t\t\t\t\tif psud := strings.Index(csel.Tag, \":\"); psud != -1 {\n\t\t\t\t\t\t\t\t\tpsuedo := csel.Tag[psud:]\n\t\t\t\t\t\t\t\t\tcsel.Tag = csel.Tag[:psud]\n\t\t\t\t\t\t\t\t\tcsel.Psuedo = psuedo\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tcontinue parseLoop\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tblk = append(blk, item)\n\t\t\t\t\t}\n\n\t\t\t\t\tcontinue parseLoop\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tindex++\n\t\t}\n\t}\n\n\treturn sels\n}", "func SelectorFromValidatedSet(ls Set) Selector {\n\tif ls == nil || len(ls) == 0 {\n\t\treturn internalSelector{}\n\t}\n\trequirements := make([]Requirement, 0, len(ls))\n\tfor label, value := range ls {\n\t\trequirements = append(requirements, Requirement{key: label, operator: selection.Equals, strValues: []string{value}})\n\t}\n\t// sort to have deterministic string representation\n\tsort.Sort(ByKey(requirements))\n\treturn internalSelector(requirements)\n}", "func (rdq *ResultsDefinitionQuery) Select(fields ...string) *ResultsDefinitionSelect {\n\trdq.fields = append(rdq.fields, fields...)\n\tselbuild := &ResultsDefinitionSelect{ResultsDefinitionQuery: rdq}\n\tselbuild.label = resultsdefinition.Label\n\tselbuild.flds, selbuild.scan = &rdq.fields, selbuild.Scan\n\treturn selbuild\n}", "func (ts *TestSuite) RunTests() {\n\n\tif len(ts.Tests) == 0 {\n\t\tout.Printf(\"No tests to run\\n\")\n\t\treturn\n\t}\n\n\tstartTime := time.Now()\n\n\t// setup search\n\ts := search.NewSearch()\n\tsl := search.NewSearchLimits()\n\tsl.MoveTime = ts.Time\n\tsl.Depth = ts.Depth\n\tif sl.MoveTime > 0 {\n\t\tsl.TimeControl = true\n\t}\n\n\tout.Printf(\"Running Test Suite\\n\")\n\tout.Printf(\"==================================================================\\n\")\n\tout.Printf(\"EPD File: %s\\n\", ts.FilePath)\n\tout.Printf(\"SearchTime: %d ms\\n\", ts.Time.Milliseconds())\n\tout.Printf(\"MaxDepth: %d\\n\", ts.Depth)\n\tout.Printf(\"Date: %s\\n\", time.Now().Local())\n\tout.Printf(\"No of tests: %d\\n\", 
len(ts.Tests))\n\tout.Println()\n\n\t// execute all tests and store results in the\n\t// test instance\n\tfor i, t := range ts.Tests {\n\t\tout.Printf(\"Test %d of %d\\nTest: %s -- Target Result %s\\n\", i+1, len(ts.Tests), t.line, t.targetMoves.StringUci())\n\t\tstartTime2 := time.Now()\n\t\trunSingleTest(s, sl, t)\n\t\telapsedTime := time.Since(startTime2)\n\t\tt.nodes = s.NodesVisited()\n\t\tt.time = s.LastSearchResult().SearchTime\n\t\tt.nps = util.Nps(s.NodesVisited(), s.LastSearchResult().SearchTime)\n\t\tout.Printf(\"Test finished in %d ms with result %s (%s) - nps: %d\\n\\n\",\n\t\t\telapsedTime.Milliseconds(), t.rType.String(), t.actual.StringUci(), t.nps)\n\t}\n\n\t// sum up result for report\n\ttr := &SuiteResult{}\n\tfor _, t := range ts.Tests {\n\t\ttr.Counter++\n\t\tswitch t.rType {\n\t\tcase NotTested:\n\t\t\ttr.NotTestedCounter++\n\t\tcase Skipped:\n\t\t\ttr.SkippedCounter++\n\t\tcase Failed:\n\t\t\ttr.FailedCounter++\n\t\tcase Success:\n\t\t\ttr.SuccessCounter++\n\t\t}\n\t\ttr.Nodes += t.nodes\n\t\ttr.Time += t.time\n\t}\n\tts.LastResult = tr\n\n\telapsed := time.Since(startTime)\n\n\t// print report\n\tout.Printf(\"Results for Test Suite\\n\", ts.FilePath)\n\tout.Printf(\"------------------------------------------------------------------------------------------------------------------------------------\\n\")\n\tout.Printf(\"EPD File: %s\\n\", ts.FilePath)\n\tout.Printf(\"SearchTime: %d ms\\n\", ts.Time.Milliseconds())\n\tout.Printf(\"MaxDepth: %d\\n\", ts.Depth)\n\tout.Printf(\"Date: %s\\n\", time.Now().Local())\n\tout.Printf(\"====================================================================================================================================\\n\")\n\tout.Printf(\" %-4s | %-10s | %-8s | %-8s | %-15s | %s | %s\\n\", \" Nr.\", \"Result\", \"Move\", \"Value\", \"Expected Result\", \"Fen\", \"Id\")\n\tout.Printf(\"====================================================================================================================================\\n\")\n\tfor i, t := range ts.Tests {\n\t\tif t.tType == DM {\n\t\t\tout.Printf(\" %-4d | %-10s | %-8s | %-8s | %s%-15d | %s | %s\\n\",\n\t\t\t\ti+1, t.rType.String(), t.actual.StringUci(), t.value.String(), \"dm \", t.mateDepth, t.fen, t.id)\n\t\t} else {\n\t\t\tout.Printf(\" %-4d | %-10s | %-8s | %-8s | %s %-15s | %s | %s\\n\",\n\t\t\t\ti+1, t.rType.String(), t.actual.StringUci(), t.value.String(), t.tType.String(), t.targetMoves.StringUci(), t.fen, t.id)\n\t\t}\n\t}\n\tout.Printf(\"====================================================================================================================================\\n\")\n\tout.Printf(\"Summary:\\n\")\n\tout.Printf(\"EPD File: %s\\n\", ts.FilePath)\n\tout.Printf(\"SearchTime: %d ms\\n\", ts.Time.Milliseconds())\n\tout.Printf(\"MaxDepth: %d\\n\", ts.Depth)\n\tout.Printf(\"Date: %s\\n\", time.Now().Local())\n\tout.Printf(\"Successful: %-3d (%d %%)\\n\", tr.SuccessCounter, 100*tr.SuccessCounter/tr.Counter)\n\tout.Printf(\"Failed: %-3d (%d %%)\\n\", tr.FailedCounter, 100*tr.FailedCounter/tr.Counter)\n\tout.Printf(\"Skipped: %-3d (%d %%)\\n\", tr.SkippedCounter, 100*tr.SkippedCounter/tr.Counter)\n\tout.Printf(\"Not tested: %-3d (%d %%)\\n\", tr.NotTestedCounter, 100*tr.NotTestedCounter/tr.Counter)\n\tout.Printf(\"Test time: %s\\n\", elapsed)\n\tout.Printf(\"Configuration: %s\\n\", config.Settings.String())\n}", "func textRegexSelector(rx *regexp.Regexp) Selector {\n\treturn func(n *Node) bool {\n\t\treturn rx.MatchString(nodeText(n))\n\t}\n}", "func (n 
*NVDCVEFeedJSON10DefNode) InnerTests() []iface.LogicalTest {\n\tif len(n.ifaceChildren) != 0 {\n\t\treturn n.ifaceChildren\n\t}\n\tif len(n.Children) == 0 {\n\t\treturn nil\n\t}\n\tchildren := make([]iface.LogicalTest, len(n.Children))\n\tfor i, child := range n.Children {\n\t\tchildren[i] = iface.LogicalTest(child)\n\t}\n\treturn children\n}", "func Select(collection interface{}, predicate interface{}) interface{} {\n\tcv := ensureSlice(collection)\n\tpv := ensureFuncReturns(predicate, 1, 1, reflect.Bool)\n\tensureCanMap(cv, pv)\n\n\tlength := cv.Len()\n\toutput := reflect.MakeSlice(reflect.SliceOf(cv.Type().Elem()), 0, cv.Cap())\n\n\tfor i := 0; i < length; i++ {\n\t\tinput := cv.Index(i)\n\t\tyes := pv.Call([]reflect.Value{input})[0].Bool()\n\t\tif yes {\n\t\t\toutput = reflect.Append(output, input)\n\t\t}\n\t}\n\n\treturn output.Interface()\n}", "func Compile(sel string) (Selector, error) {\n\tcompiled, err := ParseGroup(sel)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn Selector(compiled.Match), nil\n}", "func MatchesSelector(installable interfaces.IInstallable, selector string) (bool, error) {\n\n\tlog.Logger.Tracef(\"Testing whether installable '%s' matches the selector '%s'\",\n\t\tinstallable.FullyQualifiedId(), selector)\n\n\tselectorParts := strings.Split(selector, constants.NamespaceSeparator)\n\tif len(selectorParts) != 2 {\n\t\treturn false, errors.New(fmt.Sprintf(\"Fully-qualified IDs must \"+\n\t\t\t\"be given, i.e. formatted 'manifest-id%skapp-id' or 'manifest-id%s%s' \"+\n\t\t\t\"for all kapps in a manifest\", constants.NamespaceSeparator,\n\t\t\tconstants.NamespaceSeparator, constants.WildcardCharacter))\n\t}\n\n\tselectorManifestId := selectorParts[0]\n\tselectorId := selectorParts[1]\n\n\tidParts := strings.Split(installable.FullyQualifiedId(), constants.NamespaceSeparator)\n\tif len(idParts) != 2 {\n\t\treturn false, errors.New(fmt.Sprintf(\"Fully-qualified kapp ID \"+\n\t\t\t\"has an unexpected format: %s\", installable.FullyQualifiedId()))\n\t}\n\n\tkappManifestId := idParts[0]\n\tkappId := idParts[1]\n\n\tif selectorManifestId == kappManifestId {\n\t\tif selectorId == constants.WildcardCharacter || selectorId == kappId {\n\t\t\tlog.Logger.Tracef(\"Installable '%s' did match the selector '%s'\", installable.FullyQualifiedId(), selector)\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\tlog.Logger.Tracef(\"Installable '%s' didn't match the selector '%s'\", installable.FullyQualifiedId(), selector)\n\treturn false, nil\n}", "func (m *MockMessageRepository) Select(ctx context.Context, rID int32) ([]*model.Message, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Select\", ctx, rID)\n\tret0, _ := ret[0].([]*model.Message)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func RackSelector(r cassandrav1alpha1.RackSpec, c *cassandrav1alpha1.Cluster) labels.Selector {\n\n\trackLabelsSet := labels.Set(RackLabels(r, c))\n\tsel := labels.SelectorFromSet(rackLabelsSet)\n\n\treturn sel\n}", "func Select(root NodeNavigator, expr string) *NodeIterator {\n\texp, err := Compile(expr)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn exp.Select(root)\n}", "func (c *Config) Tests() []*Test {\n\treturn c.tests\n}", "func (m *mergeQuerier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {\n\tmatchedTenants, filteredMatchers := filterValuesByMatchers(defaultTenantLabel, m.tenantIDs, matchers...)\n\tvar jobs = make([]interface{}, len(matchedTenants))\n\tvar seriesSets = make([]storage.SeriesSet, 
len(matchedTenants))\n\tvar jobPos int\n\tfor tenantPos := range m.tenantIDs {\n\t\tif _, matched := matchedTenants[m.tenantIDs[tenantPos]]; !matched {\n\t\t\tcontinue\n\t\t}\n\t\tjobs[jobPos] = &selectJob{\n\t\t\tpos: jobPos,\n\t\t\tquerier: m.queriers[tenantPos],\n\t\t\ttenantID: m.tenantIDs[tenantPos],\n\t\t}\n\t\tjobPos++\n\t}\n\n\trun := func(ctx context.Context, jobIntf interface{}) error {\n\t\tjob, ok := jobIntf.(*selectJob)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"unexpected type %T\", jobIntf)\n\t\t}\n\t\tseriesSets[job.pos] = &addLabelsSeriesSet{\n\t\t\tupstream: job.querier.Select(sortSeries, hints, filteredMatchers...),\n\t\t\tlabels: labels.Labels{\n\t\t\t\t{\n\t\t\t\t\tName: defaultTenantLabel,\n\t\t\t\t\tValue: job.tenantID,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\treturn nil\n\t}\n\n\terr := concurrency.ForEach(m.ctx, jobs, maxConcurrency, run)\n\tif err != nil {\n\t\treturn storage.ErrSeriesSet(err)\n\t}\n\n\treturn storage.NewMergeSeriesSet(seriesSets, storage.ChainedSeriesMerge)\n}", "func (n *node) Selected() ParsedOpts {\n\tm := n.matchedCommand()\n\treturn m\n}", "func LabelSelectorAsSelector(ps *model.LabelSelector) (labels.Selector, error) {\n\tif ps == nil {\n\t\treturn labels.Nothing(), nil\n\t}\n\tif len(ps.MatchLabels)+len(ps.MatchExpressions) == 0 {\n\t\treturn labels.Everything(), nil\n\t}\n\tselector := labels.NewSelector()\n\tfor k, v := range ps.MatchLabels {\n\t\tr, err := labels.NewRequirement(k, labels.Equals, []string{v})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tselector = selector.Add(*r)\n\t}\n\tfor _, expr := range ps.MatchExpressions {\n\t\tvar op labels.Operator\n\t\tswitch expr.Operator {\n\t\tcase model.LabelSelectorOpIn:\n\t\t\top = labels.In\n\t\tcase model.LabelSelectorOpNotIn:\n\t\t\top = labels.NotIn\n\t\tcase model.LabelSelectorOpExists:\n\t\t\top = labels.Exists\n\t\tcase model.LabelSelectorOpDoesNotExist:\n\t\t\top = labels.DoesNotExist\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"%q is not a valid pod selector operator\", expr.Operator)\n\t\t}\n\t\tr, err := labels.NewRequirement(expr.Key, op, append([]string(nil), expr.Values...))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tselector = selector.Add(*r)\n\t}\n\treturn selector, nil\n}", "func executeTests(t *testing.T, tests ...testExecution) {\n\tctx := setupTestRequirements(t)\n\tdefer ctx.Cleanup()\n\n\tsetupComplianceOperatorCluster(t, ctx)\n\n\t// get global framework variables\n\tf := framework.Global\n\n\tns, err := ctx.GetNamespace()\n\tif err != nil {\n\t\tt.Fatalf(\"could not get namespace: %v\", err)\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.Name, func(t *testing.T) {\n\t\t\tif err := test.TestFn(t, f, ctx, ns); err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\t\t})\n\n\t}\n}", "func matchesSelector(blockMeta *metadata.Meta, selectorLabels labels.Labels) bool {\n\tfor _, l := range selectorLabels {\n\t\tif v, ok := blockMeta.Thanos.Labels[l.Name]; !ok || (l.Value != \"*\" && v != l.Value) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (s *Selector) Matches(l Labels) bool {\n\tif len(s.Requirements) == 0 {\n\t\treturn false\n\t}\n\tfor ii := range s.Requirements {\n\t\tif matches := s.Requirements[ii].Matches(l); !matches {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func Select(mock sqlmock.Sqlmock, table string, columns []string, err error, values ...[]driver.Value) {\n\tsql := fmt.Sprintf(\"SELECT (.+) FROM %s\", table)\n\n\tif err != nil {\n\t\tmock.ExpectQuery(sql).WillReturnError(err)\n\t\treturn\n\t}\n\n\tif values == 
nil || len(values) == 0 {\n\t\tmock.ExpectQuery(sql).WillReturnRows(&sqlmock.Rows{})\n\t\treturn\n\t}\n\n\trows := sqlmock.NewRows(columns)\n\tfor _, value := range values {\n\t\trows.AddRow(value...)\n\t}\n\n\tmock.ExpectQuery(sql).WillReturnRows(rows)\n}", "func (s MockInputsBoolsHelper) RunTests(t testRunner, testSet []bool, testFunc func(t *testing.T, index int, f bool)) {\n\tif test, ok := t.(helper); ok {\n\t\ttest.Helper()\n\t}\n\n\ttest := internal.GetTest(t)\n\tif test == nil {\n\t\tt.Error(internal.ErrCanNotRunIfNotBuiltinTesting)\n\t\treturn\n\t}\n\n\tfor i, v := range testSet {\n\t\ttest.Run(fmt.Sprint(v), func(t *testing.T) {\n\t\t\tt.Helper()\n\n\t\t\ttestFunc(t, i, v)\n\t\t})\n\t}\n}", "func (c *Configuration) TestsByName(names ...string) <-chan JobWithError {\n\toutput := make(chan JobWithError)\n\tgo func() {\n\t\tc.mutex.RLock()\n\t\tdefer c.mutex.RUnlock()\n\n\t\tfor _, test := range names {\n\t\t\tj, ok := c.tests[test]\n\n\t\t\tif !ok {\n\t\t\t\toutput <- JobWithError{\n\t\t\t\t\tJob: nil,\n\t\t\t\t\tErr: errors.Errorf(\"no test named %s\", test),\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\toutput <- JobWithError{Job: j, Err: nil}\n\t\t}\n\n\t\tclose(output)\n\t}()\n\n\treturn output\n}", "func Select(db *sql.DB, q Queryer, filters string) ([]Queryer, error) {\n\tquery := fmt.Sprintf(\"SELECT * FROM %s %s\", q.TableName(), filters)\n\trows, err := db.Query(query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tqs, err := receive(q, rows)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn qs, nil\n}", "func Select(c runner.Client) *Query {\n\treturn NewQuery(types.Select, c)\n}", "func run(t *testing.T, formatter Formatter, suites ...TCatcher) {\n\tvar (\n\t\tbeforeAllFound, afterAllFound bool\n\t\tbeforeAll, afterAll, before, after reflect.Value\n\t\ttotalPassed, totalFailed, totalPending, totalNoAssertions int\n\t)\n\n\tflag.Parse()\n\n\tfor _, s := range suites {\n\t\tbeforeAll, afterAll, before, after = reflect.Value{}, reflect.Value{}, reflect.Value{}, reflect.Value{}\n\t\ts.SetT(t)\n\t\ts.Reset()\n\n\t\tiType := reflect.TypeOf(s)\n\n\t\tformatter.PrintSuiteName(strings.Split(iType.String(), \".\")[1])\n\n\t\t// search for Before and After methods\n\t\tfor i := 0; i < iType.NumMethod(); i++ {\n\t\t\tmethod := iType.Method(i)\n\t\t\tif ok, _ := regexp.MatchString(\"^BeforeAll\", method.Name); ok {\n\t\t\t\tif !beforeAllFound {\n\t\t\t\t\tbeforeAll = method.Func\n\t\t\t\t\tbeforeAllFound = true\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ok, _ := regexp.MatchString(\"^AfterAll\", method.Name); ok {\n\t\t\t\tif !afterAllFound {\n\t\t\t\t\tafterAll = method.Func\n\t\t\t\t\tafterAllFound = true\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tif ok, _ := regexp.MatchString(\"^Before\", method.Name); ok {\n\t\t\t\tbefore = method.Func\n\t\t\t}\n\t\t\tif ok, _ := regexp.MatchString(\"^After\", method.Name); ok {\n\t\t\t\tafter = method.Func\n\t\t\t}\n\t\t}\n\n\t\tif beforeAll.IsValid() {\n\t\t\tbeforeAll.Call([]reflect.Value{reflect.ValueOf(s)})\n\t\t}\n\n\t\tfor i := 0; i < iType.NumMethod(); i++ {\n\t\t\tmethod := iType.Method(i)\n\t\t\tif ok, _ := regexp.MatchString(*testToRun, method.Name); ok {\n\t\t\t\tif ok, _ := regexp.MatchString(formatter.AllowedMethodsPattern(), method.Name); ok {\n\n\t\t\t\t\ts.SetStatus(&Status{Code: STATUS_NO_ASSERTIONS})\n\n\t\t\t\t\tif before.IsValid() {\n\t\t\t\t\t\tbefore.Call([]reflect.Value{reflect.ValueOf(s)})\n\t\t\t\t\t}\n\n\t\t\t\t\tmethod.Func.Call([]reflect.Value{reflect.ValueOf(s)})\n\n\t\t\t\t\tif after.IsValid() 
{\n\t\t\t\t\t\tafter.Call([]reflect.Value{reflect.ValueOf(s)})\n\t\t\t\t\t}\n\n\t\t\t\t\tvar info *suiteInfo\n\t\t\t\t\tstatus := s.GetStatus()\n\n\t\t\t\t\tswitch status.Code {\n\t\t\t\t\tcase STATUS_PASS:\n\t\t\t\t\t\tinfo = s.GetInfo()\n\t\t\t\t\t\ttotalPassed++\n\t\t\t\t\tcase STATUS_FAIL:\n\t\t\t\t\t\tinfo = s.GetInfo()\n\t\t\t\t\t\tt.Error(status.ErrorMessage)\n\t\t\t\t\t\ttotalFailed++\n\t\t\t\t\tcase STATUS_PENDING:\n\t\t\t\t\t\tinfo = s.GetInfo()\n\t\t\t\t\t\tinfo.assertions = 0\n\t\t\t\t\t\ttotalPending++\n\t\t\t\t\tcase STATUS_NO_ASSERTIONS:\n\t\t\t\t\t\tinfo = &suiteInfo{0, method.Name}\n\t\t\t\t\t\ttotalNoAssertions++\n\t\t\t\t\t}\n\t\t\t\t\tformatter.PrintStatus(status, info)\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\n\t\tif afterAll.IsValid() {\n\t\t\tafterAll.Call([]reflect.Value{reflect.ValueOf(s)})\n\t\t}\n\t}\n\n\tformatter.PrintFinalReport(totalPassed, totalFailed, totalPending, totalNoAssertions)\n}", "func runIndexTests(t *testing.T, f func(s, sep []rune) int, funcName string, testCases []indexTest) {\n\tfor _, test := range testCases {\n\t\tactual := f(test.s, test.sep)\n\t\tif actual != test.out {\n\t\t\tt.Errorf(\"%s(%q,%q) = %v; want %v\", funcName, test.s, test.sep, actual, test.out)\n\t\t}\n\t}\n}", "func GetSelectedFields(selectionPath []string, query string) []string {\n\t// (?s) - multiline search, .+? not greedy search\n\tvar regexpStr = `(?s)` + strings.Join(selectionPath, `.+?`)\n\n\tvar ss = selectionAfter(regexpStr, query)\n\tif ss == \"\" {\n\t\treturn []string{}\n\t}\n\n\t// remove all ()\n\tfor o := outsideBrackets(\"()\", ss); o != ss; o = outsideBrackets(\"()\", ss) {\n\t\tss = o\n\t}\n\t// remove all {}\n\tfor o := outsideBrackets(\"{}\", ss); o != ss; o = outsideBrackets(\"{}\", ss) {\n\t\tss = o\n\t}\n\tss = removeSpaces(ss)\n\treturn strings.Split(ss, \" \")\n}", "func Parse(selector string) (*Selector, error) {\n\tp := &Parser{l: &Lexer{s: selector, pos: 0}}\n\trequirements, err := p.parse()\n\tif err != nil {\n\t\treturn &Selector{}, err\n\t}\n\tsort.Sort(ByKey(requirements)) // sort to grant determistic parsing\n\treturn &Selector{\n\t\tRequirements: requirements,\n\t}, nil\n}", "func (t TestCases) Run(fn func(string) (string, string), hideInput bool) {\n\tfor _, test := range t {\n\t\tpart1, part2 := fn(test.Input)\n\t\tpassedPart1 := part1 == test.ExpectedPart1 || test.ExpectedPart1 == \"\"\n\t\tpassedPart2 := part2 == test.ExpectedPart2 || test.ExpectedPart2 == \"\"\n\t\tpassed := passedPart1 && passedPart2\n\n\t\tif !passed && !hideInput {\n\t\t\tfmt.Println(\"Input \", test.Input)\n\t\t}\n\t\tif !passedPart1 {\n\t\t\tfmt.Println(\" - PART1: \", part1, \" but expected \", test.ExpectedPart1)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tif !passedPart2 {\n\t\t\tfmt.Println(\" - PART2: \", part2, \" but expected \", test.ExpectedPart2)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n}", "func (m *MockFullNode) MpoolSelect(arg0 context.Context, arg1 types0.TipSetKey, arg2 float64) ([]*types.SignedMessage, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"MpoolSelect\", arg0, arg1, arg2)\n\tret0, _ := ret[0].([]*types.SignedMessage)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func TestSelector(t *testing.T) {\n\tsrvs := []string{\"127.0.0.1:9876\", \"127.0.0.1:9879\", \"12.24.123.243:10911\", \"12.24.123.243:10915\"}\n\tnamesrv, err := NewNamesrv(primitive.NewPassthroughResolver(srvs), nil)\n\tassert.Nil(t, err)\n\n\tassert.Equal(t, srvs[0], namesrv.getNameServerAddress())\n\tassert.Equal(t, srvs[1], namesrv.getNameServerAddress())\n\tassert.Equal(t, srvs[2], 
namesrv.getNameServerAddress())\n\tassert.Equal(t, srvs[3], namesrv.getNameServerAddress())\n\tassert.Equal(t, srvs[0], namesrv.getNameServerAddress())\n\tassert.Equal(t, srvs[1], namesrv.getNameServerAddress())\n\tassert.Equal(t, srvs[2], namesrv.getNameServerAddress())\n\tassert.Equal(t, srvs[3], namesrv.getNameServerAddress())\n\tassert.Equal(t, srvs[0], namesrv.getNameServerAddress())\n}", "func (e *LiteralExpr) Selector() LogSelectorExpr { return e }", "func IncludeTests() Option {\n\treturn func(o *options) {\n\t\to.tests = true\n\t}\n}", "func (suite *AddCommandTestSuite) TestExecuteWithMultipleURLs() {\n\n}", "func (suite *AddCommandTestSuite) TestExecuteWhenMultipleTracksFound() {\n\n}", "func (c *Client) GetTests(runID int, statusID ...[]int) ([]Test, error) {\n\treturnTest := []Test{}\n\turi := \"get_tests/\" + strconv.Itoa(runID)\n\n\tif len(statusID) > 0 {\n\t\turi = applySpecificFilter(uri, \"status_id\", statusID[0])\n\t}\n\tvar err error\n\tif c.useBetaApi {\n\t\terr = c.sendRequestBeta(\"GET\", uri, nil, &returnTest, \"tests\")\n\t} else {\n\t\terr = c.sendRequest(\"GET\", uri, nil, &returnTest)\n\t}\n\treturn returnTest, err\n}", "func (b *QueryBuilder) Select(nodes ...NodeI) {\n\tif b.GroupBys != nil {\n\t\tpanic(\"You cannot have Select and GroupBy statements in the same query. The GroupBy columns will automatically be selected.\")\n\t}\n\tfor _, n := range nodes {\n\t\tif NodeGetType(n) != ColumnNodeType {\n\t\t\tpanic(\"you can only select column nodes\")\n\t\t}\n\t}\n\tb.Selects = append(b.Selects, nodes...)\n}", "func testsFilter(info os.FileInfo) bool {\n\treturn !strings.HasSuffix(info.Name(), \"_test.go\")\n}", "func extractSelectors(rule *model.Rule) (src, dst, notSrc, notDst []selector.Selector) {\n\t// Calculate a minimal set of selectors. 
combineMatchesIfPossible will try to combine the\n\t// negative matches into that single selector, if possible.\n\tsrcRawSel, notSrcSel := combineMatchesIfPossible(rule.SrcSelector, rule.NotSrcSelector)\n\tdstRawSel, notDstSel := combineMatchesIfPossible(rule.DstSelector, rule.NotDstSelector)\n\n\tparseAndAppendSelectorIfNonZero := func(slice []selector.Selector, rawSelector string) []selector.Selector {\n\t\tif rawSelector == \"\" {\n\t\t\treturn slice\n\t\t}\n\t\tsel, err := selector.Parse(rawSelector)\n\t\tif err != nil {\n\t\t\t// Should have been validated further back in the pipeline.\n\t\t\tlog.WithField(\"selector\", rawSelector).Panic(\n\t\t\t\t\"Failed to parse selector that should have been validated already.\")\n\t\t}\n\t\treturn append(slice, sel)\n\t}\n\tsrc = parseAndAppendSelectorIfNonZero(src, srcRawSel)\n\tdst = parseAndAppendSelectorIfNonZero(dst, dstRawSel)\n\tnotSrc = parseAndAppendSelectorIfNonZero(notSrc, notSrcSel)\n\tnotDst = parseAndAppendSelectorIfNonZero(notDst, notDstSel)\n\n\treturn\n}", "func (s *StatGSSAPI) Selects() []string {\n\treturn []string{\n\t\t\"pg_stat_gssapi.pid\",\n\t\t\"pg_stat_gssapi.gss_authenticated\",\n\t\t\"pg_stat_gssapi.principal\",\n\t\t\"pg_stat_gssapi.encrypted\",\n\t}\n}", "func WithSelectors(selectors ...string) Option {\n\treturn func(cfg *Config) {\n\t\tcfg.Selectors = append(cfg.Selectors, selectors...)\n\t}\n}", "func createSelectCases(channels []<-chan error) []reflect.SelectCase {\n\tcases := make([]reflect.SelectCase, len(channels))\n\tfor i, ch := range channels {\n\t\tcases[i] = reflect.SelectCase{\n\t\t\tDir: reflect.SelectRecv,\n\t\t\tChan: reflect.ValueOf(ch),\n\t\t}\n\t}\n\treturn cases\n}", "func RunSuite(s *test.Suite, context Context) ([]*Result, error) {\n results := make([]*Result, 0)\n c := context.Subcontext(make(map[string]interface{}))\n \n for _, e := range context.Gendoc {\n err := e.Prefix()\n if err != nil {\n return nil, err\n }\n }\n \n for _, e := range s.Cases {\n r, err := RunTest(e, c)\n if err != nil {\n return nil, err\n }\n results = append(results, r)\n }\n \n for _, e := range context.Gendoc {\n err := e.Suffix()\n if err != nil {\n return nil, err\n }\n }\n \n return results, nil\n}" ]
[ "0.74813306", "0.7090248", "0.5607932", "0.5303558", "0.5094614", "0.5069643", "0.5042959", "0.50331104", "0.49627775", "0.49533433", "0.49461237", "0.48975343", "0.47849888", "0.4777621", "0.47751915", "0.47463155", "0.47339553", "0.46907428", "0.46764013", "0.46577954", "0.46514568", "0.4648656", "0.46346706", "0.46267024", "0.46157312", "0.4612774", "0.46083155", "0.45868614", "0.45594838", "0.45488822", "0.4546418", "0.45433894", "0.45395163", "0.45267296", "0.44969642", "0.44929743", "0.44875887", "0.44845665", "0.4475174", "0.44744983", "0.44715056", "0.44520113", "0.44142056", "0.44098788", "0.43861505", "0.43749672", "0.43670616", "0.43663633", "0.4352289", "0.43441916", "0.43409207", "0.4319839", "0.43066373", "0.4304339", "0.43041754", "0.43025082", "0.42969176", "0.42951182", "0.4290633", "0.42866585", "0.42672178", "0.42585576", "0.4256025", "0.42443615", "0.4235183", "0.42330778", "0.4225761", "0.42202875", "0.42172447", "0.4212871", "0.42098218", "0.42039406", "0.4194045", "0.41931304", "0.41865885", "0.4180105", "0.4178484", "0.41740993", "0.41694945", "0.41668308", "0.41579196", "0.41549093", "0.41536635", "0.41516185", "0.4147543", "0.41451833", "0.41442966", "0.41427687", "0.41360024", "0.41328856", "0.41290262", "0.41171682", "0.4107008", "0.41018113", "0.4099859", "0.4098561", "0.4095678", "0.4094441", "0.4092759", "0.4088487" ]
0.7408755
1
Initialize sets up the bundle configmap for tests
func (r *PodTestRunner) Initialize(ctx context.Context) error { bundleData, err := r.getBundleData() if err != nil { return fmt.Errorf("error getting bundle data %w", err) } r.configMapName, err = r.CreateConfigMap(ctx, bundleData) if err != nil { return fmt.Errorf("error creating ConfigMap %w", err) } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *PodTestRunner) Initialize(ctx context.Context) error {\n\tbundleData, err := r.getBundleData()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error getting bundle data %w\", err)\n\t}\n\n\tr.configMapName, err = r.CreateConfigMap(ctx, bundleData)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating ConfigMap %w\", err)\n\t}\n\n\treturn nil\n\n}", "func init() {\n\tinitconf(configLocation)\n}", "func (b *bundle) Initialize(bootstrap *core.Bootstrap) {\n\t// Do nothing\n}", "func init() {\n\tsetUpConfig()\n\tsetUpUsingEnv()\n}", "func (c *ConfigMapSpec) Initialize(qserv *qservv1beta1.Qserv) client.Object {\n\tc.qserv = qserv\n\tvar object client.Object = &v1.ConfigMap{}\n\treturn object\n}", "func init() {\n\t// bootstrap cosmos-sdk config for kava chain\n\tkavaConfig := sdk.GetConfig()\n\tapp.SetBech32AddressPrefixes(kavaConfig)\n\tapp.SetBip44CoinType(kavaConfig)\n\tkavaConfig.Seal()\n}", "func init() {\n\tcallbacks = make(map[ModuleType]*ConfigCallback, 8)\n\tmodules = make(map[string]ModuleType, 32)\n}", "func init() {\n\tcore.RegisterConfigGroup(defaultConfigs)\n\tcore.RegisterServiceWithConfig(\"api\", &api.ApiServiceFactory{}, api.Configs)\n\tcore.RegisterServiceWithConfig(\"collector\", &collector.CollectorServiceFactory{}, collector.Configs)\n}", "func init() {\n\tfor group, values := range defaultConfigs {\n\t\tcore.RegisterConfig(group, values)\n\t}\n\tcore.RegisterService(\"indicator\", indicator.Configs, &indicator.IndicatorServiceFactory{})\n\tcore.RegisterService(\"executor\", executor.Configs, &executor.ExecutorServiceFactory{})\n}", "func init() {\n\tRegistry.Add(eksinfo.New())\n\tRegistry.Add(vpcinfo.New())\n\tRegistry.Add(iamresourceusage.New())\n}", "func init() {\n\tprepareOptionsFromCommandline(&configFromInit)\n\tparseConfigFromEnvironment(&configFromInit)\n}", "func init() {\n\tregistry.Add(\"tapo\", NewTapoFromConfig)\n}", "func Init() {\n\tfmt.Printf(\"Loading confusables..\")\n\treplacer = *strings.NewReplacer(confusables...)\n\tfmt.Printf(\"Loaded %d confusables\\n\", len(confusables)/2)\n}", "func (a *App) Initialize() {\n\ta.Router = mux.NewRouter()\n\ta.setRouters()\n\ta.Clientset = k8manager.Initialize()\n}", "func init() {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\tappAddr = os.Getenv(\"APP_ADDR\") // e.g. 
\"0.0.0.0:8080\" or \"\"\n\n\tconf = new(app.ConfigConode)\n\tif err := app.ReadTomlConfig(conf, defaultConfigFile); err != nil {\n\t\tfmt.Printf(\"Couldn't read configuration file: %v\", err)\n\t\tos.Exit(1)\n\t}\n\n\tsuite = app.GetSuite(conf.Suite)\n\tpub, _ := base64.StdEncoding.DecodeString(conf.AggPubKey)\n\tsuite.Read(bytes.NewReader(pub), &public_X0)\n}", "func InitConfig() {\n\tenvLoader()\n}", "func Initialize(config interface{}, hooks ...int) error {\n\t// setting the internal config\n\tcfg = loadConfig(config)\n\n\tvar err error\n\tfor _, hook := range hooks {\n\n\t\tswitch hook {\n\t\tcase LOGGER:\n\t\t\terr = initLogger()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase BUILDER:\n\t\t\terr = initBuilder()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase ROUTER:\n\t\t\terr = initRouter()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase CACHE:\n\t\t\terr = initCache()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func init() {\n\tv := initViper()\n\tconf = Config{\n\t\tApp: appConfig(v),\n\t\tMysql: mysqlConfig(v),\n\t\tRedis: redisConfig(v),\n\t\tLog: logConfig(v),\n\t\tElasticAPM: elasticApmConfig(v),\n\t\tSentry: sentryConfig(v),\n\t}\n}", "func init() {\n\tinitCfgDir()\n\tinitCreds()\n}", "func (c *Configurations) Init() error {\n\tc.Version = Version\n\tc.Location = \"Local\"\n\tc.Debug = Debug\n\n\t// server\n\tc.Server = &Server{}\n\tc.Server.Init()\n\n\t// redis init\n\tc.RedisConf = &RedisConf{}\n\tc.RedisConf.Init()\n\n\treturn nil\n}", "func init() {\n\t// Clear the air and set some stock testing values\n\tos.Setenv(\"TEST_VAULT_SKIP_VERIFY\", \"\")\n\tos.Setenv(\"TEST_VAULT_TOKEN\", \"\")\n\tviper.SetEnvPrefix(\"test_vault\")\n\tviper.BindEnv(\"token\")\n\tviper.BindEnv(\"skip_verify\")\n\n\tviper.SetConfigName(\"vaultVisualizeConfig\") // name of config file (without extension)\n\tviper.AddConfigPath(\"../test\") // path to look for the config file in\n\terr := viper.ReadInConfig() // Find and read the config file\n\tif err != nil { // Handle errors reading the config file\n\t\tpanic(fmt.Errorf(\"Fatal error config file: %s \\n\", err))\n\t}\n}", "func init() {\n\t// set reasonable defaults\n\tsetDefaults()\n\n\t// override defaults with configuration read from configuration file\n\tviper.AddConfigPath(\"$GOPATH/src/github.com/xlab-si/emmy/config\")\n\terr := loadConfig(\"defaults\", \"yml\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n}", "func init() {\n\tlog.SetFlags(0)\n\tlog.SetPrefix(\":: \")\n\n\tif XDG_CONFIG_HOME == \"\" {\n\t\tXDG_CONFIG_HOME = filepath.Join(os.Getenv(\"HOME\"), \".config\")\n\t}\n\n\tif XDG_DATA_DIRS == \"\" {\n\t\tXDG_DATA_DIRS = \"/usr/local/share/:/usr/share\"\n\t}\n\n\tcache.actions = make(map[string]string)\n\tcache.actionFiles = make(map[string]string)\n\tcache.scriptFiles = make(map[string]string)\n\n\tconfig = os.Getenv(\"DEMLORC\")\n\tif config == \"\" {\n\t\tconfig = filepath.Join(XDG_CONFIG_HOME, application, application+\"rc\")\n\t}\n}", "func InitResourceMapping(m *ResourceMapping) { resourceMapping = m }", "func Initialize(m VersionMap) {\n\tif vmgr != nil {\n\t\treturn\n\t}\n\tvmgr = new(versionManager)\n\tvmgr.mapping = m\n}", "func InitForTesting(webapp_root string) {\n\twebhook.InitRequestSaltForTesting()\n\tinitUrls(webapp_root)\n}", "func init() {\n\tcli.InitConfig(configName)\n}", "func init() {\n\tcli.InitConfig(configName)\n}", "func (p *MatterpollPlugin) initBundle() (*i18n.Bundle, error) {\n\tbundle := 
i18n.NewBundle(language.English)\n\tbundle.RegisterUnmarshalFunc(\"json\", json.Unmarshal)\n\n\tbundlePath, err := p.API.GetBundlePath()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to get bundle path\")\n\t}\n\n\ti18nDir := filepath.Join(bundlePath, \"assets\", \"i18n\")\n\tfiles, err := ioutil.ReadDir(i18nDir)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to open i18n directory\")\n\t}\n\n\tfor _, file := range files {\n\t\tif !strings.HasPrefix(file.Name(), \"active.\") {\n\t\t\tcontinue\n\t\t}\n\n\t\tif file.Name() == \"active.en.json\" {\n\t\t\tcontinue\n\t\t}\n\t\t_, err = bundle.LoadMessageFile(filepath.Join(i18nDir, file.Name()))\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to load message file %s\", file.Name())\n\t\t}\n\t}\n\n\treturn bundle, nil\n}", "func Init() error {\n\tessentialfiles := []string{\n\t\tfileutil.GlobalDefinition(),\n\t\tfileutil.GetMicroserviceDesc(),\n\t}\n\tcommonfiles := []string{\n\t\tfileutil.HystrixDefinition(),\n\t\tfileutil.GetLoadBalancing(),\n\t\tfileutil.GetRateLimiting(),\n\t\tfileutil.GetTLS(),\n\t\tfileutil.GetMonitoring(),\n\t\tfileutil.GetAuth(),\n\t\tfileutil.GetTracing(),\n\t}\n\n\tdConf, err := NewConfig(essentialfiles, commonfiles)\n\tDefaultConf = dConf\n\treturn err\n}", "func init() {\n\tSetup()\n}", "func (a *App) Initialize(config *config.Config) {\n\ta.RPCURL = config.RPCURL\n\ta.Router = mux.NewRouter()\n\ta.setRouters()\n\tlog.Println(\"Supervisord statuspage started\")\n}", "func Init(pluginRegistry *pluginregistry.PluginRegistry, log xcontext.Logger) {\n\n\t// Register TargetManager plugins\n\tfor _, tmloader := range targetManagers {\n\t\tif err := pluginRegistry.RegisterTargetManager(tmloader()); err != nil {\n\t\t\tlog.Fatalf(\"%v\", err)\n\t\t}\n\t}\n\n\t// Register TestFetcher plugins\n\tfor _, tfloader := range testFetchers {\n\t\tif err := pluginRegistry.RegisterTestFetcher(tfloader()); err != nil {\n\t\t\tlog.Fatalf(\"%v\", err)\n\t\t}\n\t}\n\n\t// Register TestStep plugins\n\tfor _, tsloader := range testSteps {\n\t\tif err := pluginRegistry.RegisterTestStep(tsloader()); err != nil {\n\t\t\tlog.Fatalf(\"%v\", err)\n\n\t\t}\n\t}\n\n\t// Register Reporter plugins\n\tfor _, rfloader := range reporters {\n\t\tif err := pluginRegistry.RegisterReporter(rfloader()); err != nil {\n\t\t\tlog.Fatalf(\"%v\", err)\n\t\t}\n\t}\n\n\t// user-defined function registration\n\ttestInitOnce.Do(func() {\n\t\tfor _, userFunction := range userFunctions {\n\t\t\tfor name, fn := range userFunction {\n\t\t\t\tif err := test.RegisterFunction(name, fn); err != nil {\n\t\t\t\t\tlog.Fatalf(\"%v\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n}", "func MockInitialize() {\n\tledgermgmt.InitializeTestEnvWithInitializer(\n\t\t&ledgermgmt.Initializer{\n\t\t\tCustomTxProcessors: ConfigTxProcessors,\n\t\t},\n\t)\n\tchains.list = make(map[string]*chain)\n\tchainInitializer = func(string) { return }\n}", "func Init(config *config.Registry) {\n\tcfg = config\n}", "func Initialize(cfg Config) {\n\tvar err error\n\tif cfg.UseKms {\n\t\t// FIXME(xnum): set at cmd.\n\t\tif utils.FullnodeCluster != utils.Environment() {\n\t\t\tif err = initKmsClient(); err != nil {\n\t\t\t\tpanic(err.Error())\n\t\t\t}\n\t\t}\n\t}\n\n\tswitch cfg.Source {\n\tcase None:\n\t\tgetters = []Getter{noneGetter}\n\tcase K8S:\n\t\tgetters = []Getter{k8sGetter}\n\tcase File:\n\t\tgetters = []Getter{staticGetter}\n\t\tif err = initDataFromFile(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t// FIXME(xnum): not encourge to use. 
It depends on env.\n\tcase Auto:\n\t\tif utils.Environment() == utils.LocalDevelopment ||\n\t\t\tutils.Environment() == utils.CI {\n\t\t\tgetters = []Getter{staticGetter}\n\t\t\terr := initDataFromFile()\n\t\t\tif err != nil {\n\t\t\t\tlog.Panicln(err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tgetters = []Getter{k8sGetter}\n\t}\n}", "func (rs *Resources) Init() {\n\trs.rm = make(resourceKeyMap)\n\trs.rsMap = make(ResourceConfigMap)\n\trs.objDeps = make(ObjectDependencyMap)\n\trs.oldRsMap = make(ResourceConfigMap)\n}", "func (a *App) Initialize(e *Env) {\n\t// set log formatter\n\tlog.SetFlags(log.LstdFlags | log.Lshortfile)\n\ta.Config = e\n\ta.Router = mux.NewRouter()\n\ta.Middlewares = &Middleware{}\n\ta.InitializeRouter()\n}", "func initializeConfigTest() (config config.Configuration, err error) {\n\tpanic(wire.Build(pkgSetConfigTest))\n}", "func init() {\n\t// Load env variable from .env file\n\tenvConfig = env.NewEnvConfig(\"../.env\")\n\n\t// Load cors domain list\n\tcorsDomainList = strings.Split(envConfig[\"APP_CORS_DOMAIN\"], \",\")\n\n\thost = envConfig[\"APP_HOST\"]\n\tif str.StringToBool(envConfig[\"APP_DEBUG\"]) {\n\t\tdebug = true\n\t\tlog.Printf(\"Running on Debug Mode: On at host [%v]\", host)\n\t}\n}", "func Init(baseCfg BaseConfig) Config {\n\t// urls := []string{}\n\n\tappConfig := Config{\n\t\tbaseCfg,\n\t\t// urls,\n\t}\n\n\treturn appConfig\n}", "func configInit() {\n\tLoadConfig(configPath, \"config\")\n\tConfig().SetDefault(\"log-level\", \"debug\")\n\tConfig().SetDefault(\"addr\", \"localhost:8081\")\n}", "func Init(c Config) error {\n\terr := c.validate()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tapiKEY = c.APIKey\n\tlog = c.Logger\n\tlistMapper = c.ListMapper\n\n\treturn nil\n}", "func InitTestConfig() {\n\tviper.SetConfigType(\"yaml\")\n\tviper.SetConfigName(\"config\")\n\tviper.AddConfigPath(\"../../\")\n\terr := viper.ReadInConfig()\n\tif err != nil {\n\t\tfmt.Println(\"viper load config faild\", err)\n\t}\n}", "func Init(configFile string) {\n\tinitializer := configInitializer{\n\t\tCommon: newDefaultCommonConfig(),\n\t\tServer: newDefaultServerConfig(),\n\t\tClient: newDefaultClientConfig(),\n\t}\n\n\tif configFile == \"\" {\n\t\tconfigFile = \"./cfg/dtail.json\"\n\t}\n\n\tif _, err := os.Stat(configFile); !os.IsNotExist(err) {\n\t\tinitializer.parseConfig(configFile)\n\t}\n\n\t// Assign pointers to global variables, so that we can access the\n\t// configuration from any place of the program.\n\tCommon = initializer.Common\n\tServer = initializer.Server\n\tClient = initializer.Client\n\n\tif Server.MapreduceLogFormat == \"\" {\n\t\tServer.MapreduceLogFormat = \"default\"\n\t}\n}", "func init() {\n\t// TODO: set logger\n\t// TODO: register storage plugin to plugin manager\n}", "func (a *App) init() {\n\tversion := flag.BoolP(\"version\", \"v\", false, \"prints the app version\")\n\thelp := flag.BoolP(\"help\", \"h\", false, \"prints the app help (--full for all parameters)\")\n\thelpFull := flag.Bool(\"full\", false, \"prints full app help (only in combination with -h)\")\n\n\tif a.options.initComponent == nil {\n\t\tpanic(\"unable to initialize app: no InitComponent given\")\n\t}\n\n\t// default config\n\tdefaultConfig := NewConfigurationSet(\"app\", \"config\", \"appConfigFilePath\", DefaultFlagSetName, true, true, true, \"config.json\", \"c\")\n\ta.appFlagSet = defaultConfig.flagSet\n\ta.appConfig = defaultConfig.config\n\n\ta.appParams = &ParametersApp{}\n\ta.appConfig.BindParameters(a.appFlagSet, \"app\", a.appParams)\n\n\tloggerParams := 
&logger.Config{}\n\ta.appConfig.BindParameters(a.appFlagSet, \"logger\", loggerParams)\n\n\t// provide the app params in the container\n\tif err := a.container.Provide(func() *ParametersApp {\n\t\treturn a.appParams\n\t}); err != nil {\n\t\tpanic(err)\n\t}\n\n\ta.configs = ConfigurationSets{}\n\ta.configs = append(a.configs, defaultConfig)\n\ta.configs = append(a.configs, a.options.initComponent.AdditionalConfigs...)\n\n\t// config file flags (needed to change the path of the config files before loading them)\n\tconfigFilesFlagSet := configuration.NewUnsortedFlagSet(\"config_files\", flag.ContinueOnError)\n\n\tfor _, config := range a.configs {\n\t\tvar cfgFilePath *string\n\t\tif config.shortHand != \"\" {\n\t\t\tcfgFilePath = configFilesFlagSet.StringP(config.filePathFlagName, config.shortHand, config.defaultConfigPath, fmt.Sprintf(\"file path of the %s configuration file\", config.configName))\n\t\t} else {\n\t\t\tcfgFilePath = configFilesFlagSet.String(config.filePathFlagName, config.defaultConfigPath, fmt.Sprintf(\"file path of the %s configuration file\", config.configName))\n\t\t}\n\n\t\tif config.filePathFlagProvideName != \"\" {\n\t\t\t// we need to provide the results of the config files flag sets, because the results are not contained in any configuration\n\t\t\tif err := a.container.Provide(func() *string {\n\t\t\t\treturn cfgFilePath\n\t\t\t}, dig.Name(config.filePathFlagProvideName)); err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}\n\t}\n\n\t// provide all config files in the container\n\tfor cfgName, config := range a.configs.ConfigsMap() {\n\t\tc := config\n\t\tif err := a.container.Provide(func() *configuration.Configuration {\n\t\t\treturn c\n\t\t}, dig.Name(cfgName)); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\t//\n\t// Collect parameters\n\t//\n\n\tcollectParameters := func(component *Component) {\n\t\tcomponent.app = a\n\n\t\tif component.Params == nil {\n\t\t\treturn\n\t\t}\n\n\t\tif component.Params.Params != nil {\n\t\t\t// sort namespaces first\n\t\t\tsortedNamespaces := make([]string, 0, len(component.Params.Params))\n\t\t\tfor namespace := range component.Params.Params {\n\t\t\t\tsortedNamespaces = append(sortedNamespaces, namespace)\n\t\t\t}\n\n\t\t\tsort.Slice(sortedNamespaces, func(i, j int) bool {\n\t\t\t\treturn sortedNamespaces[i] < sortedNamespaces[j]\n\t\t\t})\n\n\t\t\t// bind parameters in sorted order\n\t\t\tfor _, namespace := range sortedNamespaces {\n\t\t\t\tpointerToStruct := component.Params.Params[namespace]\n\t\t\t\ta.appConfig.BindParameters(a.appFlagSet, namespace, pointerToStruct)\n\t\t\t}\n\t\t}\n\n\t\tif component.Params.AdditionalParams != nil {\n\t\t\t// sort config names first\n\t\t\tsortedCfgNames := make([]string, 0, len(component.Params.AdditionalParams))\n\t\t\tfor cfgName := range component.Params.AdditionalParams {\n\t\t\t\tsortedCfgNames = append(sortedCfgNames, cfgName)\n\t\t\t}\n\n\t\t\tsort.Slice(sortedCfgNames, func(i, j int) bool {\n\t\t\t\treturn sortedCfgNames[i] < sortedCfgNames[j]\n\t\t\t})\n\n\t\t\t// iterate through config names in sorted order\n\t\t\tfor _, cfgName := range sortedCfgNames {\n\t\t\t\tparams := component.Params.AdditionalParams[cfgName]\n\n\t\t\t\t// sort namespaces first\n\t\t\t\tsortedNamespaces := make([]string, 0, len(params))\n\t\t\t\tfor namespace := range params {\n\t\t\t\t\tsortedNamespaces = append(sortedNamespaces, namespace)\n\t\t\t\t}\n\n\t\t\t\tsort.Slice(sortedNamespaces, func(i, j int) bool {\n\t\t\t\t\treturn sortedNamespaces[i] < sortedNamespaces[j]\n\t\t\t\t})\n\n\t\t\t\t// bind 
parameters in sorted order\n\t\t\t\tfor _, namespace := range sortedNamespaces {\n\t\t\t\t\tpointerToStruct := params[namespace]\n\t\t\t\t\ta.configs.ConfigsMap()[cfgName].BindParameters(a.configs.FlagSetsMap()[cfgName], namespace, pointerToStruct)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif component.Params.Masked != nil {\n\t\t\ta.maskedKeys = append(a.maskedKeys, component.Params.Masked...)\n\t\t}\n\t}\n\n\tcollectParameters(a.options.initComponent.Component)\n\n\tforEachComponent(a.options.components, func(component *Component) bool {\n\t\tcollectParameters(component)\n\n\t\treturn true\n\t})\n\n\t//\n\t// Init Stage\n\t//\n\t// the init hook function could modify the startup behavior (e.g. to display tools)\n\tif a.options.initComponent.Init != nil {\n\t\tif err := a.options.initComponent.Init(a); err != nil {\n\t\t\tpanic(fmt.Errorf(\"unable to initialize app: %w\", err))\n\t\t}\n\t}\n\n\tflag.Usage = func() {\n\t\tif a.options.usageText == \"\" {\n\t\t\t// no usage text given, use default\n\t\t\tfmt.Fprintf(os.Stderr, `Usage of %s (%s %s):\n\t\t\t\nCommand line flags:\n`, os.Args[0], a.Info().Name, a.Info().Version)\n\t\t} else {\n\t\t\tfmt.Fprintf(os.Stderr, a.options.usageText)\n\t\t}\n\n\t\tflag.PrintDefaults()\n\t}\n\n\t// parse command line flags from args\n\tconfiguration.ParseFlagSets(append(a.configs.FlagSets(), configFilesFlagSet))\n\n\t// check if version should be printed\n\tif *version {\n\t\tfmt.Println(a.Info().Name + \" \" + a.Info().Version)\n\t\tos.Exit(0)\n\t}\n\n\t// check if help text should be displayed\n\tif *help {\n\t\tif !*helpFull {\n\t\t\t// hides all non-essential flags from the help/usage text.\n\t\t\tconfiguration.HideFlags(a.configs.FlagSets(), a.options.initComponent.NonHiddenFlags)\n\t\t}\n\t\tflag.Usage()\n\t\tos.Exit(0)\n\t}\n\n\t// load all config files\n\tif err := loadConfigurations(configFilesFlagSet, a.configs); err != nil {\n\t\tpanic(err)\n\t}\n\n\t// enable version check\n\ta.options.versionCheckEnabled = a.appParams.CheckForUpdates\n\n\t// initialize the root logger\n\tloggerRoot, err := logger.NewRootLogger(*loggerParams)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\ta.loggerRoot = loggerRoot\n\n\t// initialize logger after init phase because components could modify it\n\ta.logger = loggerRoot.Named(\"App\")\n}", "func init() {\n\thome, _ := os.UserHomeDir()\n\n\tGlobalConfig = GlobalOpts{\n\t\tInstallDir: filepath.Join(home, \"probr\"),\n\t\tGodogResultsFormat: \"cucumber\",\n\t\tStartTime: time.Now(),\n\t}\n\tSetTmpDir(filepath.Join(home, \"probr\", \"tmp\")) // TODO: this needs error handling\n}", "func init() {\n\tconfigFile, err := os.Open(\"settings/config.json\")\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tbyteValue, _ := ioutil.ReadAll(configFile)\n\n\terr = json.Unmarshal(byteValue, &Config)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdefer configFile.Close()\n}", "func Init() error {\n\tif err := archaius.AddFile(Configurations.ConfigFile, archaius.WithFileHandler(util.UseFileNameAsKeyContentAsValue)); err != nil {\n\t\treturn err\n\t}\n\t_, filename := filepath.Split(Configurations.ConfigFile)\n\tcontent := archaius.GetString(filename, \"\")\n\treturn yaml.Unmarshal([]byte(content), Configurations)\n}", "func init() {\n\tcfg, err := config.LoadDefaultConfig(context.TODO(), config.WithSharedConfigProfile(\"tavern-automation\"))\n\tif err != nil {\n\t\tlog.Fatal(\"ERROR: Unable to resolve credentials for tavern-automation: \", err)\n\t}\n\n\tstsc = sts.NewFromConfig(cfg)\n\torgc = organizations.NewFromConfig(cfg)\n\tec2c = 
ec2.NewFromConfig(cfg)\n\n\t// NOTE: By default, only describes regions that are enabled in the root org account, not all Regions\n\tresp, err := ec2c.DescribeRegions(context.TODO(), &ec2.DescribeRegionsInput{})\n\tif err != nil {\n\t\tlog.Fatal(\"ERROR: Unable to describe regions\", err)\n\t}\n\n\tfor _, region := range resp.Regions {\n\t\tregions = append(regions, *region.RegionName)\n\t}\n\tfmt.Println(\"INFO: Listing all enabled regions:\")\n\tfmt.Println(regions)\n}", "func init() {\n\tviper.SetConfigFile(`services/room/config.yml`)\n\tif err := viper.ReadInConfig(); err != nil {\n\t\tfmt.Printf(\"Error reading config file, %s\", err)\n\t}\n\n}", "func Init() {\n\tif initialized {\n\t\treturn\n\t}\n\tinitialized = true\n\tpopulatecnamechain()\n\tensureresourcefinder()\n\tloadphantomjs()\n}", "func InitTestConfig() {\n\tviper.SetConfigName(\"config.test\")\n\tviper.SetConfigType(\"toml\")\n\tviper.AddConfigPath(\".\")\n\n\tif err := viper.ReadInConfig(); err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n\n\tappCfg = AppConfig{\n\t\tPort: viper.GetInt(\"app.port\"),\n\t\tMaxFileSize: viper.GetInt(\"app.max_filesize\"),\n\t\tLogLevel: viper.GetString(\"app.log_level\"),\n\t\tRespScannerVendor: strings.ToLower(viper.GetString(\"app.resp_scanner_vendor\")),\n\t\tReqScannerVendor: strings.ToLower(viper.GetString(\"app.req_scanner_vendor\")),\n\t\tRespScannerVendorShadow: strings.ToLower(viper.GetString(\"app.resp_scanner_vendor_shadow\")),\n\t\tReqScannerVendorShadow: strings.ToLower(viper.GetString(\"app.req_scanner_vendor_shadow\")),\n\t\tBypassExtensions: viper.GetStringSlice(\"app.bypass_extensions\"),\n\t\tProcessExtensions: viper.GetStringSlice(\"app.process_extensions\"),\n\t\tPreviewBytes: viper.GetString(\"app.preview_bytes\"),\n\t\tPropagateError: viper.GetBool(\"app.propagate_error\"),\n\t}\n}", "func init() {\n\ttestEnv.Init()\n}", "func init() {\n\tconsul.Register()\n\tetcd.Register()\n\tzookeeper.Register()\n\tboltdb.Register()\n}", "func init() {\n\t// NOTE: reminder that flag.Parse will be called by `go test`, so we don't need to call it here.\n\tflagConfig = flag.String(\"cbtest.config\", \"cbtest.json\", \"Path to the config file to use (credentials mostly)\")\n\tflagConfigOut = flag.String(\"cbtest.config-out\", \"\", \"Path to write the config to\")\n\tflagPlatformURL = flag.String(\"cbtest.platform-url\", \"\", \"Platform URL to use\")\n\tflagMessagingURL = flag.String(\"cbtest.messaging-url\", \"\", \"Messaging URL to use\")\n\tflagRegistrationKey = flag.String(\"cbtest.registration-key\", \"\", \"Registration key to use when creating developers\")\n\tflagSystemKey = flag.String(\"cbtest.system-key\", \"\", \"System key to use\")\n\tflagSystemSecret = flag.String(\"cbtest.system-secret\", \"\", \"System secret to use\")\n\tflagDevEmail = flag.String(\"cbtest.dev-email\", \"\", \"Developer email to use\")\n\tflagDevPassword = flag.String(\"cbtest.dev-password\", \"\", \"Developer password to use\")\n\tflagUserEmail = flag.String(\"cbtest.user-email\", \"\", \"User email to use\")\n\tflagUserPassword = flag.String(\"cbtest.user-password\", \"\", \"User password to use\")\n\tflagDeviceName = flag.String(\"cbtest.device-name\", \"\", \"Device name to use\")\n\tflagDeviceActiveKey = flag.String(\"cbtest.device-active-key\", \"\", \"Device active key to use\")\n\tflagImportUsers = flag.Bool(\"cbtest.import-users\", true, \"Whenever users should be imported\")\n\tflagImportRows = flag.Bool(\"cbtest.import-rows\", true, \"Whenever rows should be imported\")\n}", "func (s *Store) 
Init(ctx context.Context, metadataRaw secretstores.Metadata) error {\n\tmetadata, err := s.parseSecretManagerMetadata(metadataRaw)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclient, err := s.getClient(ctx, metadata)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to setup secretmanager client: %s\", err)\n\t}\n\n\ts.client = client\n\ts.ProjectID = metadata.ProjectID\n\n\treturn nil\n}", "func (cfg *Config) init() {\n\tcfg.Version = viper.GetString(\"version\")\n\tcfg.setLogLevel(viper.GetString(\"log_level\"))\n\tcfg.AppName = viper.GetString(\"app_name\")\n\tcfg.AppShortName = viper.GetString(\"app_short_name\")\n\n\tcfg.API.UsingHttps = viper.GetBool(\"api.usingHttps\")\n\tcfg.API.Port = viper.GetInt(\"api.port\")\n\tcfg.API.AllowedMethods = viper.GetStringSlice(\"api.allowed_methods\")\n\tcfg.API.AllowedHeaders = viper.GetStringSlice(\"api.allowed_headers\")\n\tcfg.API.AllowedOrigins = viper.GetStringSlice(\"api.allowed_origins\")\n\n\tcfg.Database.Host = viper.GetString(\"database.host\")\n\tcfg.Database.Port = viper.GetInt(\"database.port\")\n\tcfg.Database.Db = viper.GetString(\"database.database\")\n\tcfg.Database.User = viper.GetString(\"database.user\")\n\tcfg.Database.Password = viper.GetString(\"database.password\")\n\tcfg.Database.SSLMode = viper.GetString(\"database.sslmode\")\n\n\tcfg.Keys.CSRFKey = viper.GetString(\"secrets.csrf\")\n\tcfg.Keys.JWTSecret = viper.GetString(\"secrets.jwtsecret\")\n\tcfg.Keys.ApiLogin = viper.GetString(\"secrets.api_login\")\n}", "func (ms *ManagerService) Init(cfgfile string) {\n\tif _, err := os.Stat(cfgfile); os.IsNotExist(err) {\n\t\tlog.Fatalf(\"File '%s' does not exist.\\n\", cfgfile)\n\t} else {\n\t\tms.Config, _ = toml.LoadFile(cfgfile)\n\t}\n}", "func Init() error {\n\tbytes, err := ioutil.ReadFile(\"config.json\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = json.Unmarshal(bytes, Config)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func init() {\n\tflag.StringVar(&opt.confFile, \"static-pools-conf-file\", \"\", \"STP pool configuration file\")\n\tflag.StringVar(&opt.confDir, \"static-pools-conf-dir\", \"/etc/cmk\", \"STP pool configuration directory\")\n\tflag.BoolVar(&opt.createNodeLabel, \"static-pools-create-cmk-node-label\", false, \"Create CMK-related node label for backwards compatibility\")\n\tflag.BoolVar(&opt.createNodeTaint, \"static-pools-create-cmk-node-taint\", false, \"Create CMK-related node taint for backwards compatibility\")\n\n\tconfig.Register(PolicyPath, PolicyDescription, cfg, defaultConfig)\n}", "func init() {\n\trunEnv := os.Getenv(\"RUN_ENV\")\n\n\tif runEnv == \"prod\" {\n\t\tlistPath = \"list.json\"\n\t\tpricesPath = \"prices.json\"\n\t} else {\n\t\tlistPath = \"storage/list.json\"\n\t\tpricesPath = \"storage/prices.json\"\n\t}\n}", "func init() {\n\tcurrentDir, _ = pathhelper.GetCurrentExecDir()\n\tconfigFile = path.Join(currentDir, \"config.json\")\n}", "func init() {\n\tconfig = Config{DB: make(map[int]models.User, 10), Friends: make(map[int][]int, 10)}\n\n\t// Just to make PrivateKey assign on the next line\n\tvar err error\n\n\tconfig.PrivateKey, err = ioutil.ReadFile(\"./config/keys/key.pem\")\n\tif err != nil {\n\t\tlog.Println(\"Error reading private key\")\n\t\tlog.Println(\"private key reading error: \", err)\n\t\treturn\n\t}\n\n\tconfig.CertKey, err = ioutil.ReadFile(\"./config/keys/cert.pem\")\n\tif err != nil {\n\t\tlog.Println(\"Error reading cert key\")\n\t\tlog.Println(\"cert key error: \", err)\n\t\treturn\n\t}\n\n}", "func init() {\n\t_ = godotenv.Load()\n}", 
"func Init() map[string]interface{} {\n\n\tconfig := make(map[string]interface{})\n\tmaps := make(map[string]interface{})\n\tfor _, v := range common.Protocols {\n\t\tswitch v {\n\t\tcase \"ipfs\":\n\t\t\tmaps[\"ipfs\"] = \"place\"\n\t\t}\n\t}\n\tconfig[\"domains\"] = Domains{\n\t\tPatterns: []string{\"test\"},\n\t\tSeen: []string{\"testtest\"},\n\t}\n\tconfig[\"map\"] = maps\n\treturn config\n}", "func (c *CentralCacheTestImpl) Init(conf Config) {\n\tc.baseUrl = conf.Host\n\tc.keyPrefix = conf.KeyPrefix\n\tc.dumpFilePath = conf.DumpFilePath\n\tc.expirySec = conf.ExpirySec\n\tc.file = nil\n}", "func Init() error {\n\t// Logger = elog.DefaultLogger\n\tC.Github = &C.GithubOauth{}\n\tC.Facebook = &C.FacebookOauth{}\n\tC.Minio = &C.MinioConfig{}\n\tC.Seq = &C.Sequence{}\n\tC.JWT = &C.JWTConfig{}\n\n\terr := econf.UnmarshalKey(\"ceres.oauth.github\", C.Github)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = econf.UnmarshalKey(\"ceres.oauth.facebook\", C.Facebook)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = econf.UnmarshalKey(\"ceres.minio\", C.Minio)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = econf.UnmarshalKey(\"ceres.sequence\", C.Seq)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = econf.UnmarshalKey(\"ceres.jwt\", C.JWT)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func InitializeAll() {\n\tcs, err := framework.NewConfigStore(Constants.ComponentName)\n\tif err != nil {\n\t\tlog.Error(\"Panic while setting up config store: \" + err.Error())\n\t\tpanic(err)\n\t}\n\terr = cs.SetupEnvironmentFromSSM()\n\tif err != nil {\n\t\tlog.Error(\"Panic pulling from SSM: \" + err.Error())\n\t\tpanic(err)\n\t}\n}", "func (c *configData) init() {\n\tconst filename = \".workflow.yml\"\n\n\tc.Global = viper.New()\n\tc.Local = viper.New()\n\n\t// c.Local.SetConfigFile(\n\t// \tpath.Join(git.RootDir(), filename),\n\t// )\n\n\tc.Global.SetConfigFile(\n\t\tpath.Join(currentUser.HomeDir, filename),\n\t)\n\n\t// TODO: configs := []*viper.Viper{c.Local, c.Global}\n\tconfigs := []*viper.Viper{c.Global}\n\tfor _, v := range configs {\n\t\t_, _ = file.Touch(v.ConfigFileUsed())\n\t\tfailIfError(v.ReadInConfig())\n\t}\n\n\tfailIfError(c.validate())\n\tfailIfError(c.update())\n\tc.initJira()\n}", "func (e *Implementation) Initialize(ctx *internal.TestContext) error {\n\tconfig, err := kube.CreateConfig(ctx.Settings().KubeConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif e.Accessor, err = kube.NewAccessor(config); err != nil {\n\t\treturn err\n\t}\n\n\te.ctx = ctx\n\n\treturn e.allocateDependencyNamespace()\n}", "func (m *MockInterface) Init(kubeconfigPath, kubeconfigContext string) error {\n\treturn nil\n}", "func init() {\n\n\tviper.SetConfigName(\"config\") // name of config file (without extension)\n\tviper.SetConfigType(\"json\")\n\tviper.AddConfigPath(\".\") // path to look for the config file in\n\tviper.AddConfigPath(\"./config/\")\n\terr := viper.ReadInConfig() // Find and read the config file\n\tif err != nil { // Handle errors reading the config file\n\t\tpanic(fmt.Errorf(\"fatal error config file: %w\", err))\n\t}\n\n\tServer.Host = viper.GetString(\"host\")\n\tServer.DBUname = viper.GetString(\"dbuname\")\n\tServer.DBPword = viper.GetString(\"dbpword\")\n\tServer.DBAddress = viper.GetString(\"dbaddress\")\n\tServer.Port = viper.GetString(\"port\")\n\n\t// Setup global logging format\n\tlog.SetFlags(log.Ldate | log.Ltime | log.Lshortfile | log.Lmicroseconds)\n\n\t// Seed default random source\n\trand.Seed(time.Now().UnixNano())\n\n\tif !Debug.LongLog 
{\n\t\tlog.SetFlags(log.Ldate | log.Ltime)\n\t\tlog.Printf(\"Switching to short log format.\")\n\t}\n}", "func init() {\n\tHome, err := homedir.Dir()\n\tif err != nil {\n\t\tzap.S().Fatalw(\"error finding home directory\", err)\n\t}\n\n\t// Search config in home directory with name \".[Executable]\" (without extension).\n\tviper.AddConfigPath(\".\")\n\tviper.AddConfigPath(Home)\n\tviper.SetConfigName(\".\" + Executable)\n\tviper.SetTypeByDefaultValue(true)\n\tviper.SetEnvKeyReplacer(strings.NewReplacer(\".\", \"_\"))\n\tviper.SetDefault(\"kubeconfig\", filepath.Join(Home, \".kube\", \"config\"))\n\tviper.SetDefault(\"namespace\", \"\")\n}", "func InitConfig() {\n\tglobalConfig.BackendServerAddr = \"http://localhost:8080\"\n}", "func init() {\n\t// Load Env vars\n\tgotenv.Load()\n}", "func (gr *Reconciler) Init() {\n\tkm := k8s.NewRsrcManager().WithName(\"basek8s\").WithClient(gr.Manager.GetClient()).WithScheme(gr.Manager.GetScheme())\n\tgr.RsrcMgr.Add(k8s.Type, km)\n\tapp.AddToScheme(&AddToSchemes)\n\tAddToSchemes.AddToScheme(gr.Manager.GetScheme())\n}", "func init() {\n\tpkgcfg.Register(PolicyPath, PolicyDescription, conf, defaultConfig)\n}", "func initApplicationConfiguration() {\n var emptyConfigParam string = \"\"\n\n config.InitApp(emptyConfigParam)\n config.InitDatabase(emptyConfigParam)\n config.InitRoutes(emptyConfigParam)\n}", "func init() {\n\tuseSim, err := strconv.ParseBool(os.Getenv(\"FORCE_TPM_VER\"))\n\tif err == nil {\n\t\tConfig.UseSimulator = useSim\n\t\tswitch simVer := os.Getenv(\"TPM_VER\"); simVer {\n\t\tcase \"V12\":\n\t\tcase \"1.2\":\n\t\t\tConfig.SimulatorVersion = V12\n\t\tcase \"V20\":\n\t\tcase \"2.0\":\n\t\t\tConfig.SimulatorVersion = V20\n\t\t}\n\t}\n\tswitch tcti := os.Getenv(\"TPM_TCTI\"); tcti {\n\tcase \"socket-legacy\":\n\t\tConfig.V20.Tcti = SocketLegacy\n\tcase \"socket\":\n\t\tConfig.V20.Tcti = Socket\n\tcase \"abrmd\":\n\t\tConfig.V20.Tcti = Abrmd\n\tcase \"abrmd-legacy\":\n\tdefault:\n\t\tConfig.V20.Tcti = AbrmdLegacy\n\t}\n}", "func (b *Bootstrapper) Initialize(config Config) error {\n\tb.Config = config\n\tb.Ctx.Log.Info(\"Starting bootstrap...\")\n\n\tif b.Config.StartupAlpha > 0 {\n\t\treturn nil\n\t}\n\treturn b.startup()\n}", "func (a *Application) Init(self ApplicationI) {\n\ta.Base.Init(self)\n\n\tself.SetupErrorHandling()\n\tself.SetupPagestateCaching()\n\tself.SetupSessionManager()\n\tself.SetupMessenger()\n\tself.SetupPaths()\n\tself.SetupDatabaseWatcher()\n\n\tpage.DefaultCheckboxLabelDrawingMode = html5tag.LabelAfter\n}", "func init() {\n\tflag.Parse()\n\n\tConfigBytes, err := ioutil.ReadFile(*ConfigFile)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\terr = json.Unmarshal(ConfigBytes, &Config)\n\tif err != nil {\n\t\tpanic(err)\n\t\t// TODO: add line numbers to log so i can use log.fatal\n\t\t// https://golang.org/pkg/log/#pkg-examples\n\t\t// logger := log.New(os.Stderr, \"OH NO AN ERROR\", log.Llongfile)\n\t}\n\n\tProducts, err = search.New(Config.Search)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func initConfig() {\n\n\t// Configure logging\n\tconst logNameConsole = \"console\"\n\tlevel, _ := glog.NewLogLevel(rootFlags.logLevel)\n\tglog.ClearBackends()\n\tglog.SetBackend(logNameConsole, glog.NewWriterBackend(os.Stderr, \"\", level, \"\"))\n\n\t// Load config from file\n\tresticmanager.AppConfig.Load(rootFlags.appConfigFile)\n\tresticmanager.AppConfig.DryRun = rootFlags.dryrun\n\n\t// Add file logging if required\n\tif logConfig := resticmanager.AppConfig.LoggingConfig(); (!rootFlags.noFileLogging) && (logConfig != nil) {\n\t\tconst 
logNameFile = \"appfile\"\n\n\t\tglog.SetBackend(logNameFile,\n\t\t\tglog.NewFileBackend(\n\t\t\t\tlogConfig.Filename,\n\t\t\t\tlogConfig.Append,\n\t\t\t\t\"\",\n\t\t\t\tlogConfig.Level,\n\t\t\t\t\"\", // Use default logging format\n\t\t\t))\n\n\t}\n\n}", "func init() {\n\t// Log as JSON instead of the default ASCII formatter.\n\tlog.SetFormatter(&log.JSONFormatter{})\n\tlog.SetOutput(os.Stdout)\n\tlog.SetLevel(log.InfoLevel)\n\tInitConfiguration(\"./\")\n}", "func (s *Flattener) Init(conf map[string]string) error {\n\treturn nil\n}", "func (s *SampleManager) Initialize(app string) error {\n\tif app == \"\" {\n\t\treturn errors.New(\"Sample name is empty\")\n\t}\n\n\tappPath, err := s.appCacheFolder(app)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// We still set the repo path here. There are some failure cases\n\t// that we can still work with (like no updates or repo already exists)\n\ts.repoPath = appPath\n\n\tlist, err := s.SampleLister.ListSamples(\"create\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := s.Fs.Stat(appPath); os.IsNotExist(err) {\n\t\tsampleData, ok := list[app]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Sample %s does not exist\", app)\n\t\t}\n\t\terr = s.Git.Clone(appPath, sampleData.GitRepo())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\terr := s.Git.Pull(appPath)\n\t\tif err != nil {\n\t\t\tif err != nil {\n\t\t\t\tswitch e := err.Error(); e {\n\t\t\t\tcase git.NoErrAlreadyUpToDate.Error():\n\t\t\t\t\t// Repo is already up to date. This isn't a program\n\t\t\t\t\t// error to continue as normal\n\t\t\t\t\tbreak\n\t\t\t\tdefault:\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tconfigFile, err := afero.ReadFile(s.Fs, filepath.Join(appPath, \".cli.json\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = json.Unmarshal(configFile, &s.SampleConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func Init() (err error) {\n\tconfig := &Configuration{}\n\n\tpublicPath := os.Getenv(\"TAILLA_PUBLIC_PATH\")\n\ttemplatePath := os.Getenv(\"TAILLA_TEMPLATE_PATH\")\n\tbind := os.Getenv(\"TAILLA_BIND\")\n\tuploadPath := os.Getenv(\"UPLOAD_PATH\")\n\n\tif publicPath == \"\" {\n\t\treturn errors.New(\"Missing TAILLA_PUBLIC_PATH\")\n\t}\n\tif templatePath == \"\" {\n\t\treturn errors.New(\"Missing TAILLA_TEMPLATE_PATH\")\n\t}\n\tif bind == \"\" {\n\t\tbind = \"0.0.0.0:8000\"\n\t}\n\tif uploadPath == \"\" {\n\t\tuploadPath = \"/tmp\"\n\t}\n\n\tconfig.PublicPath = publicPath\n\tconfig.TemplatePath = templatePath\n\tconfig.Bind = bind\n\tconfig.UploadPath = uploadPath\n\n\tCurrentApplication.Configuration = config\n\n\tloadTemplates()\n\treturn\n}", "func Init() {\n\tdocker.Init()\n\thost.Init()\n\tlabel.Init()\n\tospackages.Init()\n\tdiff.Init()\n\tcontainer.Init()\n}", "func (service *Service) Initialize(configURL *url.URL, logger types.StdLogger) error {\n\tservice.Logger.SetLogger(logger)\n\tconfig, pkr := DefaultConfig()\n\tservice.config = config\n\tservice.pkr = pkr\n\n\treturn service.config.setURL(&service.pkr, configURL)\n}", "func Initialize(conf *config.Config) error {\n\tstripe.Key = conf.Stripe.SecretToken\n\tif conf.Debug {\n\t\tstripe.LogLevel = 3\n\t}\n\tgo CreateDefaultPlans()\n\treturn nil\n}", "func Init() {\n\tlog.SetFlags(log.Lshortfile | log.LstdFlags)\n\n\tif conf, err = config.Load(); err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n\tlog.Printf(\"Load config %+v\\n\", conf)\n}", "func doInitialSetup() {\n\terr := initLogging(os.Getenv(\"SENTRY_DSN\"))\n\tpanicOnError(err)\n\tconfPath := 
os.Getenv(\"OJC_CONFIG_PATH\")\n\tif len(confPath) == 0 {\n\t\tconfPath = \"/opt/ojc/ojc.yml\"\n\t}\n\tuserConfig, err = loadConfig(confPath) //userConfig is a global\n\tif ocDebug {\n\t\tlog.Println(userConfig)\n\t}\n\tpanicOnError(err)\n}", "func TestEarlybirdCfg_ConfigInit(t *testing.T) {\n\teb.ConfigInit()\n}", "func init() {\n\tflag.StringVar(&configfile, \"configfile\", \"/data/config/go/best-practices/config.yml\", \"config file full path\")\n\tflag.StringVar(&loggerfile, \"loggerfile\", \"\", \"seelog config file\")\n\tflag.BoolVar(&help, \"h\", false, \"show help\")\n\tflag.IntVar(&port, \"port\", 0, \"service port to listen\")\n\tflag.Parse()\n\n\tif help {\n\t\tflag.Usage()\n\t}\n\t// init logger firstly!!!\n\tmylogger.Init(loggerfile)\n\n\tappConfig.GetConfig(configfile)\n\n\tlogger.Infof(\"Init with config:%+v\", appConfig)\n}", "func init() {\n\t// <<-- Creer-Merge: init -->>\n\t// package initialization logic can go here\n\t// <<-- /Creer-Merge: init -->>\n}", "func (a *App) Initialize(configpath string) {\n\tvar err error\n\n\t// load the configs\n\ta.Configs, err = config.LoadConfiguration(configpath)\n\tif err != nil {\n\t\tfmt.Printf(\"%s failed to start due to invalid config. error: %+v, config-path: %s\", AppName, err.Error(), configpath)\n\t\tos.Exit(1) // kill the app\n\t}\n\n\t// initializing logging\n\tlogConfig := logging.LogConfig{\n\t\tAppName: a.Configs.Logging.AppName,\n\t\tAppVersion: a.Configs.Logging.AppVersion,\n\t\tLevel: a.Configs.Logging.Level,\n\t}\n\n\tlogger, err := logging.New(&logConfig)\n\tif err != nil {\n\t\tfmt.Printf(\"%s failed to init logger. Error: %+v\", AppName, err.Error())\n\t\tos.Exit(1) // kill the app\n\t}\n\n\t// attach the logger instrument to the app struct\n\ta.Logger = &logger\n\n\ta.router = mux.NewRouter()\n\ta.initializeRoutes()\n}", "func initConfig() {\n\n\t_, hasToken := os.LookupEnv(\"PRIVATE_ACCESS_TOKEN\")\n\t_, hasURL := os.LookupEnv(\"CI_PROJECT_URL\")\n\tif !hasToken || !hasURL {\n\t\tlog.Fatal(\"You need to set 'CI_PROJECT_URL' and 'PRIVATE_ACCESS_TOKEN'\")\n\t}\n\n\tviper.Set(\"Token\", os.Getenv(\"PRIVATE_ACCESS_TOKEN\"))\n\tviper.Set(\"ProjectUrl\", os.Getenv(\"CI_PROJECT_URL\"))\n\n\tu, err := url.Parse(viper.GetString(\"ProjectUrl\"))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tviper.Set(\"BaseUrl\", fmt.Sprintf(\"%s://%s\", u.Scheme, u.Host))\n\tviper.Set(\"RegistryUrl\", fmt.Sprintf(\"%s/container_registry.json\", viper.GetString(\"ProjectUrl\")))\n\n}", "func Init() {\n\tconf := config.GetConfig()\n\tvar err error\n\n\tLog, err = plivolog.New()\n\tif conf.GetString(\"general.config\") == \"test\" {\n\t\tLog.Info(\"For testing environment err will be ignored\")\n\t\treturn\n\t}\n\tif err != nil {\n\t\tpanic(\"logger could not be initialized. Error \" + err.Error())\n\t}\n}" ]
[ "0.7311859", "0.6341264", "0.63236684", "0.6308758", "0.6293854", "0.6219397", "0.61819357", "0.6114997", "0.6113638", "0.61045194", "0.610196", "0.6097097", "0.60905164", "0.6058089", "0.6026225", "0.60097724", "0.60009825", "0.5987813", "0.59876114", "0.5987233", "0.5917201", "0.5900763", "0.5847975", "0.5835712", "0.5833477", "0.5812115", "0.58077013", "0.58077013", "0.5803678", "0.5802756", "0.57802516", "0.5770079", "0.5769165", "0.5768041", "0.5764391", "0.5762994", "0.575512", "0.57514405", "0.5749705", "0.5744964", "0.5738927", "0.57381797", "0.5733818", "0.57322043", "0.57316613", "0.5730928", "0.57302064", "0.57295555", "0.5727604", "0.57253987", "0.57231474", "0.5716576", "0.5716015", "0.571217", "0.5705364", "0.5699698", "0.56969297", "0.5692161", "0.5689297", "0.5682242", "0.5682188", "0.56816655", "0.5678898", "0.5671848", "0.5671224", "0.5654619", "0.56528485", "0.5649887", "0.5648211", "0.56386936", "0.5632444", "0.5627811", "0.562407", "0.5621768", "0.5599639", "0.5597577", "0.55975443", "0.5595382", "0.55858463", "0.55810016", "0.5580575", "0.5578918", "0.5572611", "0.55714697", "0.5570037", "0.5567714", "0.5564327", "0.555578", "0.5550355", "0.5549467", "0.55481124", "0.5544605", "0.5543765", "0.55417526", "0.55403036", "0.5537301", "0.5537258", "0.5527444", "0.55274373", "0.5526082" ]
0.7322952
0
Cleanup deletes pods and configmap resources from this test run
func (r PodTestRunner) Cleanup(ctx context.Context) (err error) { err = r.deletePods(ctx, r.configMapName) if err != nil { return err } err = r.deleteConfigMap(ctx, r.configMapName) if err != nil { return err } return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r PodTestRunner) Cleanup(ctx context.Context) (err error) {\n\n\terr = r.deletePods(ctx, r.configMapName)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = r.deleteConfigMap(ctx, r.configMapName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (p *PodmanTestIntegration) Cleanup() {\n\tp.StopVarlink()\n\t// TODO\n\t// Stop all containers\n\t// Rm all containers\n\n\tif err := os.RemoveAll(p.TempDir); err != nil {\n\t\tfmt.Printf(\"%q\\n\", err)\n\t}\n\n\t// Clean up the registries configuration file ENV variable set in Create\n\tresetRegistriesConfigEnv()\n}", "func (f *Framework) CleanUp(ns string) error {\n\tlogrus.Info(\"Cleaning up now.\")\n\tdefer logrus.Info(\"Finished cleanup.\")\n\tagonesV1 := f.AgonesClient.AgonesV1()\n\tdeleteOptions := metav1.DeleteOptions{}\n\tlistOptions := metav1.ListOptions{}\n\n\t// find and delete pods created by tests and labeled with our special label\n\tpods := f.KubeClient.CoreV1().Pods(ns)\n\tctx := context.Background()\n\tpodList, err := pods.List(ctx, metav1.ListOptions{\n\t\tLabelSelector: AutoCleanupLabelKey + \"=\" + AutoCleanupLabelValue,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor i := range podList.Items {\n\t\tp := &podList.Items[i]\n\t\tif err := pods.Delete(ctx, p.ObjectMeta.Name, deleteOptions); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\terr = agonesV1.Fleets(ns).DeleteCollection(ctx, deleteOptions, listOptions)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = f.AgonesClient.AutoscalingV1().FleetAutoscalers(ns).DeleteCollection(ctx, deleteOptions, listOptions)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn agonesV1.GameServers(ns).\n\t\tDeleteCollection(ctx, deleteOptions, listOptions)\n}", "func (p *PodmanTestIntegration) CleanupPod() {\n\t// TODO\n}", "func Teardown() {\n\tfmt.Println(\"====== Clean kubernetes testing pod ======\")\n\tres, err := kubeclient.ExecKubectl(\"kubectl delete pod -l app=nsenter\")\n\tfmt.Println(res)\n\tif err != nil {\n\t\tfmt.Println(\"Error: \" + err.Error())\n\t}\n}", "func (pm partitionMap) cleanup() {\n\tfor ns, partitions := range pm {\n\t\tfor i := range partitions.Replicas {\n\t\t\tfor j := range partitions.Replicas[i] {\n\t\t\t\tpartitions.Replicas[i][j] = nil\n\t\t\t}\n\t\t\tpartitions.Replicas[i] = nil\n\t\t}\n\n\t\tpartitions.Replicas = nil\n\t\tpartitions.regimes = nil\n\n\t\tdelete(pm, ns)\n\t}\n}", "func CleanUp(ctx context.Context, cfg *config.Config, pipeline *pipelines.Pipeline, name names.Name) error {\n\tkubectlPath, err := cfg.Tools[config.Kubectl].Resolve()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcmd := proc.GracefulCommandContext(ctx, kubectlPath, \"delete\",\n\t\t\"all\",\n\t\t\"-l\", k8s.StackLabel+\"=\"+name.DNSName(),\n\t)\n\tif err := cmd.Run(); err != nil {\n\t\treturn fmt.Errorf(\"could not delete k8s resources: %v\", err)\n\t}\n\treturn nil\n}", "func (h *HealthCheck) cleanup() {\n\tif h.frameworkError != nil && h.namespace != nil {\n\t\tglog.V(4).Infof(\"Cleaning up. 
Deleting the binding, instance and test namespace %v\", h.namespace.Name)\n\t\th.serviceCatalogClientSet.ServicecatalogV1beta1().ServiceBindings(h.namespace.Name).Delete(h.bindingName, nil)\n\t\th.serviceCatalogClientSet.ServicecatalogV1beta1().ServiceInstances(h.namespace.Name).Delete(h.instanceName, nil)\n\t\tDeleteKubeNamespace(h.kubeClientSet, h.namespace.Name)\n\t\th.namespace = nil\n\t}\n}", "func (p *Pod) cleanupFiles(silent bool) error {\n\tfor _, ns := range p.namespaces {\n\t\tglog.V(8).Infof(\"Removing binded namespace %s\", ns.Path)\n\t\terr := namespace.Remove(ns)\n\t\tif err != nil && !silent {\n\t\t\treturn fmt.Errorf(\"could not remove namespace: %v\", err)\n\t\t}\n\t}\n\tglog.V(8).Infof(\"Removing pod base directory %s\", p.baseDir)\n\terr := os.RemoveAll(p.baseDir)\n\tif err != nil && !silent {\n\t\treturn fmt.Errorf(\"could not cleanup pod: %v\", err)\n\t}\n\tglog.V(8).Infof(\"Removing pod log directory %s\", p.GetLogDirectory())\n\terr = os.RemoveAll(p.GetLogDirectory())\n\tif err != nil && !silent {\n\t\treturn fmt.Errorf(\"could not remove log directory: %v\", err)\n\t}\n\treturn nil\n}", "func performCleanup(ctx context.Context, clientMap map[string]kubernetes.Interface, flags flags) error {\n\tfor _, cluster := range flags.memberClusters {\n\t\tc := clientMap[cluster]\n\t\tif err := cleanupClusterResources(ctx, c, cluster, flags.memberClusterNamespace); err != nil {\n\t\t\treturn xerrors.Errorf(\"failed cleaning up cluster %s namespace %s: %w\", cluster, flags.memberClusterNamespace, err)\n\t\t}\n\t}\n\tc := clientMap[flags.centralCluster]\n\tif err := cleanupClusterResources(ctx, c, flags.centralCluster, flags.centralClusterNamespace); err != nil {\n\t\treturn xerrors.Errorf(\"failed cleaning up cluster %s namespace %s: %w\", flags.centralCluster, flags.centralClusterNamespace, err)\n\t}\n\treturn nil\n}", "func cleanupKubectlInputs(fileContents string, ns string, selectors ...string) {\n\tginkgo.By(\"using delete to clean up resources\")\n\t// support backward compatibility : file paths or raw json - since we are removing file path\n\t// dependencies from this test.\n\te2ekubectl.RunKubectlOrDieInput(ns, fileContents, \"delete\", \"--grace-period=0\", \"--force\", \"-f\", \"-\")\n\tassertCleanup(ns, selectors...)\n}", "func (f *HelmConfiguration) Cleanup() {\n\tif len(f.Folder) > 0 {\n\t\tos.RemoveAll(f.Folder)\n\t}\n}", "func cleanup() {\n\tos.Remove(dummyPath)\n}", "func Clean(c Config) {\n\n\tSetup(&c)\n\tContainers, _ := model.DockerContainerList()\n\n\tfor _, Container := range Containers {\n\t\ttarget := false\n\t\tif l := Container.Labels[\"pygmy.enable\"]; l == \"true\" || l == \"1\" {\n\t\t\ttarget = true\n\t\t}\n\t\tif l := Container.Labels[\"pygmy\"]; l == \"pygmy\" {\n\t\t\ttarget = true\n\t\t}\n\n\t\tif target {\n\t\t\terr := model.DockerKill(Container.ID)\n\t\t\tif err == nil {\n\t\t\t\tfmt.Printf(\"Successfully killed %v.\\n\", Container.Names[0])\n\t\t\t}\n\n\t\t\terr = model.DockerRemove(Container.ID)\n\t\t\tif err == nil {\n\t\t\t\tfmt.Printf(\"Successfully removed %v.\\n\", Container.Names[0])\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, network := range c.Networks {\n\t\tmodel.DockerNetworkRemove(&network)\n\t\tif s, _ := model.DockerNetworkStatus(&network); s {\n\t\t\tfmt.Printf(\"Successfully removed network %v\\n\", network.Name)\n\t\t} else {\n\t\t\tfmt.Printf(\"Network %v was not removed\\n\", network.Name)\n\t\t}\n\t}\n\n\tfor _, resolver := range c.Resolvers {\n\t\tresolver.Clean()\n\t}\n}", "func (r *Redis) Cleanup() 
{\n\tos.RemoveAll(\"/tmp/go-harness\")\n}", "func TearDown(clients *test.Clients, names test.ResourceNames, logger *zap.SugaredLogger) {\n\tif clients != nil {\n\t\tclients.Delete([]string{names.Route}, []string{names.Config})\n\t}\n\n\t// There seems to be an Istio bug where if we delete / create\n\t// VirtualServices too quickly we will hit pro-longed \"No health\n\t// upstream\" causing timeouts. Adding this small sleep to\n\t// sidestep the issue.\n\t//\n\t// TODO(#1376): Fix this when upstream fix is released.\n\tlogger.Info(\"Sleeping for 20 seconds after Route deletion to avoid hitting issue in #1376\")\n\ttime.Sleep(20 * time.Second)\n}", "func (s *JobApiTestSuite) cleanUp() {\n\ttest.DeleteAllRuns(s.runClient, s.resourceNamespace, s.T())\n\ttest.DeleteAllJobs(s.jobClient, s.resourceNamespace, s.T())\n\ttest.DeleteAllPipelines(s.pipelineClient, s.T())\n\ttest.DeleteAllExperiments(s.experimentClient, s.resourceNamespace, s.T())\n}", "func (td *OsmTestData) Cleanup(ct CleanupType) {\n\tif td.Client == nil {\n\t\t// Avoid any cleanup (crash) if no test is run;\n\t\t// init doesn't happen and clientsets are nil\n\t\treturn\n\t}\n\n\t// Verify no crashes/restarts of OSM and control plane components were observed during the test\n\t// We will not immediately call Fail() here to not disturb the cleanup process, and instead\n\t// call it at the end of cleanup\n\trestartSeen := td.VerifyRestarts()\n\n\t// If collect logs or\n\t// (test failed, by either restarts were seen or because spec failed) and (collect logs on error)\n\tif td.CollectLogs == CollectLogs || td.CollectLogs == ControlPlaneOnly ||\n\t\t((restartSeen && !td.IgnoreRestarts) || CurrentGinkgoTestDescription().Failed) && td.CollectLogs == CollectLogsIfErrorOnly {\n\t\t// Grab logs. We will move this to use CLI when able.\n\n\t\tif err := td.GrabLogs(); err != nil {\n\t\t\ttd.T.Logf(\"Error getting logs: %v\", err)\n\t\t}\n\n\t\tif err := td.GetBugReport(); err != nil {\n\t\t\ttd.T.Logf(\"Error getting bug report: %v\", err)\n\t\t}\n\t}\n\n\tcleanupTrigger := td.CleanupTest\n\t// If we are on kind env\n\tif td.InstType == KindCluster {\n\t\t// Check if we can/want to avoid K8s cleanup\n\t\tcleanupTrigger = cleanupTrigger && td.shouldCleanupK8sOnKind(ct)\n\t}\n\n\tif cleanupTrigger {\n\t\t// Use selector to refer to all namespaces used in this test\n\t\tnsSelector := metav1.ListOptions{\n\t\t\tLabelSelector: labels.SelectorFromSet(td.GetTestNamespaceSelectorMap()).String(),\n\t\t}\n\n\t\ttestNs, err := td.Client.CoreV1().Namespaces().List(context.Background(), nsSelector)\n\t\tif err != nil {\n\t\t\ttd.T.Fatalf(\"Failed to get list of test NS: %v\", err)\n\t\t}\n\n\t\tfor _, ns := range testNs.Items {\n\t\t\terr := td.DeleteNs(ns.Name)\n\t\t\tif err != nil {\n\t\t\t\ttd.T.Logf(\"Err deleting ns %s: %v\", ns.Name, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tBy(fmt.Sprintf(\"[Cleanup] waiting for %s:%d test NS cleanup\", osmTest, GinkgoRandomSeed()))\n\t\tif td.WaitForCleanup {\n\t\t\terr := wait.Poll(2*time.Second, 240*time.Second,\n\t\t\t\tfunc() (bool, error) {\n\t\t\t\t\tnsList, err := td.Client.CoreV1().Namespaces().List(context.TODO(), nsSelector)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\ttd.T.Logf(\"Err waiting for ns list to disappear: %v\", err)\n\t\t\t\t\t\treturn false, err\n\t\t\t\t\t}\n\t\t\t\t\treturn len(nsList.Items) == 0, nil\n\t\t\t\t},\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\ttd.T.Logf(\"Error polling namespaces for deletion: %s\", err)\n\t\t\t\ttestNsInfo, _ := json.MarshalIndent(testNs, \"\", \" 
\")\n\t\t\t\ttd.T.Logf(\"Namespaces info:\\n%s\", string(testNsInfo))\n\t\t\t}\n\t\t}\n\t}\n\n\t// Kind cluster deletion, if needed\n\tif (td.InstType == KindCluster) && td.ClusterProvider != nil {\n\t\tif ct == Test && td.CleanupKindClusterBetweenTests || ct == Suite && td.CleanupKindCluster {\n\t\t\ttd.T.Logf(\"Deleting kind cluster: %s\", td.ClusterName)\n\t\t\tif err := td.ClusterProvider.Delete(td.ClusterName, clientcmd.RecommendedHomeFile); err != nil {\n\t\t\t\ttd.T.Logf(\"error deleting cluster: %v\", err)\n\t\t\t}\n\t\t\ttd.ClusterProvider = nil\n\t\t}\n\t}\n\n\t// Check restarts\n\tif restartSeen && !td.IgnoreRestarts {\n\t\tFail(\"Unexpected restarts for control plane processes were observed\")\n\t}\n}", "func (ts *TestSetup) Cleanup() {\n\tif ts.Server != nil {\n\t\tts.Server.Stop()\n\t}\n\tif ts.NC != nil {\n\t\tts.NC.Close()\n\t}\n\tif ts.GNATSD != nil {\n\t\tts.GNATSD.Shutdown()\n\t}\n\n\tif ts.SystemUserCredsFile != \"\" {\n\t\tos.Remove(ts.SystemUserCredsFile)\n\t}\n\n\tif ts.SystemAccountJWTFile != \"\" {\n\t\tos.Remove(ts.SystemAccountJWTFile)\n\t}\n\n\tif ts.OperatorJWTFile != \"\" {\n\t\tos.Remove(ts.SystemUserCredsFile)\n\t}\n}", "func cleanup(t *testing.T) {\n\tconn := connectAllZk(t)\n\terr := recursiveDelete(conn, zkPrefix)\n\tif err != nil {\n\t\tt.Fatalf(\"cleanup err=%q\", err)\n\t}\n\tconn.Close()\n}", "func (h *H) Cleanup() {\n\tif h.proj != nil {\n\t\terr := h.cleanup(h.proj.Name)\n\t\tExpect(err).ShouldNot(HaveOccurred(), \"could not delete project '%s'\", h.proj)\n\t}\n\n\th.restConfig = nil\n\th.proj = nil\n}", "func (p *DefaultProvisioner) Cleanup() error {\n\terr := p.terraform.destroy()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn testutil.NewExec(\"rm\",\n\t\ttestutil.WithArgs(\"-rf\", p.testPath),\n\t).Run()\n}", "func cleanup() {\n\tserver.Applications = []*types.ApplicationMetadata{}\n}", "func (setup *SimpleTestSetup) TearDown() {\n\tsetup.harnessPool.DisposeAll()\n\tsetup.harnessWalletPool.DisposeAll()\n\t//setup.nodeGoBuilder.Dispose()\n\tsetup.WorkingDir.Dispose()\n}", "func CleanupFiles(configuration *types.JobConfiguration) {\n\tjobName := configuration.JobName\n\tmapperInputFileNames := configuration.MapperInputFileNames\n\tnumReducers := configuration.NumReducers\n\n\t// Clean up mapper output files.\n\tfor mapTaskIdx := range mapperInputFileNames {\n\t\tfor reduceTaskIdx := 0; reduceTaskIdx < numReducers; reduceTaskIdx++ {\n\t\t\tfileName := IntermediateFileName(\n\t\t\t\tjobName, mapTaskIdx, reduceTaskIdx,\n\t\t\t)\n\t\t\tos.Remove(fileName)\n\t\t}\n\t}\n\n\t// Clean up reducer output files.\n\tfor reduceTaskIdx := 0; reduceTaskIdx < numReducers; reduceTaskIdx++ {\n\t\tfileName := ReducerOutputFileName(jobName, reduceTaskIdx)\n\t\tos.Remove(fileName)\n\t}\n}", "func cleanup() {\n\tlog.Verbose(\"Cleaning up sensitive and temp files\")\n\tif _, err := os.Stat(\"ca.crt\"); err == nil {\n\t\tdeleteFile(\"ca.crt\")\n\t}\n\n\tif _, err := os.Stat(\"ca.key\"); err == nil {\n\t\tdeleteFile(\"ca.key\")\n\t}\n\n\tif _, err := os.Stat(\"client.crt\"); err == nil {\n\t\tdeleteFile(\"client.crt\")\n\t}\n\n\tif _, err := os.Stat(\"bearer.token\"); err == nil {\n\t\tdeleteFile(\"bearer.token\")\n\t}\n\n\tfor _, app := range s.Apps {\n\t\tif _, err := os.Stat(app.SecretsFile + \".dec\"); err == nil {\n\t\t\tdeleteFile(app.SecretsFile + \".dec\")\n\t\t}\n\t\tfor _, secret := range app.SecretsFiles {\n\t\t\tif _, err := os.Stat(secret + \".dec\"); err == nil {\n\t\t\t\tdeleteFile(secret + \".dec\")\n\t\t\t}\n\t\t}\n\t}\n\n}", "func (r 
*ReconcileAerospikeCluster) cleanupPods(aeroCluster *aerospikev1alpha1.AerospikeCluster, podNames []string, rackState RackState) error {\n\tlogger := pkglog.New(log.Ctx{\"AerospikeCluster\": utils.ClusterNamespacedName(aeroCluster)})\n\n\tlogger.Info(\"Removing pvc for removed pods\", log.Ctx{\"pods\": podNames})\n\n\t// Delete PVCs if cascadeDelete\n\tpvcItems, err := r.getPodsPVCList(aeroCluster, podNames, rackState.Rack.ID)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not find pvc for pods %v: %v\", podNames, err)\n\t}\n\tstorage := rackState.Rack.Storage\n\tif err := r.removePVCs(aeroCluster, &storage, pvcItems); err != nil {\n\t\treturn fmt.Errorf(\"Could not cleanup pod PVCs: %v\", err)\n\t}\n\n\tneedStatusCleanup := []string{}\n\n\tclusterPodList, err := r.getClusterPodList(aeroCluster)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not cleanup pod PVCs: %v\", err)\n\t}\n\n\tfor _, podName := range podNames {\n\t\t// Clear references to this pod in the running cluster.\n\t\tfor _, np := range clusterPodList.Items {\n\t\t\t// TODO: We remove node from the end. Nodes will not have seed of successive nodes\n\t\t\t// So this will be no op.\n\t\t\t// We should tip in all nodes the same seed list,\n\t\t\t// then only this will have any impact. Is it really necessary?\n\n\t\t\t// TODO: tip after scaleup and create\n\t\t\t// All nodes from other rack\n\t\t\tr.tipClearHostname(aeroCluster, &np, podName)\n\n\t\t\tr.alumniReset(aeroCluster, &np)\n\t\t}\n\n\t\tif aeroCluster.Spec.MultiPodPerHost {\n\t\t\t// Remove service for pod\n\t\t\t// TODO: make it more roboust, what if it fails\n\t\t\tif err := r.deleteServiceForPod(podName, aeroCluster.Namespace); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t_, ok := aeroCluster.Status.Pods[podName]\n\t\tif ok {\n\t\t\tneedStatusCleanup = append(needStatusCleanup, podName)\n\t\t}\n\t}\n\n\tif len(needStatusCleanup) > 0 {\n\t\tlogger.Info(\"Removing pod status for dangling pods\", log.Ctx{\"pods\": podNames})\n\n\t\tif err := r.removePodStatus(aeroCluster, needStatusCleanup); err != nil {\n\t\t\treturn fmt.Errorf(\"Could not cleanup pod status: %v\", err)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (t *Tube) Cleanup(a *app.App) {}", "func (r *ContainerizedWorkloadReconciler) cleanupResources(ctx context.Context,\n\tworkload *oamv1alpha2.ContainerizedWorkload, deployUID, serviceUID *types.UID) error {\n\tlog := r.Log.WithValues(\"gc deployment\", workload.Name)\n\tvar deploy appsv1.Deployment\n\tvar service corev1.Service\n\tfor _, res := range workload.Status.Resources {\n\t\tuid := res.UID\n\t\tif res.Kind == KindDeployment {\n\t\t\tif uid != *deployUID {\n\t\t\t\tlog.Info(\"Found an orphaned deployment\", \"deployment UID\", *deployUID, \"orphaned UID\", uid)\n\t\t\t\tdn := client.ObjectKey{Name: res.Name, Namespace: workload.Namespace}\n\t\t\t\tif err := r.Get(ctx, dn, &deploy); err != nil {\n\t\t\t\t\tif apierrors.IsNotFound(err) {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := r.Delete(ctx, &deploy); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tlog.Info(\"Removed an orphaned deployment\", \"deployment UID\", *deployUID, \"orphaned UID\", uid)\n\t\t\t}\n\t\t} else if res.Kind == KindService {\n\t\t\tif uid != *serviceUID {\n\t\t\t\tlog.Info(\"Found an orphaned service\", \"orphaned UID\", uid)\n\t\t\t\tsn := client.ObjectKey{Name: res.Name, Namespace: workload.Namespace}\n\t\t\t\tif err := r.Get(ctx, sn, &service); err != nil {\n\t\t\t\t\tif apierrors.IsNotFound(err) 
{\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := r.Delete(ctx, &service); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tlog.Info(\"Removed an orphaned service\", \"orphaned UID\", uid)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func cleanup(t *testing.T, namespace string) {\n\targs := \"delete\"\n\tif namespace != \"\" {\n\t\targs += \" -n \" + namespace\n\t}\n\n\terr, stdout, stderr := runSonobuoyCommand(t, args)\n\n\tif err != nil {\n\t\tt.Logf(\"Error encountered during cleanup: %q\\n\", err)\n\t\tt.Log(stdout.String())\n\t\tt.Log(stderr.String())\n\t}\n}", "func Clean() error {\n\tfixtureDir := filepath.Join(\"integration\", \"testdata\", \"fixtures\")\n\tpaths := []string{\n\t\tfilepath.Join(fixtureDir, \"images\"),\n\t\tfilepath.Join(fixtureDir, \"vm-images\"),\n\t}\n\tfor _, p := range paths {\n\t\tif err := sh.Rm(p); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (r PodTestRunner) deletePods(ctx context.Context, configMapName string) error {\n\tdo := metav1.DeleteOptions{}\n\tselector := fmt.Sprintf(\"testrun=%s\", configMapName)\n\tlo := metav1.ListOptions{LabelSelector: selector}\n\terr := r.Client.CoreV1().Pods(r.Namespace).DeleteCollection(ctx, do, lo)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error deleting pods (label selector %q): %w\", selector, err)\n\t}\n\treturn nil\n}", "func (f *Framework) AfterEach() {\n\tginkgo.By(fmt.Sprintf(\"Destory the namespace, basename %s\", f.BaseName))\n\terr := f.KubeClient.Delete(context.Background(), f.Namespace)\n\tExpectNoError(err)\n}", "func (s *StepConfigureHardware) Cleanup(multistep.StateBag) {}", "func Cleanup() {\n\terr := lib.DeleteService(listOfServicesCreated, namespace)\n\tif err != nil {\n\t\tfmt.Println(\"ERROR : Cleanup of Services \", listOfServicesCreated, \" failed due to the error : \", err)\n\t}\n\terr = lib.DeleteApp(appName, namespace)\n\tif err != nil {\n\t\tfmt.Println(\"ERROR : Cleanup of Deployment \"+appName+\" failed due to the error : \", err)\n\t}\n}", "func (p *AWSProvisioner) Cleanup() error {\n\terr := p.terraform.destroy()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%v\", err)\n\t}\n\n\t_, err = executeCommand(\"\", \"rm\", []string{\"-rf\", p.testPath}, nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%v\", err)\n\t}\n\n\treturn nil\n}", "func (p *DOProvisioner) Cleanup() error {\n\terr := p.terraform.destroy()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%v\", err)\n\t}\n\n\t_, err = executeCommand(\"\", \"rm\", []string{\"-rf\", p.testPath}, nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%v\", err)\n\t}\n\n\treturn nil\n}", "func (atb *adminXLTestBed) TearDown() {\n\tos.RemoveAll(atb.configPath)\n\tremoveRoots(atb.xlDirs)\n\tresetTestGlobals()\n}", "func (s *Spec) Cleanup() error {\n\tif s.stopWatching != nil {\n\t\tclose(s.stopWatching)\n\t}\n\n\treturn s.Services.Stop()\n}", "func assertCleanup(ns string, selectors ...string) {\n\tvar e error\n\tverifyCleanupFunc := func() (bool, error) {\n\t\te = nil\n\t\tfor _, selector := range selectors {\n\t\t\tresources := e2ekubectl.RunKubectlOrDie(ns, \"get\", \"rc,svc\", \"-l\", selector, \"--no-headers\")\n\t\t\tif resources != \"\" {\n\t\t\t\te = fmt.Errorf(\"Resources left running after stop:\\n%s\", resources)\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\tpods := e2ekubectl.RunKubectlOrDie(ns, \"get\", \"pods\", \"-l\", selector, \"-o\", \"go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \\\"\\\\n\\\" }}{{ end }}{{ end }}\")\n\t\t\tif pods != 
\"\" {\n\t\t\t\te = fmt.Errorf(\"Pods left unterminated after stop:\\n%s\", pods)\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\treturn true, nil\n\t}\n\terr := wait.PollImmediate(500*time.Millisecond, 1*time.Minute, verifyCleanupFunc)\n\tif err != nil {\n\t\tframework.Failf(e.Error())\n\t}\n}", "func (m *MinikubeRunner) TearDown(t *testing.T) {\n\tprofileArg := fmt.Sprintf(\"-p=%s\", m.Profile)\n\tpath, _ := filepath.Abs(m.BinaryPath)\n\tcmd := exec.Command(path, profileArg, \"delete\")\n\terr := cmd.Start() // don't wait for it to finish\n\tif err != nil {\n\t\tt.Errorf(\"error tearing down minikube %s : %v\", profileArg, err)\n\t}\n}", "func (f *fixture) cleanUp(ctx context.Context, s *testing.FixtState) {\n\tif err := ash.CloseAllWindows(ctx, f.tconn); err != nil {\n\t\ts.Error(\"Failed trying to close all windows: \", err)\n\t}\n\n\tf.tconn = nil\n\n\tif len(f.drivefsOptions) > 0 && f.driveFs != nil {\n\t\tif err := f.driveFs.ClearCommandLineFlags(); err != nil {\n\t\t\ts.Fatal(\"Failed to remove command line args file: \", err)\n\t\t}\n\t}\n\tf.driveFs = nil\n\tf.mountPath = \"\"\n\n\t// Clean up files in this account that are older than 1 hour, files past this\n\t// date are assumed no longer required and were not successfully cleaned up.\n\t// Note this removal can take a while ~1s per file and may end up exceeding\n\t// the timeout, this is not a failure as the next run will try to remove the\n\t// files that weren't deleted in time.\n\tfileList, err := f.APIClient.ListAllFilesOlderThan(ctx, time.Hour)\n\tif err != nil {\n\t\ts.Error(\"Failed to list all my drive files: \", err)\n\t} else {\n\t\ts.Logf(\"Attempting to remove %d files older than 1 hour\", len(fileList.Files))\n\t\tfor _, i := range fileList.Files {\n\t\t\tif err := f.APIClient.RemoveFileByID(ctx, i.Id); err != nil {\n\t\t\t\ts.Logf(\"Failed to remove file %q (%s): %v\", i.Name, i.Id, err)\n\t\t\t} else {\n\t\t\t\ts.Logf(\"Successfully removed file %q (%s, %s)\", i.Name, i.Id, i.ModifiedTime)\n\t\t\t}\n\t\t}\n\t}\n\tf.APIClient = nil\n\tif f.cr != nil {\n\t\tif err := f.cr.Close(ctx); err != nil {\n\t\t\ts.Log(\"Failed closing chrome: \", err)\n\t\t}\n\t\tf.cr = nil\n\t}\n}", "func (p *Plugin) Cleanup(kubeclient kubernetes.Interface) {\n\tp.cleanedUp = true\n\tgracePeriod := int64(1)\n\tdeletionPolicy := metav1.DeletePropagationBackground\n\n\tlistOptions := p.listOptions()\n\tdeleteOptions := metav1.DeleteOptions{\n\t\tGracePeriodSeconds: &gracePeriod,\n\t\tPropagationPolicy: &deletionPolicy,\n\t}\n\n\t// Delete the DaemonSet created by this plugin\n\terr := kubeclient.ExtensionsV1beta1().DaemonSets(p.Namespace).DeleteCollection(\n\t\t&deleteOptions,\n\t\tlistOptions,\n\t)\n\tif err != nil {\n\t\terrlog.LogError(errors.Wrapf(err, \"could not delete DaemonSet %v for daemonset plugin %v\", p.daemonSetName(), p.GetName()))\n\t}\n\n\t// Delete the ConfigMap created by this plugin\n\terr = kubeclient.CoreV1().ConfigMaps(p.Namespace).DeleteCollection(\n\t\t&deleteOptions,\n\t\tlistOptions,\n\t)\n\tif err != nil {\n\t\terrlog.LogError(errors.Wrapf(err, \"could not delete ConfigMap %v for daemonset plugin %v\", p.configMapName(), p.GetName()))\n\t}\n}", "func (i *InteractiveMode) Cleanup(congress *lassie.Client, app lassie.Application, gw lassie.Gateway) {\n\tif !i.Config.KeepDevices {\n\t\tfor _, v := range i.devices {\n\t\t\tcongress.DeleteDevice(app.EUI, v.device.EUI)\n\t\t}\n\t}\n}", "func Cleanup(ctx context.Context, config *tsbridge.Config) error {\n\tstore, err := LoadStorageEngine(ctx, config)\n\tif err != nil 
{\n\t\treturn err\n\t}\n\tdefer store.Close()\n\n\tmetrics, err := tsbridge.NewMetricConfig(ctx, config, store)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar metricNames []string\n\tfor _, m := range metrics.Metrics() {\n\t\tmetricNames = append(metricNames, m.Name)\n\t}\n\n\tif err := store.CleanupRecords(ctx, metricNames); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func Cleanup(ctx context.Context, config *tsbridge.Config) error {\n\tstore, err := LoadStorageEngine(ctx, config)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer store.Close()\n\n\tmetrics, err := tsbridge.NewMetricConfig(ctx, config, store)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar metricNames []string\n\tfor _, m := range metrics.Metrics() {\n\t\tmetricNames = append(metricNames, m.Name)\n\t}\n\n\tif err := store.CleanupRecords(ctx, metricNames); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (p *HetznerProvisioner) Cleanup() error {\n\terr := p.terraform.destroy()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%v\", err)\n\t}\n\n\t_, err = executeCommand(\"\", \"rm\", []string{\"-rf\", p.testPath}, nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%v\", err)\n\t}\n\n\treturn nil\n}", "func TearDown(w http.ResponseWriter, r *http.Request) {\n\tctx := r.Context()\n\n\tcontainer := di.Get()\n\n\tif err := container.TaskRepository.DeleteAll(ctx); err != nil {\n\t\thttp.Error(w, \"taskRepository.DeleteAll error: \"+err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif err := container.UserRepository.DeleteAll(ctx); err != nil {\n\t\thttp.Error(w, \"userRepository.DeleteAll error: \"+err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tfmt.Fprint(w, \"done\")\n}", "func (c *projectController) ensureProjectCleanup(project *kubermaticv1.Project) error {\n\t// cluster resources don't have OwnerReferences set thus we need to manually remove them\n\tfor _, clusterProvider := range c.seedClusterProviders {\n\t\tif clusterProvider.clusterResourceLister == nil {\n\t\t\treturn fmt.Errorf(\"there is no lister for cluster resources for cluster provider %s\", clusterProvider.providerName)\n\t\t}\n\t\tclusterResources, err := clusterProvider.clusterResourceLister.List(labels.Everything())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, clusterResource := range clusterResources {\n\t\t\tif clusterProject := clusterResource.Labels[kubermaticv1.ProjectIDLabelKey]; clusterProject == project.Name {\n\t\t\t\terr := clusterProvider.kubermaticClient.KubermaticV1().Clusters().Delete(clusterResource.Name, &metav1.DeleteOptions{})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// remove subjects from Cluster RBAC Bindings for project's resources\n\tfor _, projectResource := range c.projectResources {\n\t\tif len(projectResource.namespace) > 0 {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, groupPrefix := range AllGroupsPrefixes {\n\t\t\tgroupName := GenerateActualGroupNameFor(project.Name, groupPrefix)\n\t\t\tif skip, err := shouldSkipClusterRBACRoleBindingFor(groupName, projectResource.gvr.Resource, kubermaticv1.SchemeGroupVersion.Group, project.Name, projectResource.kind); skip {\n\t\t\t\tcontinue\n\t\t\t} else if err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif projectResource.destination == destinationSeed {\n\t\t\t\tfor _, seedClusterProvider := range c.seedClusterProviders {\n\t\t\t\t\tseedClusterRESTClient := seedClusterProvider.kubeClient\n\t\t\t\t\terr := cleanUpClusterRBACRoleBindingFor(seedClusterRESTClient, groupName, 
projectResource.gvr.Resource)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\terr := cleanUpClusterRBACRoleBindingFor(c.masterClusterProvider.kubeClient, groupName, projectResource.gvr.Resource)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// remove subjects from RBAC Bindings for project's resources\n\tfor _, projectResource := range c.projectResources {\n\t\tif len(projectResource.namespace) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, groupPrefix := range AllGroupsPrefixes {\n\t\t\tgroupName := GenerateActualGroupNameFor(project.Name, groupPrefix)\n\t\t\tif skip, err := shouldSkipRBACRoleBindingFor(groupName, projectResource.gvr.Resource, kubermaticv1.SchemeGroupVersion.Group, project.Name, projectResource.kind, projectResource.namespace); skip {\n\t\t\t\tcontinue\n\t\t\t} else if err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif projectResource.destination == destinationSeed {\n\t\t\t\tfor _, seedClusterProvider := range c.seedClusterProviders {\n\t\t\t\t\tseedClusterRESTClient := seedClusterProvider.kubeClient\n\t\t\t\t\terr := cleanUpRBACRoleBindingFor(seedClusterRESTClient, groupName, projectResource.gvr.Resource, projectResource.namespace)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\terr := cleanUpRBACRoleBindingFor(c.masterClusterProvider.kubeClient, groupName, projectResource.gvr.Resource, projectResource.namespace)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tkuberneteshelper.RemoveFinalizer(project, CleanupFinalizerName)\n\t_, err := c.masterClusterProvider.kubermaticClient.KubermaticV1().Projects().Update(project)\n\treturn err\n}", "func (r *PodsIncomingReflector) CleanupNamespace(namespace string) {\n\tforeignNamespace, err := r.NattingTable().NatNamespace(namespace)\n\tif err != nil {\n\t\tklog.Error(err)\n\t\treturn\n\t}\n\n\tobjects, err := r.GetCacheManager().ListForeignNamespacedObject(apimgmt.Pods, foreignNamespace)\n\tif err != nil {\n\t\tklog.Errorf(\"error while listing foreign objects in namespace %v\", foreignNamespace)\n\t\treturn\n\t}\n\n\tretriable := func(err error) bool {\n\t\tswitch kerrors.ReasonForError(err) {\n\t\tcase metav1.StatusReasonNotFound:\n\t\t\treturn false\n\t\tdefault:\n\t\t\tklog.Warningf(\"retrying while deleting pod because of ERR; %v\", err)\n\t\t\treturn true\n\t\t}\n\t}\n\n\tfor _, obj := range objects {\n\t\tforeignPod := obj.(*corev1.Pod)\n\t\tif foreignPod.Labels == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\thomePodName, ok := foreignPod.Labels[virtualKubelet.ReflectedpodKey]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\t// allow deletion of the related homePod by removing its finalizer\n\t\tfinalizerPatch := []byte(fmt.Sprintf(\n\t\t\t`[{\"op\":\"remove\",\"path\":\"/metadata/finalizers\",\"value\":[\"%s\"]}]`,\n\t\t\tvirtualKubelet.HomePodFinalizer))\n\n\t\t_, err = r.GetHomeClient().CoreV1().Pods(namespace).Patch(context.TODO(),\n\t\t\thomePodName,\n\t\t\ttypes.JSONPatchType,\n\t\t\tfinalizerPatch,\n\t\t\tmetav1.PatchOptions{})\n\t\tif err != nil {\n\t\t\tklog.Error(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := retry.OnError(retry.DefaultBackoff, retriable, func() error {\n\t\t\treturn r.GetHomeClient().CoreV1().Pods(namespace).Delete(context.TODO(), homePodName, metav1.DeleteOptions{})\n\t\t}); err != nil {\n\t\t\tklog.Errorf(\"Error while deleting home pod %v/%v - ERR: %v\", namespace, homePodName, err)\n\t\t}\n\t}\n}", "func (tc *testContext) cleanup() 
{\n\ttc.osdkTestCtx.Cleanup()\n}", "func (env testEnvironment) Cleanup() error {\n\tif err := env.DeleteProject(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func cleanup(config *Config) {\n\twriteErr := config.WriteConfig(configPath)\n\tif writeErr != nil {\n\t\tError.Println(writeErr.Error())\n\t\treturn\n\t}\n}", "func CommonAfterSuite() {\n\n\t// run all registered cleanup functions\n\tRunCleanupActions()\n\n\tresourcesDir, err := filepath.Abs(filepath.Join(\"..\", \"..\", \"framework\", \"resources\"))\n\tExpectNoError(err)\n\terr = os.RemoveAll(filepath.Join(resourcesDir, \"charts\"))\n\tgomega.Expect(err).NotTo(gomega.HaveOccurred())\n\n\terr = os.RemoveAll(filepath.Join(resourcesDir, \"repository\", \"cache\"))\n\tgomega.Expect(err).NotTo(gomega.HaveOccurred())\n}", "func CleanupSuite() {\n\t// Run on all Ginkgo nodes\n}", "func (tr *TestRunner) CleanUp() error {\n\tfor imsi := range tr.imsis {\n\t\terr := deleteSubscribersFromHSS(imsi)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tfor _, instance := range tr.activePCRFs {\n\t\terr := clearSubscribersFromPCRFPerInstance(instance)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tfor _, instance := range tr.activeOCSs {\n\t\terr := clearSubscribersFromOCSPerInstance(instance)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (fr *Runner) Cleanup() {\n\tif fr.LocalDataDir != \"\" {\n\t\tos.RemoveAll(fr.LocalDataDir) //nolint:errcheck\n\t}\n}", "func (c *DefaultCleaner) Cleanup(config *api.Config) {\n\tif config.PreserveWorkingDir {\n\t\tglog.Infof(\"Temporary directory '%s' will be saved, not deleted\", config.WorkingDir)\n\t} else {\n\t\tglog.V(2).Infof(\"Removing temporary directory %s\", config.WorkingDir)\n\t\tc.fs.RemoveDirectory(config.WorkingDir)\n\t}\n\tif config.LayeredBuild {\n\t\tglog.V(2).Infof(\"Removing temporary image %s\", config.BuilderImage)\n\t\tc.docker.RemoveImage(config.BuilderImage)\n\t}\n}", "func (t *tInfo) teardown() {\n\tt.recorders.close()\n\n\tif t.apiClient != nil {\n\t\tt.apiClient.ClusterV1().Version().Delete(context.Background(), &api.ObjectMeta{Name: t.testName})\n\t\tt.apiClient.Close()\n\t\tt.apiClient = nil\n\t}\n\n\tif t.esClient != nil {\n\t\tt.esClient.Close()\n\t}\n\n\ttestutils.StopElasticsearch(t.elasticsearchName, t.elasticsearchDir)\n\n\tif t.mockCitadelQueryServer != nil {\n\t\tt.mockCitadelQueryServer.Stop()\n\t\tt.mockCitadelQueryServer = nil\n\t}\n\n\tif t.evtsMgr != nil {\n\t\tt.evtsMgr.Stop()\n\t\tt.evtsMgr = nil\n\t}\n\n\tt.evtProxyServices.Stop()\n\n\tif t.apiServer != nil {\n\t\tt.apiServer.Stop()\n\t\tt.apiServer = nil\n\t}\n\n\t// stop certificate server\n\ttestutils.CleanupIntegTLSProvider()\n\n\tif t.mockResolver != nil {\n\t\tt.mockResolver.Stop()\n\t\tt.mockResolver = nil\n\t}\n\n\t// remove the local persistent events store\n\tt.logger.Infof(\"removing events store %s\", t.storeConfig.Dir)\n\tos.RemoveAll(t.storeConfig.Dir)\n\n\tt.logger.Infof(\"completed test\")\n}", "func CleanupTestHarness() {\n\tcleanupCerts()\n}", "func cleanBootstrapSetup(workingDirectoryPath string) error {\n\n\t// Stop Yorc server\n\tif yorcServerShutdownChan != nil {\n\t\tclose(yorcServerShutdownChan)\n\t\tyorcServerOutputFile.Close()\n\t} else {\n\t\tcmd := exec.Command(\"pkill\", \"-f\", \"yorc server\")\n\t\tcmd.Run()\n\t}\n\n\t// stop Consul\n\tif cmdConsul != nil {\n\t\tif err := cmdConsul.Process.Kill(); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tcmd := exec.Command(\"pkill\", \"consul\")\n\t\tcmd.Run()\n\n\t}\n\n\t// Clean 
working directories\n\tos.RemoveAll(filepath.Join(workingDirectoryPath, \"bootstrapResources\"))\n\tos.RemoveAll(filepath.Join(workingDirectoryPath, \"deployments\"))\n\tos.RemoveAll(filepath.Join(workingDirectoryPath, \"consul-data\"))\n\tos.Remove(filepath.Join(workingDirectoryPath, \"config.yorc.yaml\"))\n\tos.Remove(filepath.Join(workingDirectoryPath, \"locations.yorc.yaml\"))\n\tos.Remove(filepath.Join(workingDirectoryPath, \"yorc.log\"))\n\n\tfmt.Println(\"Local setup cleaned up\")\n\treturn nil\n\n}", "func cleanTests(servicesDir string) {\n\t// Remove the 0-basic used for non building tests\n\tos.RemoveAll(filepath.Join(servicesDir, \"0-basic\"))\n\t// Clean up the service directories in each test\n\tdirs, _ := ioutil.ReadDir(servicesDir)\n\tfor _, d := range dirs {\n\t\t// If this item is not a directory skip it\n\t\tif !d.IsDir() {\n\t\t\tcontinue\n\t\t}\n\t\tremoveTestFiles(filepath.Join(servicesDir, d.Name()))\n\t}\n}", "func (n *Network) Cleanup() {\n\tdefer func() {\n\t\tlock.Unlock()\n\t\tn.T.Log(\"released test network lock\")\n\t}()\n\n\tn.T.Log(\"cleaning up test network...\")\n\n\tfor _, v := range n.Validators {\n\t\tif v.tmNode != nil && v.tmNode.IsRunning() {\n\t\t\t_ = v.tmNode.Stop()\n\t\t}\n\n\t\tif v.api != nil {\n\t\t\t_ = v.api.Close()\n\t\t}\n\n\t\tif v.grpc != nil {\n\t\t\tv.grpc.Stop()\n\t\t\tif v.grpcWeb != nil {\n\t\t\t\t_ = v.grpcWeb.Close()\n\t\t\t}\n\t\t}\n\t}\n\n\tif n.Config.CleanupDir {\n\t\t_ = os.RemoveAll(n.BaseDir)\n\t}\n\n\tn.T.Log(\"finished cleaning up test network\")\n}", "func cleanupClusterResources(ctx context.Context, clientset kubernetes.Interface, clusterName, namespace string) error {\n\tlistOpts := metav1.ListOptions{\n\t\tLabelSelector: \"multi-cluster=true\",\n\t}\n\n\t// clean up secrets\n\tsecretList, err := clientset.CoreV1().Secrets(namespace).List(ctx, listOpts)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif secretList != nil {\n\t\tfor _, s := range secretList.Items {\n\t\t\tfmt.Printf(\"Deleting Secret: %s in cluster %s\\n\", s.Name, clusterName)\n\t\t\tif err := clientset.CoreV1().Secrets(namespace).Delete(ctx, s.Name, metav1.DeleteOptions{}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// clean up service accounts\n\tserviceAccountList, err := clientset.CoreV1().ServiceAccounts(namespace).List(ctx, listOpts)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif serviceAccountList != nil {\n\t\tfor _, sa := range serviceAccountList.Items {\n\t\t\tfmt.Printf(\"Deleting ServiceAccount: %s in cluster %s\\n\", sa.Name, clusterName)\n\t\t\tif err := clientset.CoreV1().ServiceAccounts(namespace).Delete(ctx, sa.Name, metav1.DeleteOptions{}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// clean up roles\n\troleList, err := clientset.RbacV1().Roles(namespace).List(ctx, listOpts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, r := range roleList.Items {\n\t\tfmt.Printf(\"Deleting Role: %s in cluster %s\\n\", r.Name, clusterName)\n\t\tif err := clientset.RbacV1().Roles(namespace).Delete(ctx, r.Name, metav1.DeleteOptions{}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// clean up roles\n\troles, err := clientset.RbacV1().Roles(namespace).List(ctx, listOpts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif roles != nil {\n\t\tfor _, r := range roles.Items {\n\t\t\tfmt.Printf(\"Deleting Role: %s in cluster %s\\n\", r.Name, clusterName)\n\t\t\tif err := clientset.RbacV1().Roles(namespace).Delete(ctx, r.Name, metav1.DeleteOptions{}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// clean up 
role bindings\n\troleBindings, err := clientset.RbacV1().RoleBindings(namespace).List(ctx, listOpts)\n\tif !errors.IsNotFound(err) && err != nil {\n\t\treturn err\n\t}\n\n\tif roleBindings != nil {\n\t\tfor _, crb := range roleBindings.Items {\n\t\t\tfmt.Printf(\"Deleting RoleBinding: %s in cluster %s\\n\", crb.Name, clusterName)\n\t\t\tif err := clientset.RbacV1().RoleBindings(namespace).Delete(ctx, crb.Name, metav1.DeleteOptions{}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// clean up cluster role bindings\n\tclusterRoleBindings, err := clientset.RbacV1().ClusterRoleBindings().List(ctx, listOpts)\n\tif !errors.IsNotFound(err) && err != nil {\n\t\treturn err\n\t}\n\n\tif clusterRoleBindings != nil {\n\t\tfor _, crb := range clusterRoleBindings.Items {\n\t\t\tfmt.Printf(\"Deleting ClusterRoleBinding: %s in cluster %s\\n\", crb.Name, clusterName)\n\t\t\tif err := clientset.RbacV1().ClusterRoleBindings().Delete(ctx, crb.Name, metav1.DeleteOptions{}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// clean up cluster roles\n\tclusterRoles, err := clientset.RbacV1().ClusterRoles().List(ctx, listOpts)\n\tif !errors.IsNotFound(err) && err != nil {\n\t\treturn err\n\t}\n\n\tif clusterRoles != nil {\n\t\tfor _, cr := range clusterRoles.Items {\n\t\t\tfmt.Printf(\"Deleting ClusterRole: %s in cluster %s\\n\", cr.Name, clusterName)\n\t\t\tif err := clientset.RbacV1().ClusterRoles().Delete(ctx, cr.Name, metav1.DeleteOptions{}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (t *MorphTargets) Cleanup(a *app.App) {}", "func (d *Dumper) Cleanup() {\n\td.client.Close()\n\td.adminClient.Close()\n}", "func (r *Runner) Cleanup() {\n\tif !r.Preserve || r.Error() == nil {\n\t\tdefer os.RemoveAll(r.Dir)\n\t}\n}", "func cleanup(handler *deployment.Handler) {\n\thandler.Delete(name)\n}", "func (r *templateRouter) cleanUpServiceAliasConfig(cfg *ServiceAliasConfig) {\n\terr := r.certManager.DeleteCertificatesForConfig(cfg)\n\tif err != nil {\n\t\tlog.Error(err, \"error deleting certificates for route, the route will still be deleted but files may remain in the container\", \"host\", cfg.Host)\n\t}\n}", "func K3sCleanup(k3sTestLock int, dataDir string) error {\n\tif cni0Link, err := netlink.LinkByName(\"cni0\"); err == nil {\n\t\tlinks, _ := netlink.LinkList()\n\t\tfor _, link := range links {\n\t\t\tif link.Attrs().MasterIndex == cni0Link.Attrs().Index {\n\t\t\t\tnetlink.LinkDel(link)\n\t\t\t}\n\t\t}\n\t\tnetlink.LinkDel(cni0Link)\n\t}\n\n\tif flannel1, err := netlink.LinkByName(\"flannel.1\"); err == nil {\n\t\tnetlink.LinkDel(flannel1)\n\t}\n\tif flannelV6, err := netlink.LinkByName(\"flannel-v6.1\"); err == nil {\n\t\tnetlink.LinkDel(flannelV6)\n\t}\n\tif dataDir == \"\" {\n\t\tdataDir = \"/var/lib/rancher/k3s\"\n\t}\n\tif err := os.RemoveAll(dataDir); err != nil {\n\t\treturn err\n\t}\n\tif k3sTestLock != -1 {\n\t\treturn flock.Release(k3sTestLock)\n\t}\n\treturn nil\n}", "func (a *api) cleanUpTestData() {\n\tfmt.Println(\"Nothing to see here - move along\")\n\n\t/* Set up authorization with the token obtained earlier in the test */\n\ta.c.SetJWTSigner(&goaclient.APIKeySigner{\n\t\tSignQuery: false,\n\t\tKeyName: \"Authorization\",\n\t\tKeyValue: savedToken,\n\t\tFormat: \"Bearer %s\",\n\t})\n\n\t/* Delete the workitem */\n\tInfo.Println(\"The ID of the workitem to be deleted is:\", idString)\n\tresp, err := a.c.DeleteWorkitem(context.Background(), \"/api/workitems/\"+idString)\n\ta.resp = resp\n\ta.err = err\n\tif err != nil 
{\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}", "func projectCleanup(env environment.Environment, macro *model.Macro) error {\n\tpartialComparisonService := env.ServiceFactory().MustPartialComparisonService()\n\treturn partialComparisonService.DeleteFrom(macro)\n}", "func cleanup() {\n\tfor _, cmd := range runningApps {\n\t\tcmd.Process.Kill()\n\t}\n}", "func cleanupFilesystem(helper *clients.TestClient, k8sh *utils.K8sHelper, s *suite.Suite, namespace string, filesystemName string) {\n\tlogger.Infof(\"Deleting file system\")\n\terr := helper.FSClient.Delete(filesystemName, namespace)\n\tassert.Nil(s.T(), err)\n\tlogger.Infof(\"File system %s deleted\", filesystemName)\n}", "func (suite *PouchStartSuite) TearDownTest(c *check.C) {\n\tc.Assert(environment.PruneAllContainers(apiClient), check.IsNil)\n}", "func DeleteResources(f *os.File, cfg *rest.Config, dynamicClient dynamic.Interface, waitForDeletion bool) error {\n\tdeletionPropagation := metav1.DeletePropagationForeground\n\tgracePeriodSeconds := int64(0)\n\n\tdecoder, mapper, err := parseObjects(f, cfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor {\n\t\tresource, unstructuredObj, err := getResource(decoder, mapper, dynamicClient)\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif err := resource.Delete(context.Background(), unstructuredObj.GetName(),\n\t\t\tmetav1.DeleteOptions{GracePeriodSeconds: &gracePeriodSeconds,\n\t\t\t\tPropagationPolicy: &deletionPropagation}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif waitForDeletion {\n\t\t// verify deleted\n\t\tdecoder, mapper, err := parseObjects(f, cfg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor {\n\t\t\tresource, unstructuredObj, err := getResource(decoder, mapper, dynamicClient)\n\t\t\tif err != nil {\n\t\t\t\tif err == io.EOF {\n\t\t\t\t\tbreak\n\t\t\t\t} else {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfmt.Fprintln(ginkgo.GinkgoWriter, \"wait for deletion\", unstructuredObj.GetName())\n\t\t\tif err := wait.Poll(time.Second*5, time.Second*10, func() (done bool, err error) {\n\t\t\t\tobj, err := resource.Get(context.Background(), unstructuredObj.GetName(), metav1.GetOptions{})\n\n\t\t\t\tif err == nil {\n\t\t\t\t\tfmt.Fprintln(ginkgo.GinkgoWriter, \"remove finalizers\", obj.GetFinalizers(), unstructuredObj.GetName())\n\t\t\t\t\tobj.SetFinalizers(nil)\n\t\t\t\t\t_, err = resource.Update(context.Background(), obj, metav1.UpdateOptions{})\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn false, err\n\t\t\t\t\t}\n\t\t\t\t\treturn false, nil\n\t\t\t\t}\n\t\t\t\tif apierrors.IsNotFound(err) {\n\t\t\t\t\treturn true, nil\n\t\t\t\t}\n\t\t\t\treturn false, err\n\t\t\t}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (p *Provisioner) createCleanupPod(pOpts *HelperPodOptions) error {\n\t//err := pOpts.validate()\n\tif err := pOpts.validate(); err != nil {\n\t\treturn err\n\t}\n\n\t// Initialize HostPath builder and validate that\n\t// volume directory is not directly under root.\n\t// Extract the base path and the volume unique path.\n\tparentDir, volumeDir, vErr := hostpath.NewBuilder().WithPath(pOpts.path).\n\t\tWithCheckf(hostpath.IsNonRoot(), \"volume directory {%v} should not be under root directory\", pOpts.path).\n\t\tExtractSubPath()\n\tif vErr != nil {\n\t\treturn vErr\n\t}\n\n\tcleanerPod, _ := pod.NewBuilder().\n\t\tWithName(\"cleanup-\" + 
pOpts.name).\n\t\tWithRestartPolicy(corev1.RestartPolicyNever).\n\t\tWithNodeName(pOpts.nodeName).\n\t\tWithContainerBuilder(\n\t\t\tcontainer.NewBuilder().\n\t\t\t\tWithName(\"local-path-cleanup\").\n\t\t\t\tWithImage(p.helperImage).\n\t\t\t\tWithCommandNew(append(pOpts.cmdsForPath, filepath.Join(\"/data/\", volumeDir))).\n\t\t\t\tWithVolumeMountsNew([]corev1.VolumeMount{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"data\",\n\t\t\t\t\t\tReadOnly: false,\n\t\t\t\t\t\tMountPath: \"/data/\",\n\t\t\t\t\t},\n\t\t\t\t}),\n\t\t).\n\t\tWithVolumeBuilder(\n\t\t\tvolume.NewBuilder().\n\t\t\t\tWithName(\"data\").\n\t\t\t\tWithHostDirectory(parentDir),\n\t\t).\n\t\tBuild()\n\n\t//Launch the cleanup pod.\n\tcPod, err := p.kubeClient.CoreV1().Pods(p.namespace).Create(cleanerPod)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer func() {\n\t\te := p.kubeClient.CoreV1().Pods(p.namespace).Delete(cPod.Name, &metav1.DeleteOptions{})\n\t\tif e != nil {\n\t\t\tglog.Errorf(\"unable to delete the helper pod: %v\", e)\n\t\t}\n\t}()\n\n\t//Wait for the cleanup pod to complete it job and exit\n\tcompleted := false\n\tfor i := 0; i < CmdTimeoutCounts; i++ {\n\t\tcheckPod, err := p.kubeClient.CoreV1().Pods(p.namespace).Get(cPod.Name, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t} else if checkPod.Status.Phase == corev1.PodSucceeded {\n\t\t\tcompleted = true\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(1 * time.Second)\n\t}\n\tif !completed {\n\t\treturn errors.Errorf(\"create process timeout after %v seconds\", CmdTimeoutCounts)\n\t}\n\n\treturn nil\n}", "func Cleanup() {\n\tif _, err := _etcdClient.Delete(context.Background(), \"\", clientv3.WithPrefix()); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func (b *Builder) Cleanup() {\n\tos.RemoveAll(b.root)\n}", "func (f *Framework) CleanUp(ns string) error {\n\treturn f.AgonesClient.StableV1alpha1().GameServers(ns).\n\t\tDeleteCollection(&v1.DeleteOptions{}, v1.ListOptions{})\n}", "func (d *Daemon) Cleanup() error {\n\tif d.backend != nil {\n\t\td.backend.deleteAllStatus()\n\t}\n\tif err := os.Remove(util.DefaultRSAKeyPath); err != nil {\n\t\tlogrus.Info(\"Delete key failed\")\n\t}\n\td.deleteAllBuilders()\n\td.localStore.CleanContainers()\n\t_, err := d.localStore.Shutdown(false)\n\treturn err\n}", "func setup(t *testing.T) {\n\terr := os.RemoveAll(storagePath)\n\trequire.NoError(t, err)\n}", "func (m manager) CleanupRelease(ctx context.Context, manifest string) (bool, error) {\n\tdc, err := m.actionConfig.RESTClientGetter.ToDiscoveryClient()\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"failed to get Kubernetes discovery client: %w\", err)\n\t}\n\tapiVersions, err := action.GetVersionSet(dc)\n\tif err != nil && !discovery.IsGroupDiscoveryFailedError(err) {\n\t\treturn false, fmt.Errorf(\"failed to get apiVersions from Kubernetes: %w\", err)\n\t}\n\tmanifests := releaseutil.SplitManifests(manifest)\n\t_, files, err := releaseutil.SortManifests(manifests, apiVersions, releaseutil.UninstallOrder)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"failed to sort manifests: %w\", err)\n\t}\n\t// do not delete resources that are annotated with the Helm resource policy 'keep'\n\t_, filesToDelete := manifestutil.FilterManifestsToKeep(files)\n\tvar builder strings.Builder\n\tfor _, file := range filesToDelete {\n\t\tbuilder.WriteString(\"\\n---\\n\" + file.Content)\n\t}\n\tresources, err := m.kubeClient.Build(strings.NewReader(builder.String()), false)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"failed to build resources from manifests: %w\", err)\n\t}\n\tif 
resources == nil || len(resources) <= 0 {\n\t\treturn true, nil\n\t}\n\tfor _, resource := range resources {\n\t\terr = resource.Get()\n\t\tif err != nil {\n\t\t\tif apierrors.IsNotFound(err) {\n\t\t\t\tcontinue // resource is already delete, check the next one.\n\t\t\t}\n\t\t\treturn false, fmt.Errorf(\"failed to get resource: %w\", err)\n\t\t}\n\t\t// found at least one resource that is not deleted so just delete everything again.\n\t\t_, errs := m.kubeClient.Delete(resources)\n\t\tif len(errs) > 0 {\n\t\t\treturn false, fmt.Errorf(\"failed to delete resources: %v\", apiutilerrors.NewAggregate(errs))\n\t\t}\n\t\treturn false, nil\n\t}\n\treturn true, nil\n}", "func (b *Builder) Cleanup() {\n\tb.mu.Lock()\n\tfor r := range b.running {\n\t\tr.Cleanup()\n\t}\n\tb.mu.Unlock()\n\tif !b.Preserve {\n\t\tdefer os.RemoveAll(b.Dir)\n\t}\n}", "func (s *CreateMapping) Cleanup() error {\n\terr := s.mappings.Delete(s.name, &metav1.DeleteOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn helpers.AwaitResourceDeleted(func() (interface{}, error) {\n\t\treturn s.mappings.Get(s.name, v1.GetOptions{})\n\t}, retry.DelayType(retry.BackOffDelay),\n\t\tretry.Delay(1*time.Second))\n}", "func (d *DefaultDistributor) Cleanup() error {\n\tfor k := range d.active {\n\t\terr := d.StopSharing(k)\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"error\": err,\n\t\t\t\t\"image\": k,\n\t\t\t}).Error(\"got error while cleaning up\")\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (t *Targets) Cleanup(context context.Context, instance *iter8v1alpha2.Experiment) {\n\tif instance.Spec.Cleanup != nil && *instance.Spec.Cleanup {\n\t\tassessment := instance.Status.Assessment\n\t\ttoKeep := make(map[string]bool)\n\t\tswitch instance.Spec.GetOnTermination() {\n\t\tcase iter8v1alpha2.OnTerminationToWinner:\n\t\t\tif assessment != nil && assessment.Winner != nil && assessment.Winner.WinnerFound {\n\t\t\t\ttoKeep[assessment.Winner.Winner] = true\n\t\t\t} else {\n\t\t\t\ttoKeep[instance.Spec.Baseline] = true\n\t\t\t}\n\t\tcase iter8v1alpha2.OnTerminationToBaseline:\n\t\t\ttoKeep[instance.Spec.Baseline] = true\n\t\tcase iter8v1alpha2.OnTerminationKeepLast:\n\t\t\tif assessment != nil {\n\t\t\t\tif assessment.Baseline.Weight > 0 {\n\t\t\t\t\ttoKeep[assessment.Baseline.Name] = true\n\t\t\t\t}\n\t\t\t\tfor _, candidate := range assessment.Candidates {\n\t\t\t\t\tif candidate.Weight > 0 {\n\t\t\t\t\t\ttoKeep[candidate.Name] = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tsvcNamespace := instance.ServiceNamespace()\n\t\t// delete baseline\n\t\tif ok := toKeep[instance.Spec.Baseline]; !ok {\n\t\t\terr := t.client.Delete(context, &appsv1.Deployment{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tNamespace: svcNamespace,\n\t\t\t\t\tName: instance.Spec.Baseline,\n\t\t\t\t},\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tutil.Logger(context).Error(err, \"Error when deleting baseline\")\n\t\t\t}\n\t\t}\n\n\t\t// delete candidates\n\t\tfor _, candidate := range instance.Spec.Candidates {\n\t\t\tif ok := toKeep[candidate]; !ok {\n\t\t\t\terr := t.client.Delete(context, &appsv1.Deployment{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tNamespace: svcNamespace,\n\t\t\t\t\t\tName: candidate,\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\tutil.Logger(context).Error(err, \"Error when deleting candidate\", \"name\", candidate)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (pbm *PodBackoffMap) CleanupPodsCompletesBackingoff() {\n\tpbm.lock.Lock()\n\tdefer pbm.lock.Unlock()\n\tfor pod, value := 
range pbm.podLastUpdateTime {\n\t\tif value.Add(pbm.maxDuration).Before(time.Now()) {\n\t\t\tpbm.clearPodBackoff(pod)\n\t\t}\n\t}\n}", "func removeDisabledPods(dataDir, containerRuntimeEndpoint string, disabledItems map[string]bool, clusterReset bool) error {\n\tterminatePods := false\n\texecPath := binDir(dataDir)\n\tmanifestDir := podManifestsDir(dataDir)\n\n\t// no need to clean up static pods if this is a clean install (bin or manifests dirs missing)\n\tfor _, path := range []string{execPath, manifestDir} {\n\t\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t// ensure etcd and the apiserver are terminated if doing a cluster-reset, and force pod\n\t// termination even if there are no manifests on disk\n\tif clusterReset {\n\t\tdisabledItems[\"etcd\"] = true\n\t\tdisabledItems[\"kube-apiserver\"] = true\n\t\tterminatePods = true\n\t}\n\n\t// check to see if there are manifests for any disabled components. If there are no manifests for\n\t// disabled components, and termination wasn't forced by cluster-reset, termination is skipped.\n\tfor component, disabled := range disabledItems {\n\t\tif disabled {\n\t\t\tmanifestName := filepath.Join(manifestDir, component+\".yaml\")\n\t\t\tif _, err := os.Stat(manifestName); err == nil {\n\t\t\t\tterminatePods = true\n\t\t\t}\n\t\t}\n\t}\n\n\tif terminatePods {\n\t\tlogrus.Infof(\"Static pod cleanup in progress\")\n\t\t// delete manifests for disabled items\n\t\tfor component, disabled := range disabledItems {\n\t\t\tif disabled {\n\t\t\t\tmanifestName := filepath.Join(manifestDir, component+\".yaml\")\n\t\t\t\tif err := os.RemoveAll(manifestName); err != nil {\n\t\t\t\t\treturn errors.Wrapf(err, \"unable to delete %s manifest\", component)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tctx, cancel := context.WithTimeout(context.Background(), (5 * time.Minute))\n\t\tdefer cancel()\n\n\t\tcontainerdErr := make(chan error)\n\n\t\t// start containerd, if necessary. 
The command will be terminated automatically when the context is cancelled.\n\t\tif containerRuntimeEndpoint == \"\" {\n\t\t\tcontainerdCmd := exec.CommandContext(ctx, filepath.Join(execPath, \"containerd\"))\n\t\t\tgo startContainerd(ctx, dataDir, containerdErr, containerdCmd)\n\t\t}\n\t\t// terminate any running containers from the disabled items list\n\t\tgo terminateRunningContainers(ctx, containerRuntimeEndpoint, disabledItems, containerdErr)\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase err := <-containerdErr:\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn errors.Wrap(err, \"temporary containerd process exited unexpectedly\")\n\t\t\t\t}\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn errors.New(\"static pod cleanup timed out\")\n\t\t\t}\n\t\t\tlogrus.Info(\"Static pod cleanup completed successfully\")\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn nil\n}", "func TestDeleteResource(t *testing.T) {\n\ttestName := \"TestDeleteResource\"\n\tbeforeTest()\n\t// kinds to check for status\n\tvar kindsToCheckStatus = map[string]bool{\n\t\tAPPLICATION: true,\n\t\t\"Ingress\": true,\n\t\t\"Service\": true,\n\t\t\"Deployment\": true,\n\t\t\"StatefulSet\": true,\n\t\t\"NetworkPolicy\": true,\n\t}\n\n\t// resources to pre-populate\n\tvar files = []string{\n\t\t/* 0 */ KappnavConfigFile,\n\t\t/* 1 */ CrdApplication,\n\t\t/* 4 */ appBookinfo,\n\t\t/* 5 */ appProductpage,\n\t\t/* 6 */ appDetails,\n\t\t/* 7 */ appRatings,\n\t\t/* 8 */ deploymentDetailsV1,\n\t\t/* 9 */ deploymentProcuctpageV1,\n\t\t/* 10 */ deploymentRatingsV1,\n\t\t/* 11 */ ingressBookinfo,\n\t\t/* 12 */ networkpolicyProductpage,\n\t\t/* 13 */ networkpolicyReviews,\n\t\t/* 14 */ serviceDetails,\n\t\t/* 15 */ serviceProductpage,\n\t\t/* 16 */ serviceRatings,\n\t\t/* 17 */ serviceReview,\n\t\t/* 18 */ deploymentReviewsV1,\n\t\t/* 19 */ appReviews,\n\t\t/* 20 */ deploymentReviewsV2,\n\t\t/* 21 */ deploymentReviewsV3,\n\t}\n\titeration0IDs, err := readResourceIDs(files)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttestActions := newTestActions(testName, kindsToCheckStatus)\n\n\t/* Iteration 0 */\n\titeration0IDs[2].expectedStatus = problem // bookinfo problem due to review app\n\titeration0IDs[17].expectedStatus = problem // review app problem due to deploymentReviewsV3\n\titeration0IDs[18].expectedStatus = warning // deploymentReviewsV2 is WARING\n\titeration0IDs[19].expectedStatus = problem // deploymentReviewsV3 is problem\n\tvar emptyIDs = []resourceID{}\n\ttestActions.addIteration(iteration0IDs, emptyIDs)\n\n\t/* iteration 1: delete deploymentReviewsV3 */\n\tarrayLength := len(iteration0IDs) - 1\n\tvar iteration1IDs = make([]resourceID, arrayLength, arrayLength)\n\tcopy(iteration1IDs, iteration0IDs)\n\titeration1IDs[2].expectedStatus = warning // bookfino now warning\n\titeration1IDs[17].expectedStatus = warning // review app now warning deu to deploymentReviewsV3 being deleted\n\ttestActions.addIteration(iteration1IDs, emptyIDs)\n\n\t/* iteration 2: delete deploymentReviewsV2 */\n\tarrayLength = len(iteration1IDs) - 1\n\tvar iteration2IDs = make([]resourceID, arrayLength, arrayLength)\n\tcopy(iteration2IDs, iteration1IDs)\n\titeration2IDs[2].expectedStatus = Normal // bookfino now Normal\n\titeration2IDs[17].expectedStatus = Normal // reviews now Normal deu to deploymentReviewsV2 being deleted\n\ttestActions.addIteration(iteration2IDs, emptyIDs)\n\n\t/* iteration 3: set deploymentReviewsV1 to warning */\n\tarrayLength = len(iteration2IDs)\n\tvar iteration3IDs = make([]resourceID, arrayLength, arrayLength)\n\tcopy(iteration3IDs, 
iteration2IDs)\n\titeration3IDs[2].expectedStatus = warning // bookfino now Normal\n\titeration3IDs[16].expectedStatus = warning // deploymentReviewsV1 now warning\n\titeration3IDs[17].expectedStatus = warning // reviews now Normal deu to deploymentReviewsV1 being warning\n\ttestActions.addIteration(iteration3IDs, emptyIDs)\n\n\t/* iteration 4: delet review app */\n\tarrayLength = len(iteration3IDs) - 1\n\tvar iteration4IDs = make([]resourceID, arrayLength, arrayLength)\n\tcopy(iteration4IDs, iteration3IDs)\n\titeration4IDs[2].expectedStatus = Normal // bookfino now Normal due to review app being deleted\n\ttestActions.addIteration(iteration4IDs, emptyIDs)\n\n\t/* iteration 5: clean up */\n\ttestActions.addIteration(emptyIDs, emptyIDs)\n\n\tclusterWatcher, err := createClusterWatcher(iteration0IDs, testActions, StatusFailureRate)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer clusterWatcher.shutDown()\n\n\t// make all trasition of testAction\n\terr = testActions.transitionAll()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func (h *HTTPGetter) Cleanup() error {\n\treturn os.RemoveAll(h.dst)\n}", "func (cp *OCIConveyorPacker) CleanUp() {\n\tos.RemoveAll(cp.b.Path)\n}", "func Cleanup() {\n\tclient := DockerClient()\n\tif list, err := CleanupDockerImageList(); err == nil {\n\t\tfor _, i := range list {\n\t\t\tclient.RemoveImage(i.ID)\n\t\t}\n\t}\n\tif list, err := CleanupDockerContainersList(); err == nil {\n\t\tfor _, c := range list {\n\t\t\topts := dockerclient.RemoveContainerOptions{ID: c.ID}\n\t\t\tclient.RemoveContainer(opts)\n\t\t}\n\t}\n}", "func (rcsw *RemoteClusterServiceWatcher) cleanupMirroredResources() error {\n\tmatchLabels := map[string]string{\n\t\tconsts.MirroredResourceLabel: \"true\",\n\t\tconsts.RemoteClusterNameLabel: rcsw.clusterName,\n\t}\n\n\tservices, err := rcsw.localAPIClient.Svc().Lister().List(labels.Set(matchLabels).AsSelector())\n\tif err != nil {\n\t\tinnerErr := fmt.Errorf(\"could not retrieve mirrored services that need cleaning up: %s\", err)\n\t\tif kerrors.IsNotFound(err) {\n\t\t\treturn innerErr\n\t\t}\n\t\t// if its not notFound then something else went wrong, so we can retry\n\t\treturn RetryableError{[]error{innerErr}}\n\t}\n\n\tvar errors []error\n\tfor _, svc := range services {\n\t\tif err := rcsw.localAPIClient.Client.CoreV1().Services(svc.Namespace).Delete(svc.Name, &metav1.DeleteOptions{}); err != nil {\n\t\t\tif kerrors.IsNotFound(err) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terrors = append(errors, fmt.Errorf(\"Could not delete service %s/%s: %s\", svc.Namespace, svc.Name, err))\n\t\t} else {\n\t\t\trcsw.log.Debugf(\"Deleted service %s/%s\", svc.Namespace, svc.Name)\n\t\t}\n\t}\n\n\tendpoints, err := rcsw.localAPIClient.Endpoint().Lister().List(labels.Set(matchLabels).AsSelector())\n\tif err != nil {\n\t\tinnerErr := fmt.Errorf(\"could not retrieve Endpoints that need cleaning up: %s\", err)\n\t\tif kerrors.IsNotFound(err) {\n\t\t\treturn innerErr\n\t\t}\n\t\treturn RetryableError{[]error{innerErr}}\n\t}\n\n\tfor _, endpt := range endpoints {\n\t\tif err := rcsw.localAPIClient.Client.CoreV1().Endpoints(endpt.Namespace).Delete(endpt.Name, &metav1.DeleteOptions{}); err != nil {\n\t\t\tif kerrors.IsNotFound(err) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terrors = append(errors, fmt.Errorf(\"Could not delete Endpoints %s/%s: %s\", endpt.Namespace, endpt.Name, err))\n\t\t} else {\n\t\t\trcsw.log.Debugf(\"Deleted Endpoints %s/%s\", endpt.Namespace, endpt.Name)\n\t\t}\n\t}\n\n\tif len(errors) > 0 {\n\t\treturn RetryableError{errors}\n\t}\n\treturn nil\n}", 
"func cleanup(ctx context.Context, fs fs, logger log.FieldLogger, props processorProps) {\n\tspan, _ := opentracing.StartSpanFromContext(ctx, \"cleanup\")\n\tdefer span.Finish()\n\n\terr := fs.DeleteDir(props.WorkDir)\n\tif err != nil {\n\t\tlogger.Errorf(\"%+v\\n\", err)\n\t}\n}", "func TearDown() {\n\n\tcmd := exec.Command(\"/bin/sh\", \"-c\", \"sudo terraform destroy -auto-approve\")\n\tcmd.Dir = \"/home/ubuntu/terradir\"\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tcmd.Run()\n}", "func (e *Environment) Cleanup() {\n\tif e.serveCancel != nil {\n\t\te.serveCancel()\n\t\t_ = e.serveG.Wait()\n\t}\n\tif appdPath, _ := exec.LookPath(e.Appd()); appdPath != \"\" {\n\t\tos.Remove(appdPath)\n\t}\n\tif appcliPath, _ := exec.LookPath(e.Appcli()); appcliPath != \"\" {\n\t\tos.Remove(appcliPath)\n\t}\n\thome, _ := os.UserHomeDir()\n\tos.RemoveAll(filepath.Join(home, \".\"+e.Appcli()))\n\tos.RemoveAll(filepath.Join(home, \".\"+e.Appd()))\n}", "func (ts testState) assertCleanUp(ctx context.Context, client kubernetesClient) error {\n\terr := makeDeletable(ctx, withMigrationSuffix(ts.pvc), ts.namespace, client)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpods, err := client.ListPods(ctx, ts.namespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(pods) != 0 {\n\t\treturn errors.Errorf(\"cleanup unsuccessful, still %v pods\", len(pods))\n\t}\n\treturn nil\n}", "func (p *BuildAhTest) Cleanup() {\n\t// Nuke tempdir\n\tif err := os.RemoveAll(p.TempDir); err != nil {\n\t\tfmt.Printf(\"%q\\n\", err)\n\t}\n\tcleanup := p.BuildAh([]string{\"rmi\", \"-a\", \"-f\"})\n\tcleanup.WaitWithDefaultTimeout()\n}", "func CleanupBuildArtifacts() {\n\tmu.Lock()\n\tdefer mu.Unlock()\n\tif tmpDir != \"\" {\n\t\tos.RemoveAll(tmpDir)\n\t\ttmpDir = \"\"\n\t}\n}" ]
[ "0.82490194", "0.7183337", "0.6963463", "0.6941537", "0.6897071", "0.6610021", "0.6599832", "0.6594847", "0.6564672", "0.6459518", "0.64580095", "0.6447388", "0.6432132", "0.6390202", "0.6367826", "0.636664", "0.63490593", "0.6323104", "0.62937874", "0.6286403", "0.62849927", "0.6247422", "0.6200934", "0.61975044", "0.6191915", "0.61834216", "0.61761904", "0.6118881", "0.6108098", "0.60908175", "0.6057113", "0.6052722", "0.6040662", "0.60190624", "0.60189056", "0.6015518", "0.60127926", "0.59988296", "0.5988524", "0.59756666", "0.5966365", "0.59475714", "0.59451425", "0.59331745", "0.59122854", "0.59122854", "0.5907445", "0.5901997", "0.5896631", "0.5895334", "0.58928293", "0.5891835", "0.58881986", "0.5884132", "0.587205", "0.5869387", "0.58672494", "0.5861467", "0.58533245", "0.58353394", "0.5825032", "0.582304", "0.581963", "0.58164895", "0.5803408", "0.57966536", "0.5793128", "0.57925844", "0.57869107", "0.57708544", "0.57626516", "0.57617074", "0.57567495", "0.5755462", "0.57519376", "0.5735412", "0.57338244", "0.5733493", "0.5731716", "0.57311285", "0.5730143", "0.5725858", "0.5720758", "0.5719608", "0.5708509", "0.5704568", "0.56865644", "0.56862086", "0.56749827", "0.5671019", "0.56705564", "0.56641966", "0.5662905", "0.56513697", "0.564967", "0.564483", "0.5637815", "0.5637254", "0.5618577", "0.5616048" ]
0.8216792
1
RunTest executes a single test
func (r PodTestRunner) RunTest(ctx context.Context, test Test) (result *v1alpha3.TestStatus, err error) {
	// Create a Pod to run the test
	podDef := getPodDefinition(r.configMapName, test, r)
	pod, err := r.Client.CoreV1().Pods(r.Namespace).Create(ctx, podDef, metav1.CreateOptions{})
	if err != nil {
		return result, err
	}

	err = r.waitForTestToComplete(ctx, pod)
	if err != nil {
		return result, err
	}

	result = r.getTestStatus(ctx, pod, test)

	return result, nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestRun(t *testing.T) {\n\tRun()\n}", "func (m *Main) RunTest(name, command string, run func(t *Test) error) error {\n\tt := m.NewTest(name, command, run)\n\treturn t.Run()\n}", "func TestRunMain(t *testing.T) {\n\tmain()\n}", "func (envManager *TestEnvManager) RunTest(m runnable) (ret int) {\n\tdefer envManager.TearDown()\n\tif err := envManager.StartUp(); err != nil {\n\t\tlog.Printf(\"Failed to setup framework: %s\", err)\n\t\tret = 1\n\t} else {\n\t\tlog.Printf(\"\\nStart testing ......\")\n\t\tret = m.Run()\n\t}\n\treturn ret\n}", "func (t *Test) Run() error {\n\treturn t.Wrap(t.run)\n}", "func (test Test) Run(t *testing.T) {\n\tt.Logf(\"Starting test %v\", t.Name())\n\tt.Helper()\n\t// Double negative cannot be helped, this is intended to mitigate test failures where a global\n\t// resource is manipulated, e.g.: the default AWS security group.\n\tif !test.RunOptions.NoParallel {\n\t\tt.Parallel()\n\t}\n\tt.Run(\"Python\", func(t *testing.T) {\n\t\trunOpts := integration.ProgramTestOptions{}\n\t\tif test.RunOptions != nil {\n\t\t\trunOpts = *test.RunOptions\n\t\t}\n\t\tconvertOpts := test.Options\n\t\tif test.Python != nil {\n\t\t\tconvertOpts = convertOpts.With(*test.Python)\n\t\t}\n\n\t\ttargetTest := targetTest{\n\t\t\trunOpts: &runOpts,\n\t\t\tconvertOpts: &convertOpts,\n\t\t\tprojectName: test.ProjectName,\n\t\t\tlanguage: \"python\",\n\t\t\truntime: \"python\",\n\t\t}\n\t\ttargetTest.Run(t)\n\t})\n\tt.Run(\"TypeScript\", func(t *testing.T) {\n\t\trunOpts := integration.ProgramTestOptions{}\n\t\tif test.RunOptions != nil {\n\t\t\trunOpts = *test.RunOptions\n\t\t}\n\t\tconvertOpts := test.Options\n\t\tif test.TypeScript != nil {\n\t\t\tconvertOpts = convertOpts.With(*test.TypeScript)\n\t\t}\n\n\t\ttargetTest := targetTest{\n\t\t\trunOpts: &runOpts,\n\t\t\tconvertOpts: &convertOpts,\n\t\t\tprojectName: test.ProjectName,\n\t\t\tlanguage: \"typescript\",\n\t\t\truntime: \"nodejs\",\n\t\t}\n\t\ttargetTest.Run(t)\n\t})\n}", "func runSingleTest(s *search.Search, sl *search.Limits, t *Test) {\n\t// reset search and search limits\n\ts.NewGame()\n\tsl.Mate = 0\n\t// create position\n\tp, _ := position.NewPositionFen(t.fen)\n\tswitch t.tType {\n\tcase DM:\n\t\tdirectMateTest(s, sl, p, t)\n\tcase BM:\n\t\tbestMoveTest(s, sl, p, t)\n\tcase AM:\n\t\tavoidMoveMateTest(s, sl, p, t)\n\tdefault:\n\t\tlog.Warningf(\"Unknown Test type: %d\", t.tType)\n\t}\n}", "func (s *FakeJujuRunnerSuite) TestRun(c *gc.C) {\n\ts.runner.Run()\n\ts.runner.Stop()\n\tresult := s.runner.Wait()\n \n\tc.Assert(result.String(), gc.Equals, \"OK: 1 passed\")\n\tc.Assert(result.Succeeded, gc.Equals, 1)\n\tc.Assert(result.RunError, gc.IsNil)\n\tc.Assert(\n\t\tstrings.Contains(s.output.String(), \"Starting service\"), gc.Equals, true)\n}", "func RunTest(t *testing.T, name string, f Func, testCases []TestCase) {\n\tt.Run(name, func(t *testing.T) {\n\t\tfor _, test := range testCases {\n\t\t\tif actual := f(test.Input); actual != test.Expected {\n\t\t\t\tt.Errorf(\"\\nfor n=%d, expected: %t, actual: %t\", test.Input, test.Expected, actual)\n\t\t\t}\n\t\t}\n\t})\n}", "func RunTest(client pb.GNMIClient, testCase *common.TestCase, timeout time.Duration, stateUpdateDelay time.Duration) error {\n\tif client == nil {\n\t\treturn errors.New(\"gNMI client is not available\")\n\t}\n\tif testCase == nil {\n\t\treturn errors.New(\"empty test case\")\n\t}\n\tif len(testCase.OPs) == 0 {\n\t\t// Succeed if no operation specified in this test case.\n\t\treturn nil\n\t}\n\t// Determine the test case type.\n\tswitch testCase.OPs[0].Type 
{\n\tcase common.OPReplace, common.OPUpdate, common.OPDelete:\n\t\t// This is a config test.\n\t\treturn runConfigTest(client, testCase, timeout, stateUpdateDelay)\n\tcase common.OPGet:\n\t\t// This is a state fetching test.\n\t\treturn runStateTest(client, testCase, timeout)\n\tcase common.OPSubscribe:\n\t\treturn errors.New(\"not support telemetry streaming test cases\")\n\tdefault:\n\t\treturn fmt.Errorf(\"invalid operation type %s\", testCase.OPs[0].Type)\n\t}\n}", "func TestRun(t *testing.T) {\n\tsuite.Run(t, new(CategoryTestSuite))\n\tsuite.Run(t, new(ProductTestSuite))\n}", "func RunUnitTest(cobraCmd *cobra.Command, args []string) {\n\terr := CommandWithStdout(\"go\", \"test\", \"./...\").Run()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func (c *actionTests) actionRun(t *testing.T) {\n\te2e.EnsureImage(t, c.env)\n\n\ttests := []struct {\n\t\tname string\n\t\targv []string\n\t\texit int\n\t}{\n\t\t{\n\t\t\tname: \"NoCommand\",\n\t\t\targv: []string{c.env.ImagePath},\n\t\t\texit: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"True\",\n\t\t\targv: []string{c.env.ImagePath, \"true\"},\n\t\t\texit: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"False\",\n\t\t\targv: []string{c.env.ImagePath, \"false\"},\n\t\t\texit: 1,\n\t\t},\n\t\t{\n\t\t\tname: \"ScifTestAppGood\",\n\t\t\targv: []string{\"--app\", \"testapp\", c.env.ImagePath},\n\t\t\texit: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"ScifTestAppBad\",\n\t\t\targv: []string{\"--app\", \"fakeapp\", c.env.ImagePath},\n\t\t\texit: 1,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tc.env.RunSingularity(\n\t\t\tt,\n\t\t\te2e.AsSubtest(tt.name),\n\t\t\te2e.WithProfile(e2e.UserProfile),\n\t\t\te2e.WithCommand(\"run\"),\n\t\t\te2e.WithArgs(tt.argv...),\n\t\t\te2e.ExpectExit(tt.exit),\n\t\t)\n\t}\n}", "func (r FakeTestRunner) RunTest(ctx context.Context, test Test) (result *v1alpha3.TestStatus, err error) {\n\treturn r.TestStatus, r.Error\n}", "func (t testCommand) Run() error {\n\tif t.shouldFail {\n\t\treturn errors.New(\"I AM ERROR\")\n\t}\n\treturn nil\n}", "func Run(t *testing.T, s suite.TestingSuite) {\n\tsuite.Run(t, s)\n}", "func RunTest(flags *Flags) error {\n\tswitch flags.Mode {\n\tcase constants.ManagerMode:\n\t\treturn workermanager.New().RunTest()\n\tcase constants.WorkerMode:\n\t\tslackURL := flags.SlackURL\n\t\tvar slacks []string\n\t\tif len(slackURL) > 0 {\n\t\t\tslacks = append(slacks, slackURL)\n\t\t}\n\t\treturn worker.NewWorker().RunTest(flags.Type, slacks)\n\t}\n\n\treturn nil\n}", "func TestMain(m *testing.M) {\n\tprintln(\"do stuff before all tests\")\n\tm.Run()\n\tprintln(\"do stuff after all tests\")\n}", "func RunTest(ctx context.Context, target, location string, nodeIDs []int, limit int, debug, outputJSON bool, runTest runFunc, runOutput runOutputFunc) error {\n\trunReq := &perfops.RunRequest{\n\t\tTarget: target,\n\t\tLocation: location,\n\t\tNodes: nodeIDs,\n\t\tLimit: limit,\n\t}\n\n\tf := NewFormatter(debug && !outputJSON)\n\tf.StartSpinner()\n\ttestID, err := runTest(ctx, runReq)\n\tf.StopSpinner()\n\tif err != nil {\n\t\treturn err\n\t}\n\tres := &RunOutputResult{}\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-time.After(200 * time.Millisecond):\n\t\t\t}\n\t\t\toutput, err := runOutput(ctx, testID)\n\t\t\tres.SetOutput(output, err)\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n\n\tif outputJSON {\n\t\tf.StartSpinner()\n\t}\n\tvar o *perfops.RunOutput\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(50 * time.Millisecond):\n\t\t}\n\t\tif o, err = res.Output(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !outputJSON && o 
!= nil {\n\t\t\tPrintOutput(f, o)\n\t\t}\n\t\tif o != nil && o.IsFinished() {\n\t\t\tbreak\n\t\t}\n\t}\n\tif outputJSON {\n\t\tf.StopSpinner()\n\t\tPrintOutputJSON(o)\n\t}\n\treturn nil\n}", "func (runner *suiteRunner) run() *Result {\n if runner.tracker.result.RunError == nil && len(runner.tests) > 0 {\n runner.tracker.start()\n if runner.checkFixtureArgs() {\n if runner.runFixture(runner.setUpSuite) {\n for i := 0; i != len(runner.tests); i++ {\n c := runner.runTest(runner.tests[i])\n if c.status == fixturePanickedSt {\n runner.missTests(runner.tests[i+1:])\n break\n }\n }\n } else {\n runner.missTests(runner.tests)\n }\n runner.runFixture(runner.tearDownSuite)\n } else {\n runner.missTests(runner.tests)\n }\n runner.tracker.waitAndStop()\n runner.tempDir.removeAll()\n }\n return &runner.tracker.result\n}", "func RunTest(t *testing.T, dir string, opts ...TestOptionsFunc) {\n\ttest := Test{}\n\t// Apply common defaults.\n\ttest.ProjectName = filepath.Base(dir)\n\ttest.Options.Compile = nil\n\ttest.Options.FilterName = \"name\"\n\ttest.RunOptions = &integration.ProgramTestOptions{\n\t\tDir: dir,\n\t\tExpectRefreshChanges: true,\n\t}\n\tfor _, opt := range opts {\n\t\topt(t, &test)\n\t}\n\n\ttest.Run(t)\n}", "func TestMain(m *testing.M) {\n\tos.Exit(runTest(m))\n}", "func Test1IsATest(t *testing.T) {\n}", "func (scenTest *GetStartedFunctionsScenarioTest) RunSubTest(stubber *testtools.AwsmStubber) {\n\tmockQuestioner := demotools.MockQuestioner{Answers: scenTest.Answers}\n\tscenario := NewGetStartedFunctionsScenario(*stubber.SdkConfig, &mockQuestioner, &scenTest.helper)\n\tscenario.isTestRun = true\n\tscenario.Run()\n}", "func Test() error {\n\treturn sh.RunWith(map[string]string{\"GORACE\": \"halt_on_error=1\"},\n\t\t\"go\", \"test\", \"-race\", \"-v\", \"./...\")\n}", "func (o *Options) Run() error {\n\terr := o.Validate()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to validate\")\n\t}\n\n\ttest := &v1alpha1.TestRun{}\n\terr = o.PopulateTest(test)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to populate the TestRun resource\")\n\t}\n\n\to.TestRun, err = o.TestClient.JxtestV1alpha1().TestRuns(o.Namespace).Create(test)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to create the TestRun CRD\")\n\t}\n\treturn nil\n}", "func (ts *TestSuite) RunTests() {\n\n\tif len(ts.Tests) == 0 {\n\t\tout.Printf(\"No tests to run\\n\")\n\t\treturn\n\t}\n\n\tstartTime := time.Now()\n\n\t// setup search\n\ts := search.NewSearch()\n\tsl := search.NewSearchLimits()\n\tsl.MoveTime = ts.Time\n\tsl.Depth = ts.Depth\n\tif sl.MoveTime > 0 {\n\t\tsl.TimeControl = true\n\t}\n\n\tout.Printf(\"Running Test Suite\\n\")\n\tout.Printf(\"==================================================================\\n\")\n\tout.Printf(\"EPD File: %s\\n\", ts.FilePath)\n\tout.Printf(\"SearchTime: %d ms\\n\", ts.Time.Milliseconds())\n\tout.Printf(\"MaxDepth: %d\\n\", ts.Depth)\n\tout.Printf(\"Date: %s\\n\", time.Now().Local())\n\tout.Printf(\"No of tests: %d\\n\", len(ts.Tests))\n\tout.Println()\n\n\t// execute all tests and store results in the\n\t// test instance\n\tfor i, t := range ts.Tests {\n\t\tout.Printf(\"Test %d of %d\\nTest: %s -- Target Result %s\\n\", i+1, len(ts.Tests), t.line, t.targetMoves.StringUci())\n\t\tstartTime2 := time.Now()\n\t\trunSingleTest(s, sl, t)\n\t\telapsedTime := time.Since(startTime2)\n\t\tt.nodes = s.NodesVisited()\n\t\tt.time = s.LastSearchResult().SearchTime\n\t\tt.nps = util.Nps(s.NodesVisited(), s.LastSearchResult().SearchTime)\n\t\tout.Printf(\"Test finished in 
%d ms with result %s (%s) - nps: %d\\n\\n\",\n\t\t\telapsedTime.Milliseconds(), t.rType.String(), t.actual.StringUci(), t.nps)\n\t}\n\n\t// sum up result for report\n\ttr := &SuiteResult{}\n\tfor _, t := range ts.Tests {\n\t\ttr.Counter++\n\t\tswitch t.rType {\n\t\tcase NotTested:\n\t\t\ttr.NotTestedCounter++\n\t\tcase Skipped:\n\t\t\ttr.SkippedCounter++\n\t\tcase Failed:\n\t\t\ttr.FailedCounter++\n\t\tcase Success:\n\t\t\ttr.SuccessCounter++\n\t\t}\n\t\ttr.Nodes += t.nodes\n\t\ttr.Time += t.time\n\t}\n\tts.LastResult = tr\n\n\telapsed := time.Since(startTime)\n\n\t// print report\n\tout.Printf(\"Results for Test Suite\\n\", ts.FilePath)\n\tout.Printf(\"------------------------------------------------------------------------------------------------------------------------------------\\n\")\n\tout.Printf(\"EPD File: %s\\n\", ts.FilePath)\n\tout.Printf(\"SearchTime: %d ms\\n\", ts.Time.Milliseconds())\n\tout.Printf(\"MaxDepth: %d\\n\", ts.Depth)\n\tout.Printf(\"Date: %s\\n\", time.Now().Local())\n\tout.Printf(\"====================================================================================================================================\\n\")\n\tout.Printf(\" %-4s | %-10s | %-8s | %-8s | %-15s | %s | %s\\n\", \" Nr.\", \"Result\", \"Move\", \"Value\", \"Expected Result\", \"Fen\", \"Id\")\n\tout.Printf(\"====================================================================================================================================\\n\")\n\tfor i, t := range ts.Tests {\n\t\tif t.tType == DM {\n\t\t\tout.Printf(\" %-4d | %-10s | %-8s | %-8s | %s%-15d | %s | %s\\n\",\n\t\t\t\ti+1, t.rType.String(), t.actual.StringUci(), t.value.String(), \"dm \", t.mateDepth, t.fen, t.id)\n\t\t} else {\n\t\t\tout.Printf(\" %-4d | %-10s | %-8s | %-8s | %s %-15s | %s | %s\\n\",\n\t\t\t\ti+1, t.rType.String(), t.actual.StringUci(), t.value.String(), t.tType.String(), t.targetMoves.StringUci(), t.fen, t.id)\n\t\t}\n\t}\n\tout.Printf(\"====================================================================================================================================\\n\")\n\tout.Printf(\"Summary:\\n\")\n\tout.Printf(\"EPD File: %s\\n\", ts.FilePath)\n\tout.Printf(\"SearchTime: %d ms\\n\", ts.Time.Milliseconds())\n\tout.Printf(\"MaxDepth: %d\\n\", ts.Depth)\n\tout.Printf(\"Date: %s\\n\", time.Now().Local())\n\tout.Printf(\"Successful: %-3d (%d %%)\\n\", tr.SuccessCounter, 100*tr.SuccessCounter/tr.Counter)\n\tout.Printf(\"Failed: %-3d (%d %%)\\n\", tr.FailedCounter, 100*tr.FailedCounter/tr.Counter)\n\tout.Printf(\"Skipped: %-3d (%d %%)\\n\", tr.SkippedCounter, 100*tr.SkippedCounter/tr.Counter)\n\tout.Printf(\"Not tested: %-3d (%d %%)\\n\", tr.NotTestedCounter, 100*tr.NotTestedCounter/tr.Counter)\n\tout.Printf(\"Test time: %s\\n\", elapsed)\n\tout.Printf(\"Configuration: %s\\n\", config.Settings.String())\n}", "func TestOne(t *testing.T) {\n\ttest(t, 42)\n}", "func (t *Test) Run(fn func(ctx TestContext)) {\n\tstart := time.Now()\n\n\tscopes.CI.Infof(\"=== BEGIN: Test: '%s[%s]' ===\", rt.SuiteContext().Settings().TestID, t.t.Name())\n\tdefer func() {\n\t\tend := time.Now()\n\t\tscopes.CI.Infof(\"=== DONE: Test: '%s[%s] (%v)' ===\", rt.SuiteContext().Settings().TestID, t.t.Name(), end.Sub(start))\n\t}()\n\n\tctx := NewContext(t.t, t.labels...)\n\tdefer ctx.Done(t.t)\n\tfn(ctx)\n}", "func TestMain(t *testing.T) {\n}", "func (t *Tester) Test() error {\n\tif err := t.pretestSetup(); err != nil {\n\t\treturn err\n\t}\n\n\te2eTestArgs := []string{\n\t\t\"--host=\" + t.host,\n\t\t\"--provider=\" + 
t.provider,\n\t\t\"--kubeconfig=\" + t.kubeconfigPath,\n\t\t\"--ginkgo.flakeAttempts=\" + t.flakeAttempts,\n\t\t\"--ginkgo.skip=\" + t.skipRegex,\n\t\t\"--ginkgo.focus=\" + t.focusRegex,\n\t}\n\tginkgoArgs := append([]string{\n\t\t\"--nodes=\" + t.parallel,\n\t\te2eTestPath,\n\t\t\"--\"}, e2eTestArgs...)\n\n\tlog.Printf(\"Running ginkgo test as %s %+v\", binary, ginkgoArgs)\n\tcmd := exec.Command(binary, ginkgoArgs...)\n\texec.InheritOutput(cmd)\n\treturn cmd.Run()\n}", "func TestMain(m *testing.M) {\n\n\tos.Exit(m.Run())\n}", "func (t *Test) Run(tc *TestSuite) error {\n\n\tmqutil.Logger.Print(\"\\n--- \" + t.Name)\n\tfmt.Printf(\"\\nRunning test case: %s\\n\", t.Name)\n\terr := t.ResolveParameters(tc)\n\tif err != nil {\n\t\tfmt.Printf(\"... Fail\\n... %s\\n\", err.Error())\n\t\treturn err\n\t}\n\n\treq := resty.R()\n\tif len(tc.ApiToken) > 0 {\n\t\treq.SetAuthToken(tc.ApiToken)\n\t} else if len(tc.Username) > 0 {\n\t\treq.SetBasicAuth(tc.Username, tc.Password)\n\t}\n\n\tpath := GetBaseURL(t.db.Swagger) + t.SetRequestParameters(req)\n\tvar resp *resty.Response\n\n\tt.startTime = time.Now()\n\tswitch t.Method {\n\tcase mqswag.MethodGet:\n\t\tresp, err = req.Get(path)\n\tcase mqswag.MethodPost:\n\t\tresp, err = req.Post(path)\n\tcase mqswag.MethodPut:\n\t\tresp, err = req.Put(path)\n\tcase mqswag.MethodDelete:\n\t\tresp, err = req.Delete(path)\n\tcase mqswag.MethodPatch:\n\t\tresp, err = req.Patch(path)\n\tcase mqswag.MethodHead:\n\t\tresp, err = req.Head(path)\n\tcase mqswag.MethodOptions:\n\t\tresp, err = req.Options(path)\n\tdefault:\n\t\treturn mqutil.NewError(mqutil.ErrInvalid, fmt.Sprintf(\"Unknown method in test %s: %v\", t.Name, t.Method))\n\t}\n\tt.stopTime = time.Now()\n\tfmt.Printf(\"... call completed: %f seconds\\n\", t.stopTime.Sub(t.startTime).Seconds())\n\n\tif err != nil {\n\t\tt.err = mqutil.NewError(mqutil.ErrHttp, err.Error())\n\t} else {\n\t\tmqutil.Logger.Print(resp.Status())\n\t\tmqutil.Logger.Println(string(resp.Body()))\n\t}\n\terr = t.ProcessResult(resp)\n\treturn err\n}", "func (t *Test) Run(ctx context.Context, opts ...TestOption) (*TestResult, error) {\n\tparsedOpts := &testOptions{\n\t\tvars: &starlark.Dict{},\n\t}\n\tfor _, opt := range opts {\n\t\topt.applyTest(parsedOpts)\n\t}\n\n\tthread := &starlark.Thread{\n\t\tPrint: skyPrint,\n\t}\n\tthread.SetLocal(\"context\", ctx)\n\n\tassertModule := assertmodule.AssertModule()\n\ttestCtx := &starlarkstruct.Module{\n\t\tName: \"skycfg_test_ctx\",\n\t\tMembers: starlark.StringDict(map[string]starlark.Value{\n\t\t\t\"vars\": parsedOpts.vars,\n\t\t\t\"assert\": assertModule,\n\t\t}),\n\t}\n\targs := starlark.Tuple([]starlark.Value{testCtx})\n\n\tresult := TestResult{\n\t\tTestName: t.Name(),\n\t}\n\n\tstartTime := time.Now()\n\t_, err := starlark.Call(thread, t.callable, args, nil)\n\tresult.Duration = time.Since(startTime)\n\tif err != nil {\n\t\t// if there is no assertion error, there was something wrong with the execution itself\n\t\tif len(assertModule.Failures) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// there should only be one failure, because each test run gets its own *TestContext\n\t\t// and each assertion failure halts execution.\n\t\tif len(assertModule.Failures) > 1 {\n\t\t\tpanic(\"A test run should only have one assertion failure. 
Something went wrong with the test infrastructure.\")\n\t\t}\n\t\tresult.Failure = assertModule.Failures[0]\n\t}\n\n\treturn &result, nil\n}", "func TestMain(m *testing.M) {\n\tos.Exit(m.Run())\n}", "func TestMain(m *testing.M) {\n\tos.Exit(m.Run())\n}", "func TestMain(m *testing.M) {\n\tos.Exit(m.Run())\n}", "func (s *IntegrationSuite) TestRun(c *C) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\t// Execute e2e workflow\n\tlog.Info().Print(\"Running e2e integration test.\", field.M{\"app\": s.name, \"testName\": c.TestName()})\n\n\t// Check config\n\terr := s.app.Init(ctx)\n\tif err != nil {\n\t\tlog.Info().Print(\"Skipping integration test.\", field.M{\"app\": s.name, \"reason\": err.Error()})\n\t\ts.skip = true\n\t\tc.Skip(err.Error())\n\t}\n\n\t// Create namespace\n\terr = createNamespace(s.cli, s.namespace)\n\tc.Assert(err, IsNil)\n\n\t// Create profile\n\tif s.profile == nil {\n\t\tlog.Info().Print(\"Skipping integration test. Could not create profile. Please check if required credentials are set.\", field.M{\"app\": s.name})\n\t\ts.skip = true\n\t\tc.Skip(\"Could not create a Profile\")\n\t}\n\tprofileName := s.createProfile(c, ctx)\n\n\t// Install db\n\terr = s.app.Install(ctx, s.namespace)\n\tc.Assert(err, IsNil)\n\n\t// Check if ready\n\tok, err := s.app.IsReady(ctx)\n\tc.Assert(err, IsNil)\n\tc.Assert(ok, Equals, true)\n\n\t// Create blueprint\n\tbp := s.bp.Blueprint()\n\tc.Assert(bp, NotNil)\n\t_, err = s.crCli.Blueprints(kontroller.namespace).Create(ctx, bp, metav1.CreateOptions{})\n\tc.Assert(err, IsNil)\n\n\tvar configMaps, secrets map[string]crv1alpha1.ObjectReference\n\ttestEntries := 3\n\t// Add test entries to DB\n\tif a, ok := s.app.(app.DatabaseApp); ok {\n\t\t// wait for application to be actually ready\n\t\terr = pingAppAndWait(ctx, a)\n\t\tc.Assert(err, IsNil)\n\n\t\terr = a.Reset(ctx)\n\t\tc.Assert(err, IsNil)\n\n\t\terr = a.Initialize(ctx)\n\t\tc.Assert(err, IsNil)\n\n\t\t// Add few entries\n\t\tfor i := 0; i < testEntries; i++ {\n\t\t\tc.Assert(a.Insert(ctx), IsNil)\n\t\t}\n\n\t\tcount, err := a.Count(ctx)\n\t\tc.Assert(err, IsNil)\n\t\tc.Assert(count, Equals, testEntries)\n\t}\n\n\t// Get Secret and ConfigMap object references\n\tif a, ok := s.app.(app.ConfigApp); ok {\n\t\tconfigMaps = a.ConfigMaps()\n\t\tsecrets = a.Secrets()\n\t}\n\n\t// Validate Blueprint\n\tvalidateBlueprint(c, *bp, configMaps, secrets)\n\n\t// Create ActionSet specs\n\tas := newActionSet(bp.GetName(), profileName, kontroller.namespace, s.app.Object(), configMaps, secrets)\n\t// Take backup\n\tbackup := s.createActionset(ctx, c, as, \"backup\", nil)\n\tc.Assert(len(backup), Not(Equals), 0)\n\n\t// Save timestamp for PITR\n\tvar restoreOptions map[string]string\n\tif b, ok := s.bp.(app.PITRBlueprinter); ok {\n\t\tpitr := b.FormatPITR(time.Now())\n\t\tlog.Info().Print(\"Saving timestamp for PITR\", field.M{\"pitr\": pitr})\n\t\trestoreOptions = map[string]string{\n\t\t\t\"pitr\": pitr,\n\t\t}\n\t\t// Add few more entries with timestamp > pitr\n\t\ttime.Sleep(time.Second)\n\t\tif a, ok := s.app.(app.DatabaseApp); ok {\n\t\t\tc.Assert(a.Insert(ctx), IsNil)\n\t\t\tc.Assert(a.Insert(ctx), IsNil)\n\n\t\t\tcount, err := a.Count(ctx)\n\t\t\tc.Assert(err, IsNil)\n\t\t\tc.Assert(count, Equals, testEntries+2)\n\t\t}\n\t}\n\n\t// Reset DB\n\tif a, ok := s.app.(app.DatabaseApp); ok {\n\t\terr = a.Reset(ctx)\n\t\tc.Assert(err, IsNil)\n\t}\n\n\t// Restore backup\n\tpas, err := s.crCli.ActionSets(kontroller.namespace).Get(ctx, backup, metav1.GetOptions{})\n\tc.Assert(err, 
IsNil)\n\ts.createActionset(ctx, c, pas, \"restore\", restoreOptions)\n\n\t// Verify data\n\tif a, ok := s.app.(app.DatabaseApp); ok {\n\t\t// wait for application to be actually ready\n\t\terr = pingAppAndWait(ctx, a)\n\t\tc.Assert(err, IsNil)\n\n\t\tcount, err := a.Count(ctx)\n\t\tc.Assert(err, IsNil)\n\t\tc.Assert(count, Equals, testEntries)\n\t}\n\n\t// Delete snapshots\n\ts.createActionset(ctx, c, pas, \"delete\", nil)\n}", "func runTest(ctx context.Context, c autotest.Config, a *autotest.AutoservArgs, w io.Writer) (*Result, error) {\n\tr, err := runTask(ctx, c, a, w)\n\tif !r.Started {\n\t\treturn r, err\n\t}\n\tp := filepath.Join(a.ResultsDir, autoservPidFile)\n\tif i, err2 := readTestsFailed(p); err2 != nil {\n\t\tif err == nil {\n\t\t\terr = err2\n\t\t}\n\t} else {\n\t\tr.TestsFailed = i\n\t}\n\tif err2 := appendJobFinished(a.ResultsDir); err == nil {\n\t\terr = err2\n\t}\n\treturn r, err\n}", "func TestMain(t *testing.T) {\n\tvar ran bool\n\trun = func() {\n\t\tran = true\n\t}\n\tmain()\n\tif !ran {\n\t\tt.Error(\"Expected Run() to be called, but it wasn't\")\n\t}\n}", "func TestMain(t *testing.T) {\n\tvar ran bool\n\trun = func() {\n\t\tran = true\n\t}\n\tmain()\n\tif !ran {\n\t\tt.Error(\"Expected Run() to be called, but it wasn't\")\n\t}\n}", "func (controller TestController) RunTest() (setupErr error, testErr error) {\n\ttests := controller.testSuite.GetTests()\n\tlogrus.Debugf(\"Test configs: %v\", tests)\n\ttest, found := tests[controller.testName]\n\tif !found {\n\t\treturn stacktrace.NewError(\"Nonexistent test: %v\", controller.testName), nil\n\t}\n\n\tnetworkLoader, err := test.GetNetworkLoader()\n\tif err != nil {\n\t\treturn stacktrace.Propagate(err, \"Could not get network loader\"), nil\n\t}\n\n\tlogrus.Info(\"Connecting to Docker environment...\")\n\t// Initialize a Docker client\n\tdockerClient, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())\n\tif err != nil {\n\t\treturn stacktrace.Propagate(err,\"Failed to initialize Docker client from environment.\"), nil\n\t}\n\tdockerManager, err := docker.NewDockerManager(logrus.StandardLogger(), dockerClient)\n\tif err != nil {\n\t\treturn stacktrace.Propagate(err, \"An error occurred when constructing the Docker manager\"), nil\n\t}\n\tlogrus.Info(\"Connected to Docker environment\")\n\n\tlogrus.Infof(\"Configuring test network in Docker network %v...\", controller.networkId)\n\talreadyTakenIps := map[string]bool{\n\t\tcontroller.gatewayIp: true,\n\t\tcontroller.testControllerIp: true,\n\t}\n\tfreeIpTracker, err := networks.NewFreeIpAddrTracker(logrus.StandardLogger(), controller.subnetMask, alreadyTakenIps)\n\tif err != nil {\n\t\treturn stacktrace.Propagate(err, \"An error occurred creating the free IP address tracker\"), nil\n\t}\n\n\tbuilder := networks.NewServiceNetworkBuilder(\n\t\t\tdockerManager,\n\t\t\tcontroller.networkId,\n\t\t\tfreeIpTracker,\n\t\t\tcontroller.testVolumeName,\n\t\t\tcontroller.testVolumeFilepath)\n\tif err := networkLoader.ConfigureNetwork(builder); err != nil {\n\t\treturn stacktrace.Propagate(err, \"Could not configure test network in Docker network %v\", controller.networkId), nil\n\t}\n\tnetwork := builder.Build()\n\tdefer func() {\n\t\tlogrus.Info(\"Stopping test network...\")\n\t\terr := network.RemoveAll(CONTAINER_STOP_TIMEOUT)\n\t\tif err != nil {\n\t\t\tlogrus.Error(\"An error occurred stopping the network\")\n\t\t\tfmt.Fprintln(logrus.StandardLogger().Out, err)\n\t\t} else {\n\t\t\tlogrus.Info(\"Successfully stopped the test 
network\")\n\t\t}\n\t}()\n\tlogrus.Info(\"Test network configured\")\n\n\tlogrus.Info(\"Initializing test network...\")\n\tavailabilityCheckers, err := networkLoader.InitializeNetwork(network);\n\tif err != nil {\n\t\treturn stacktrace.Propagate(err, \"An error occurred initialized the network to its starting state\"), nil\n\t}\n\tlogrus.Info(\"Test network initialized\")\n\n\t// Second pass: wait for all services to come up\n\tlogrus.Info(\"Waiting for test network to become available...\")\n\tfor serviceId, availabilityChecker := range availabilityCheckers {\n\t\tlogrus.Debugf(\"Waiting for service %v to become available...\", serviceId)\n\t\tif err := availabilityChecker.WaitForStartup(); err != nil {\n\t\t\treturn stacktrace.Propagate(err, \"An error occurred waiting for service with ID %v to start up\", serviceId), nil\n\t\t}\n\t\tlogrus.Debugf(\"Service %v is available\", serviceId)\n\t}\n\tlogrus.Info(\"Test network is available\")\n\n\tlogrus.Info(\"Executing test...\")\n\tuntypedNetwork, err := networkLoader.WrapNetwork(network)\n\tif err != nil {\n\t\treturn stacktrace.Propagate(err, \"Error occurred wrapping network in user-defined network type\"), nil\n\t}\n\n\ttestResultChan := make(chan error)\n\n\tgo func() {\n\t\ttestResultChan <- runTest(test, untypedNetwork)\n\t}()\n\n\t// Time out the test so a poorly-written test doesn't run forever\n\ttestTimeout := test.GetExecutionTimeout()\n\tvar timedOut bool\n\tvar testResultErr error\n\tselect {\n\tcase testResultErr = <- testResultChan:\n\t\tlogrus.Tracef(\"Test returned result before timeout: %v\", testResultErr)\n\t\ttimedOut = false\n\tcase <- time.After(testTimeout):\n\t\tlogrus.Tracef(\"Hit timeout %v before getting a result from the test\", testTimeout)\n\t\ttimedOut = true\n\t}\n\n\tlogrus.Tracef(\"After running test w/timeout: resultErr: %v, timedOut: %v\", testResultErr, timedOut)\n\n\tif timedOut {\n\t\treturn nil, stacktrace.NewError(\"Timed out after %v waiting for test to complete\", testTimeout)\n\t}\n\n\tlogrus.Info(\"Test execution completed\")\n\n\tif testResultErr != nil {\n\t\treturn nil, stacktrace.Propagate(testResultErr, \"An error occurred when running the test\")\n\t}\n\n\treturn nil, nil\n}", "func TestMain(m *testing.M) {\n\tsetup()\n\tcode := m.Run() \n os.Exit(code)\n}", "func (r *runner) UpdateTestRun(ctrl controller.Interface, testRun *v1alpha1.TestRun) error {\n\n\tif testRun.Status.Status == v1alpha1.TestRunComplete {\n\t\tlog.Printf(\" | '%v/%v' is already Complete - Skipping\", testRun.Namespace, testRun.Name)\n\t\treturn nil\n\t}\n\n\tif testRun.Status.Status == \"\" {\n\t\terr := initializeStatus(ctrl, testRun)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tstats := runStats{0, 0}\n\tfor _, record := range testRun.Status.Records {\n\t\tif record.EndTime != nil {\n\t\t\tstats.CompletedCount++\n\t\t\tif record.Result != string(corev1.PodSucceeded) {\n\t\t\t\tstats.FailCount++\n\t\t\t}\n\t\t}\n\t}\n\tif stats.CompletedCount == len(testRun.Status.Records) {\n\t\treturn testRunComplete(ctrl, testRun, stats)\n\t}\n\n\tlog.Printf(\"Running '%v/%v'\", testRun.Namespace, testRun.Name)\n\n\tlog.Printf(\" | %v/%v\", testRun.Namespace, testRun.Name)\n\n\ttests, err := getTestsForTestRun(ctrl, testRun)\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error getting list of tests: %s\", err.Error())\n\t}\n\tlog.Printf(\" | Test Count: %v\", len(tests))\n\n\tJobsSlots := getJobSlots(testRun)\n\n\treturn runNextNTests(ctrl, testRun, tests, JobsSlots)\n}", "func TestMain(m *testing.M) {\n\tcode := 
m.Run()\n\tos.Exit(code)\n}", "func TestMain(m *testing.M) {\n\tcode := m.Run()\n\tos.Exit(code)\n}", "func TestSimpleRun(t *testing.T) {\n\tctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)\n\tdefer cancel()\n\n\tns := \"sonobuoy-\" + strings.ToLower(t.Name())\n\tdefer cleanup(t, ns)\n\n\targs := fmt.Sprintf(\"run --image-pull-policy IfNotPresent --wait -p testImage/yaml/job-junit-passing-singlefile.yaml -n %v\", ns)\n\terr, _, stderr := runSonobuoyCommandWithContext(ctx, t, args)\n\n\tif err != nil {\n\t\tt.Errorf(\"Sonobuoy exited with an error: %q\\n\", err)\n\t\tt.Log(stderr.String())\n\t}\n}", "func TestMain(m *testing.M) {\n\tgin.SetMode(gin.TestMode)\n\tos.Exit(m.Run()) // run the tests, then exit\n}", "func (r FakeTestRunner) RunTest(ctx context.Context, test v1alpha3.TestConfiguration, podSec bool) (result *v1alpha3.TestStatus, err error) {\n\tselect {\n\tcase <-time.After(r.Sleep):\n\t\treturn r.TestStatus, r.Error\n\tcase <-ctx.Done():\n\t\treturn nil, ctx.Err()\n\t}\n}", "func (c *TestController) RunTest(id TestID) error {\n\n\t// Check if the test is already running.\n\tif job := c.lookupJob(id); job != nil {\n\t\treturn fmt.Errorf(\"test %s already running\", job.Name)\n\t}\n\n\t// First we need determine the runtime configuration.\n\t//\n\t// If we find multiple configurations, we don't ask the user to choose,\n\t// but simply use the last candidate. The assumption is: The latest\n\t// configuration has a higher probability to be the complete one.\n\tcandidates := c.suites.Owners(protocol.DocumentURI(id.URI))\n\tif len(candidates) == 0 {\n\t\treturn fmt.Errorf(\"cannot run %s: no configuration found\", id.URI)\n\t}\n\tif len(candidates) > 1 {\n\t\tlog.Printf(\"multiple configurations found for %s: %v\\n\", id.URI, candidates)\n\t}\n\tconfig := candidates[0].Config\n\tlog.Printf(\"using configuration from %s\\n\", config.Root)\n\n\t// TODO(5nord): Use project.ApplyPreset to retrieve the configuration,\n\t// like expected verdict for the job.\n\tjob := &control.Job{\n\t\tName: id.Name,\n\t\tConfig: config,\n\t}\n\n\tc.mu.Lock()\n\tc.running[id] = job\n\tc.mu.Unlock()\n\n\tc.jobs <- job\n\n\treturn nil\n}", "func Test() error {\n\treturn sh.RunV(\"go\", \"test\", \"-v\", \"-cover\", \"./...\", \"-coverprofile=coverage.out\")\n}", "func (runner *suiteRunner) runTest(method *reflect.FuncValue) *C {\n c := runner.forkTest(method)\n <-c.done\n return c\n}", "func TestMain(m *testing.M) {\n\tflag.Parse()\n\t// if *databaseTest {\n\tif isIntegrationTest() {\n\t\tsetupTestDB()\n\t}\n\texitCode := m.Run()\n\tos.Exit(exitCode)\n}", "func runTest(test TestCase) TestResult {\n\t// cut = command under test\n\tcut := cmd.NewCommand(test.Command.Cmd)\n\tcut.SetTimeout(test.Command.Timeout)\n\tcut.Dir = test.Command.Dir\n\tfor k, v := range test.Command.Env {\n\t\tcut.AddEnv(k, v)\n\t}\n\n\tif err := cut.Execute(); err != nil {\n\t\tlog.Println(test.Title, \" failed \", err.Error())\n\t\ttest.Result = CommandResult{\n\t\t\tError: err,\n\t\t}\n\n\t\treturn TestResult{\n\t\t\tTestCase: test,\n\t\t}\n\t}\n\n\tlog.Println(\"title: '\"+test.Title+\"'\", \" Command: \", cut.Cmd)\n\tlog.Println(\"title: '\"+test.Title+\"'\", \" Directory: \", cut.Dir)\n\tlog.Println(\"title: '\"+test.Title+\"'\", \" Env: \", cut.Env)\n\n\t// Write test result\n\ttest.Result = CommandResult{\n\t\tExitCode: cut.ExitCode(),\n\t\tStdout: strings.Replace(cut.Stdout(), \"\\r\\n\", \"\\n\", -1),\n\t\tStderr: strings.Replace(cut.Stderr(), \"\\r\\n\", \"\\n\", -1),\n\t}\n\n\tlog.Println(\"title: 
'\"+test.Title+\"'\", \" ExitCode: \", test.Result.ExitCode)\n\tlog.Println(\"title: '\"+test.Title+\"'\", \" Stdout: \", test.Result.Stdout)\n\tlog.Println(\"title: '\"+test.Title+\"'\", \" Stderr: \", test.Result.Stderr)\n\n\treturn Validate(test)\n}", "func runScenarioTest(assert *asserts.Asserts, param scenarioParam) {\n\tapplog.Infof(param.String())\n\tmonitoring.Reset()\n\n\t// Prepare test.\n\tdone := make(chan bool)\n\tprovider := config.NewMapConfigurationProvider()\n\tconfig := config.New(provider)\n\n\tconfig.Set(\"backend\", \"single\")\n\n\tassert.Nil(ebus.Init(config), \"single node backend started\")\n\n\tStartShopAgent()\n\tStartWarehouseAgent()\n\tStartManufacturerAgent()\n\tStartDeliveryAgent()\n\tStartWaitAgent(done)\n\n\t// Run orders.\n\tfor on := 0; on < param.Orders; on++ {\n\t\torder := generateOrder(on)\n\t\terr := ebus.Emit(order, \"OrderReceived\")\n\t\tassert.Nil(err, \"order emitted\")\n\t}\n\n\tselect {\n\tcase <-done:\n\t\tapplog.Infof(\"order processing done\")\n\tcase <-time.After(param.Timeout):\n\t\tassert.Fail(\"timeout during wait for processed orders\")\n\t}\n\n\t// Finalize test.\n\terr := ebus.Stop()\n\tassert.Nil(err, \"stopped the bus\")\n\ttime.Sleep(time.Second)\n\tmonitoring.MeasuringPointsPrintAll()\n}", "func (Tests) Run(ctx context.Context) error {\n\targ := BuildDockerComposeArgs(ProjectName, ProjectType, \"test\", DockerComposeTestFile)\n\targ = append(arg, \"run\")\n\targ = append(arg,\n\t\t\"--rm\",\n\t\t\"--use-aliases\",\n\t)\n\targ = append(arg, \"app\", \"go\", \"test\", \"-mod=vendor\", \"-v\", \"-cover\")\n\tif err := Exec(ComposeBin, append(arg, \"./service\")...); err != nil {\n\t\treturn err\n\t}\n\tif err := Exec(ComposeBin, append(arg, \"./...\")...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (s Suite) Run(t *testing.T) bool {\n\tt.Helper()\n\treturn s(\"\", nil, func(c *config) { c.t = t })\n}", "func Test(t *testing.T, command Runner, testCases []Case) {\n\tt.Helper()\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.Name, func(t *testing.T) {\n\t\t\tt.Helper() // TODO: make Helper working for subtests: issue #24128\n\n\t\t\tstdout := &bytes.Buffer{}\n\t\t\tstderr := &bytes.Buffer{}\n\n\t\t\tcommand.SetStdout(stdout)\n\t\t\tcommand.SetStderr(stderr)\n\n\t\t\tm := newMatch(t, tc.wantFail)\n\n\t\t\tif tc.WantFile != \"\" {\n\t\t\t\tif !m.removeFile(tc.WantFile) {\n\t\t\t\t\ttc.WantFile = \"\" // stop testing File match\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tvar gotErr string\n\t\t\tgotPanic := m.run(func() {\n\t\t\t\tif err := command.Run(tc.Args); err != nil {\n\t\t\t\t\tgotErr = err.Error()\n\t\t\t\t}\n\t\t\t})\n\n\t\t\tif tc.WantFile != \"\" {\n\t\t\t\tif gotFile, ext, ok := m.getFile(tc.WantFile); ok {\n\t\t\t\t\tm.match(\"File golden\"+ext, gotFile, \"golden\"+ext)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tm.match(\"WantStdout\", stdout.String(), tc.WantStdout)\n\t\t\tm.match(\"WantStderr\", stderr.String(), tc.WantStderr)\n\t\t\tm.match(\"WantPanic\", gotPanic, tc.WantPanic)\n\t\t\tm.match(\"WantErr\", gotErr, tc.WantErr)\n\t\t\tm.equal(\"WantExitCode\", command.ExitCode(), tc.WantExitCode)\n\n\t\t\tm.done()\n\t\t})\n\t}\n}", "func TestRunTestAllReal(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping test in short mode.\")\n\t}\n\n\ttaskData := agent.TaskData{\n\t\tStringValues: map[string]string{\n\t\t\tCFG_TEST_TYPE: CFG_TYPE_ALL,\n\t\t\tCFG_SERVER_HOST: \"speedtest.nyc.rr.com:8080\",\n\t\t\tCFG_SERVER_ID: \"16976\",\n\t\t},\n\t\tIntValues: map[string]int{\n\t\t\tCFG_SERVER_ID: 16976,\n\t\t\tCFG_TIME_OUT: 
5,\n\t\t},\n\t\tFloatValues: map[string]float64{CFG_MAX_SECONDS: 6},\n\t\tIntSlices: map[string][]int{\n\t\t\tCFG_DOWNLOAD_SIZES: {245388, 505544},\n\t\t\tCFG_UPLOAD_SIZES: {32768, 65536},\n\t\t},\n\t}\n\n\tspdTestRunner := SpeedTestRunner{}\n\n\tspTestResults, err := spdTestRunner.Run(taskData)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected Error: \\n%s\", err.Error())\n\t}\n\n\tresults := spTestResults.Latency.Seconds()\n\n\tif results <= 0 {\n\t\tt.Errorf(\"Error: Expected a positive Latency result, but got: %f\", results)\n\t} else {\n\t\tfmt.Printf(\"\\nLatency test results for server %d ... %f\\n\", taskData.IntValues[CFG_SERVER_ID], results)\n\t}\n\n\tresults = spTestResults.Download\n\tif results <= 0 {\n\t\tt.Errorf(\"Error: Expected a positive Download result, but got: %f\", results)\n\t} else {\n\t\tfmt.Printf(\"\\nDownload test results for server %d ... %f\\n\", taskData.IntValues[CFG_SERVER_ID], results)\n\t}\n\n\tresults = spTestResults.Upload\n\tif results <= 0 {\n\t\tt.Errorf(\"Error: Expected a positive Upload result, but got: %f\", results)\n\t} else {\n\t\tfmt.Printf(\"\\nUpload test results for server %d ... %f\\n\", taskData.IntValues[CFG_SERVER_ID], results)\n\t}\n}", "func TestExecute(t *testing.T) {\n\tctx := context.Background()\n\n\t// Clear pre-existing golden files to avoid leaving stale ones around.\n\tif *updateGoldens {\n\t\tfiles, err := filepath.Glob(filepath.Join(*goldensDir, \"*.golden.json\"))\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tfor _, f := range files {\n\t\t\tif err := os.Remove(f); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}\n\t}\n\n\ttestCases := []struct {\n\t\tname string\n\t\tflags testsharderFlags\n\t\ttestSpecs []build.TestSpec\n\t\ttestDurations []build.TestDuration\n\t\ttestList []build.TestListEntry\n\t\tmodifiers []testsharder.TestModifier\n\t\tpackageRepos []build.PackageRepo\n\t\taffectedTests []string\n\t}{\n\t\t{\n\t\t\tname: \"no tests\",\n\t\t},\n\t\t{\n\t\t\tname: \"mixed device types\",\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"foo\"),\n\t\t\t\thostTestSpec(\"bar\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"multiply\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\ttargetDurationSecs: 5,\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"foo\"),\n\t\t\t\tfuchsiaTestSpec(\"bar\"),\n\t\t\t},\n\t\t\tmodifiers: []testsharder.TestModifier{\n\t\t\t\t{\n\t\t\t\t\tName: \"foo\",\n\t\t\t\t\tTotalRuns: 50,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"bar\",\n\t\t\t\t},\n\t\t\t},\n\t\t\ttestDurations: []build.TestDuration{\n\t\t\t\t{\n\t\t\t\t\tName: \"*\",\n\t\t\t\t\tMedianDuration: time.Millisecond,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"affected tests\",\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"affected-hermetic\"),\n\t\t\t\tfuchsiaTestSpec(\"not-affected\"),\n\t\t\t},\n\t\t\ttestList: []build.TestListEntry{\n\t\t\t\ttestListEntry(\"affected-hermetic\", true),\n\t\t\t\ttestListEntry(\"not-affected\", false),\n\t\t\t},\n\t\t\taffectedTests: []string{\n\t\t\t\tpackageURL(\"affected-hermetic\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"affected nonhermetic tests\",\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"affected-nonhermetic\"),\n\t\t\t\tfuchsiaTestSpec(\"not-affected\"),\n\t\t\t},\n\t\t\taffectedTests: []string{\n\t\t\t\tpackageURL(\"affected-nonhermetic\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"target test count\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\ttargetTestCount: 2,\n\t\t\t},\n\t\t\ttestSpecs: 
[]build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"foo1\"),\n\t\t\t\tfuchsiaTestSpec(\"foo2\"),\n\t\t\t\tfuchsiaTestSpec(\"foo3\"),\n\t\t\t\tfuchsiaTestSpec(\"foo4\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"sharding by time\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\ttargetDurationSecs: int((4 * time.Minute).Seconds()),\n\t\t\t\tperTestTimeoutSecs: int((10 * time.Minute).Seconds()),\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"slow\"),\n\t\t\t\tfuchsiaTestSpec(\"fast1\"),\n\t\t\t\tfuchsiaTestSpec(\"fast2\"),\n\t\t\t\tfuchsiaTestSpec(\"fast3\"),\n\t\t\t},\n\t\t\ttestDurations: []build.TestDuration{\n\t\t\t\t{\n\t\t\t\t\tName: \"*\",\n\t\t\t\t\tMedianDuration: 2 * time.Second,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: packageURL(\"slow\"),\n\t\t\t\t\tMedianDuration: 5 * time.Minute,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"max shards per env\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\t// Given expected test durations of 4 minutes for each test it's\n\t\t\t\t// impossible to satisfy both the target shard duration and the\n\t\t\t\t// max shards per environment, so the target shard duration\n\t\t\t\t// should effectively be ignored.\n\t\t\t\ttargetDurationSecs: int((5 * time.Minute).Seconds()),\n\t\t\t\tmaxShardsPerEnvironment: 2,\n\t\t\t\tskipUnaffected: true,\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"affected1\"),\n\t\t\t\tfuchsiaTestSpec(\"affected2\"),\n\t\t\t\tfuchsiaTestSpec(\"affected3\"),\n\t\t\t\tfuchsiaTestSpec(\"affected4\"),\n\t\t\t\tfuchsiaTestSpec(\"unaffected1\"),\n\t\t\t\tfuchsiaTestSpec(\"unaffected2\"),\n\t\t\t\tfuchsiaTestSpec(\"nonhermetic1\"),\n\t\t\t\tfuchsiaTestSpec(\"nonhermetic2\"),\n\t\t\t},\n\t\t\ttestDurations: []build.TestDuration{\n\t\t\t\t{\n\t\t\t\t\tName: \"*\",\n\t\t\t\t\tMedianDuration: 4 * time.Minute,\n\t\t\t\t},\n\t\t\t},\n\t\t\taffectedTests: []string{\n\t\t\t\tpackageURL(\"affected1\"),\n\t\t\t\tpackageURL(\"affected2\"),\n\t\t\t\tpackageURL(\"affected3\"),\n\t\t\t\tpackageURL(\"affected4\"),\n\t\t\t},\n\t\t\ttestList: []build.TestListEntry{\n\t\t\t\ttestListEntry(\"affected1\", true),\n\t\t\t\ttestListEntry(\"affected2\", true),\n\t\t\t\ttestListEntry(\"affected3\", true),\n\t\t\t\ttestListEntry(\"affected4\", true),\n\t\t\t\ttestListEntry(\"unaffected1\", true),\n\t\t\t\ttestListEntry(\"unaffected2\", true),\n\t\t\t\ttestListEntry(\"nonhermetic1\", false),\n\t\t\t\ttestListEntry(\"nonhermetic2\", false),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"hermetic deps\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\thermeticDeps: true,\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"foo\"),\n\t\t\t\tfuchsiaTestSpec(\"bar\"),\n\t\t\t\tfuchsiaTestSpec(\"baz\"),\n\t\t\t},\n\t\t\tpackageRepos: []build.PackageRepo{\n\t\t\t\t{\n\t\t\t\t\tPath: \"pkg_repo1\",\n\t\t\t\t\tBlobs: filepath.Join(\"pkg_repo1\", \"blobs\"),\n\t\t\t\t\tTargets: filepath.Join(\"pkg_repo1\", \"targets.json\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"ffx deps\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\tffxDeps: true,\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"foo\"),\n\t\t\t\tfuchsiaTestSpec(\"bar\"),\n\t\t\t\tfuchsiaTestSpec(\"baz\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"multiply affected test\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\taffectedTestsMultiplyThreshold: 3,\n\t\t\t\ttargetDurationSecs: int(2 * time.Minute.Seconds()),\n\t\t\t},\n\t\t\ttestSpecs: 
[]build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"multiplied-affected-test\"),\n\t\t\t\tfuchsiaTestSpec(\"affected-test\"),\n\t\t\t\tfuchsiaTestSpec(\"unaffected-test\"),\n\t\t\t},\n\t\t\ttestDurations: []build.TestDuration{\n\t\t\t\t{\n\t\t\t\t\tName: \"*\",\n\t\t\t\t\tMedianDuration: time.Second,\n\t\t\t\t},\n\t\t\t},\n\t\t\taffectedTests: []string{\n\t\t\t\tpackageURL(\"multiplied-affected-test\"),\n\t\t\t\tpackageURL(\"affected-test\"),\n\t\t\t},\n\t\t\tmodifiers: []testsharder.TestModifier{\n\t\t\t\t{\n\t\t\t\t\tName: \"multiplied-affected-test\",\n\t\t\t\t\tTotalRuns: 100,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"test list with tags\",\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"hermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"nonhermetic-test\"),\n\t\t\t},\n\t\t\ttestList: []build.TestListEntry{\n\t\t\t\ttestListEntry(\"hermetic-test\", true),\n\t\t\t\ttestListEntry(\"nonhermetic-test\", false),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"skip unaffected tests\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\tskipUnaffected: true,\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"affected-hermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"unaffected-hermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"affected-nonhermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"unaffected-nonhermetic-test\"),\n\t\t\t},\n\t\t\ttestList: []build.TestListEntry{\n\t\t\t\ttestListEntry(\"affected-hermetic-test\", true),\n\t\t\t\ttestListEntry(\"unaffected-hermetic-test\", true),\n\t\t\t\ttestListEntry(\"affected-nonhermetic-test\", false),\n\t\t\t\ttestListEntry(\"unaffected-nonhermetic-test\", false),\n\t\t\t},\n\t\t\taffectedTests: []string{\n\t\t\t\tfuchsiaTestSpec(\"affected-hermetic-test\").Name,\n\t\t\t\tfuchsiaTestSpec(\"affected-nonhermetic-test\").Name,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"run all tests if no affected tests\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\tskipUnaffected: true,\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"affected-hermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"unaffected-hermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"affected-nonhermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"unaffected-nonhermetic-test\"),\n\t\t\t},\n\t\t\ttestList: []build.TestListEntry{\n\t\t\t\ttestListEntry(\"affected-hermetic-test\", true),\n\t\t\t\ttestListEntry(\"unaffected-hermetic-test\", true),\n\t\t\t\ttestListEntry(\"affected-nonhermetic-test\", false),\n\t\t\t\ttestListEntry(\"unaffected-nonhermetic-test\", false),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"multiply unaffected hermetic tests\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\tskipUnaffected: true,\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"unaffected-hermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"affected-nonhermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"unaffected-hermetic-multiplied-test\"),\n\t\t\t},\n\t\t\ttestList: []build.TestListEntry{\n\t\t\t\ttestListEntry(\"unaffected-hermetic-test\", true),\n\t\t\t\ttestListEntry(\"affected-nonhermetic-test\", false),\n\t\t\t\ttestListEntry(\"unaffected-hermetic-multiplied-test\", true),\n\t\t\t},\n\t\t\taffectedTests: []string{\n\t\t\t\tfuchsiaTestSpec(\"affected-nonhermetic-test\").Name,\n\t\t\t},\n\t\t\tmodifiers: []testsharder.TestModifier{\n\t\t\t\t{\n\t\t\t\t\tName: \"unaffected-hermetic-multiplied-test\",\n\t\t\t\t\tTotalRuns: 100,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"various modifiers\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\ttargetDurationSecs: 5,\n\t\t\t},\n\t\t\ttestSpecs: 
[]build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"foo\"),\n\t\t\t\tfuchsiaTestSpec(\"bar\"),\n\t\t\t\tfuchsiaTestSpec(\"baz\"),\n\t\t\t},\n\t\t\tmodifiers: []testsharder.TestModifier{\n\t\t\t\t// default modifier\n\t\t\t\t{\n\t\t\t\t\tName: \"*\",\n\t\t\t\t\tTotalRuns: -1,\n\t\t\t\t\tMaxAttempts: 2,\n\t\t\t\t},\n\t\t\t\t// multiplier\n\t\t\t\t{\n\t\t\t\t\tName: \"foo\",\n\t\t\t\t\tMaxAttempts: 1,\n\t\t\t\t},\n\t\t\t\t// change maxAttempts (but multiplier takes precedence)\n\t\t\t\t{\n\t\t\t\t\tName: \"foo\",\n\t\t\t\t\tTotalRuns: -1,\n\t\t\t\t\tMaxAttempts: 1,\n\t\t\t\t},\n\t\t\t\t// change maxAttempts, set affected\n\t\t\t\t{\n\t\t\t\t\tName: \"bar\",\n\t\t\t\t\tAffected: true,\n\t\t\t\t\tTotalRuns: -1,\n\t\t\t\t\tMaxAttempts: 1,\n\t\t\t\t},\n\t\t\t},\n\t\t\ttestList: []build.TestListEntry{\n\t\t\t\ttestListEntry(\"foo\", false),\n\t\t\t\ttestListEntry(\"bar\", true),\n\t\t\t\ttestListEntry(\"baz\", false),\n\t\t\t},\n\t\t\ttestDurations: []build.TestDuration{\n\t\t\t\t{\n\t\t\t\t\tName: \"*\",\n\t\t\t\t\tMedianDuration: time.Millisecond,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tgoldenBasename := strings.ReplaceAll(tc.name, \" \", \"_\") + \".golden.json\"\n\t\t\tgoldenFile := filepath.Join(*goldensDir, goldenBasename)\n\n\t\t\tif *updateGoldens {\n\t\t\t\ttc.flags.outputFile = goldenFile\n\t\t\t} else {\n\t\t\t\ttc.flags.outputFile = filepath.Join(t.TempDir(), goldenBasename)\n\t\t\t}\n\n\t\t\ttc.flags.buildDir = t.TempDir()\n\t\t\tif len(tc.modifiers) > 0 {\n\t\t\t\ttc.flags.modifiersPath = writeTempJSONFile(t, tc.modifiers)\n\t\t\t}\n\t\t\tif len(tc.affectedTests) > 0 {\n\t\t\t\t// Add a newline to the end of the file to test that it still calculates the\n\t\t\t\t// correct number of affected tests even with extra whitespace.\n\t\t\t\ttc.flags.affectedTestsPath = writeTempFile(t, strings.Join(tc.affectedTests, \"\\n\")+\"\\n\")\n\t\t\t}\n\t\t\tif tc.flags.ffxDeps {\n\t\t\t\tsdkManifest := map[string]interface{}{\n\t\t\t\t\t\"atoms\": []interface{}{},\n\t\t\t\t}\n\t\t\t\tsdkManifestPath := filepath.Join(tc.flags.buildDir, \"sdk\", \"manifest\", \"core\")\n\t\t\t\tif err := os.MkdirAll(filepath.Dir(sdkManifestPath), os.ModePerm); err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t\tif err := jsonutil.WriteToFile(sdkManifestPath, sdkManifest); err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Write test-list.json.\n\t\t\tif err := jsonutil.WriteToFile(\n\t\t\t\tfilepath.Join(tc.flags.buildDir, testListPath),\n\t\t\t\tbuild.TestList{Data: tc.testList, SchemaID: \"experimental\"},\n\t\t\t); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\twriteDepFiles(t, tc.flags.buildDir, tc.testSpecs)\n\t\t\tfor _, repo := range tc.packageRepos {\n\t\t\t\tif err := os.MkdirAll(filepath.Join(tc.flags.buildDir, repo.Path), 0o700); err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tm := &fakeModules{\n\t\t\t\ttestSpecs: tc.testSpecs,\n\t\t\t\ttestDurations: tc.testDurations,\n\t\t\t\tpackageRepositories: tc.packageRepos,\n\t\t\t}\n\t\t\tif err := execute(ctx, tc.flags, m); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\tif !*updateGoldens {\n\t\t\t\twant := readShards(t, goldenFile)\n\t\t\t\tgot := readShards(t, tc.flags.outputFile)\n\t\t\t\tif diff := cmp.Diff(want, got); diff != \"\" {\n\t\t\t\t\tt.Errorf(strings.Join([]string{\n\t\t\t\t\t\t\"Golden file mismatch!\",\n\t\t\t\t\t\t\"To fix, run `tools/integration/testsharder/update_goldens.sh\",\n\t\t\t\t\t\tdiff,\n\t\t\t\t\t}, 
\"\\n\"))\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}", "func TestMain(t *testing.T) { TestingT(t) }", "func TestMain(m *testing.M) {\n\ttestsuite.RevelTestHelper(m, \"dev\", run.Run)\n}", "func TestMain(m *testing.M) {\n\tDropTestData(0)\n\tanswer := m.Run()\n\tDropTestData(0)\n\tos.Exit(answer)\n}", "func TestMain(m *testing.M) {\n\t// Code here runs before testing starts\n\tmux = GetMux()\n\t// Run tests\n\texitCode := m.Run()\n\t// Code here runs after testing finishes\n\tos.Exit(exitCode)\n}", "func TestMain(m *testing.M) {\n\n\t// Run Setup\n\tSetup()\n\n\t// Run all the tests\n\treturnCode := m.Run()\n\n\t// Run teardown\n\tTearDown()\n\n\t// Pass on the exit codes\n\tos.Exit(returnCode)\n}", "func TestMain(m *testing.M) {\n\tframework.Run(\"pilot_test\", m)\n}", "func TestMain(m *testing.M) {\n\tflag.Parse()\n\n\tresult := m.Run()\n\n\tos.Exit(result)\n}", "func RunTest(ctx context.Context, fn testFuncType, isGuest bool) error {\n\t// We lose connectivity along the way here, and if that races with the\n\t// recover_duts network-recovery hooks, it may interrupt us.\n\tunlock, err := network.LockCheckNetworkHook(ctx)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed locking the check network hook\")\n\t}\n\tdefer unlock()\n\n\tvar env TestEnv\n\n\tdefer tearDown(ctx, &env)\n\n\tif err := setUp(ctx, &env, isGuest); err != nil {\n\t\treturn errors.Wrap(err, \"failed starting the test\")\n\t}\n\n\treturn fn(ctx, &env)\n}", "func TestMain(m *testing.M) {\n\trt, _ := framework.Run(\"echo_test\", m)\n\tos.Exit(rt)\n}", "func TestMain(m *testing.M) {\n\tfstest.TestMain(m)\n}", "func TestMain(m *testing.M) {\n\tfstest.TestMain(m)\n}", "func TestMain(m *testing.M) {\n\tfstest.TestMain(m)\n}", "func (suite *AddCommandTestSuite) TestExecuteWithMultipleURLs() {\n\n}", "func TestMain(m *testing.M) {\n\t_Init();\n\tresult := m.Run();\n\t_TearDown();\n\tos.Exit(result);\n}", "func TestMain(m *testing.M) {\n\tsetUp()\n\tretCode := m.Run()\n\ttearDown()\n\tos.Exit(retCode)\n}", "func (sfs *SuiteFS) RunTests(t *testing.T, userName string, stFuncs ...SuiteTestFunc) {\n\tvfs := sfs.vfsSetup\n\n\t_, _ = sfs.User(t, userName)\n\tdefer sfs.User(t, sfs.initUser.Name())\n\n\tfor _, stFunc := range stFuncs {\n\t\tfuncName := runtime.FuncForPC(reflect.ValueOf(stFunc).Pointer()).Name()\n\t\tfuncName = funcName[strings.LastIndex(funcName, \".\")+1 : strings.LastIndex(funcName, \"-\")]\n\t\ttestDir := vfs.Join(sfs.rootDir, funcName)\n\n\t\tsfs.CreateTestDir(t, testDir)\n\n\t\tt.Run(funcName, func(t *testing.T) {\n\t\t\tstFunc(t, testDir)\n\t\t})\n\n\t\tsfs.RemoveTestDir(t, testDir)\n\t}\n}", "func runTestMain(m *testing.M) int {\n\tisLess, err := test_helpers.IsTarantoolVersionLess(2, 2, 0)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to extract Tarantool version: %s\", err)\n\t}\n\n\tif isLess {\n\t\tlog.Println(\"Skipping decimal tests...\")\n\t\tisDecimalSupported = false\n\t\treturn m.Run()\n\t} else {\n\t\tisDecimalSupported = true\n\t}\n\n\tinstance, err := test_helpers.StartTarantool(test_helpers.StartOpts{\n\t\tInitScript: \"config.lua\",\n\t\tListen: server,\n\t\tUser: opts.User,\n\t\tPass: opts.Pass,\n\t\tWaitStart: 100 * time.Millisecond,\n\t\tConnectRetry: 10,\n\t\tRetryTimeout: 500 * time.Millisecond,\n\t})\n\tdefer test_helpers.StopTarantoolWithCleanup(instance)\n\n\tif err != nil {\n\t\tlog.Printf(\"Failed to prepare test Tarantool: %s\", err)\n\t\treturn 1\n\t}\n\n\treturn m.Run()\n}", "func (o Scorecard) RunTests(ctx context.Context) (testOutput v1alpha3.Test, err error) {\n\n\terr = 
o.TestRunner.Initialize(ctx)\n\tif err != nil {\n\t\treturn testOutput, err\n\t}\n\n\ttests := o.selectTests()\n\tif len(tests) == 0 {\n\t\treturn testOutput, nil\n\t}\n\n\tfor _, test := range tests {\n\t\tresult, err := o.TestRunner.RunTest(ctx, test)\n\t\tif err != nil {\n\t\t\tresult = convertErrorToStatus(test.Name, err)\n\t\t}\n\t\ttestOutput.Status.Results = append(testOutput.Status.Results, result.Results...)\n\t}\n\n\tif !o.SkipCleanup {\n\t\terr = o.TestRunner.Cleanup(ctx)\n\t\tif err != nil {\n\t\t\treturn testOutput, err\n\t\t}\n\t}\n\treturn testOutput, nil\n}", "func RunTests(m *testing.M, version *int) {\n\tflag.IntVar(version, \"v\", 0, \"The anwork version that should be used with these tests\")\n\tflag.Parse()\n\n\tif *version == 0 {\n\t\tpanic(\"Version (-v) must be passed with a legitimate anwork version number\")\n\t}\n\n\tos.Exit(m.Run())\n}", "func (s *Service) RunTest(ctx context.Context, req *conformance.Request) (*conformance.Response, error) {\n\tvar config test_gen.ServiceMesh\n\n\tconfig = linkerdConfig\n\tswitch req.Mesh.Type {\n\tcase smp.ServiceMesh_LINKERD:\n\t\tconfig = linkerdConfig\n\t\treq.Mesh.Annotations[\"linkerd.io/inject\"] = \"enabled\"\n\tcase smp.ServiceMesh_APP_MESH:\n\t\tconfig = linkerdConfig\n\t\treq.Mesh.Labels[\"appmesh.k8s.aws/sidecarInjectorWebhook\"] = \"enabled\"\n\tcase smp.ServiceMesh_MAESH:\n\t\tconfig = maeshConfig\n\tcase smp.ServiceMesh_ISTIO:\n\t\tconfig = istioConfig\n\t\treq.Mesh.Labels[\"istio-injection\"] = \"enabled\"\n\tcase smp.ServiceMesh_OPEN_SERVICE_MESH:\n\t\tconfig = osmConfig\n\t\treq.Mesh.Labels[\"openservicemesh.io/monitored-by\"] = \"osm\"\n\tcase smp.ServiceMesh_KUMA:\n\t\treq.Mesh.Annotations[\"kuma.io/sidecar-injection\"] = \"enabled\"\n\tcase smp.ServiceMesh_NGINX_SERVICE_MESH:\n\t\treq.Mesh.Annotations[\"njector.nsm.nginx.com/auto-inject\"] = \"true\"\n\n\t}\n\n\tresult := test_gen.RunTest(config, req.Mesh.Annotations, req.Mesh.Labels)\n\ttotalSteps := 24\n\ttotalFailures := 0\n\tstepsCount := map[string]int{\n\t\t\"traffic-access\": 7,\n\t\t\"traffic-split\": 11,\n\t\t\"traffic-spec\": 6,\n\t}\n\tspecVersion := map[string]string{\n\t\t\"traffic-access\": \"v0.6.0/v1alpha3\",\n\t\t\"traffic-split\": \"v0.6.0/v1alpha4\",\n\t\t\"traffic-spec\": \"v0.6.0/v1alpha4\",\n\t}\n\n\tdetails := make([]*conformance.Detail, 0)\n\tfor _, res := range result.Testsuite[0].Testcase {\n\t\td := &conformance.Detail{\n\t\t\tSmispec: res.Name,\n\t\t\tSpecversion: specVersion[res.Name],\n\t\t\tAssertion: strconv.Itoa(stepsCount[res.Name]),\n\t\t\tDuration: res.Time,\n\t\t\tCapability: conformance.Capability_FULL,\n\t\t\tStatus: conformance.ResultStatus_PASSED,\n\t\t\tResult: &conformance.Result{\n\t\t\t\tResult: &conformance.Result_Message{\n\t\t\t\t\tMessage: \"All test passed\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tif len(res.Failure.Text) > 2 {\n\t\t\td.Result = &conformance.Result{\n\t\t\t\tResult: &conformance.Result_Error{\n\t\t\t\t\tError: &service.CommonError{\n\t\t\t\t\t\tCode: \"\",\n\t\t\t\t\t\tSeverity: \"\",\n\t\t\t\t\t\tShortDescription: res.Failure.Text,\n\t\t\t\t\t\tLongDescription: res.Failure.Message,\n\t\t\t\t\t\tProbableCause: \"\",\n\t\t\t\t\t\tSuggestedRemediation: \"\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\td.Status = conformance.ResultStatus_FAILED\n\t\t\td.Capability = conformance.Capability_NONE\n\n\t\t\t// A hacky way to see the testStep Failed, since KUDO only provides it in Failure.Message\n\t\t\tre := regexp.MustCompile(`[0-9]+`)\n\t\t\tif res.Failure.Message != \"\" {\n\t\t\t\tstepFailed := 
re.FindAllString(res.Failure.Message, 1)\n\t\t\t\tif len(stepFailed) != 0 {\n\t\t\t\t\tpassed, _ := strconv.Atoi(stepFailed[0])\n\t\t\t\t\tpassed = passed - 1\n\t\t\t\t\tfailures := stepsCount[res.Name] - passed\n\t\t\t\t\ttotalFailures += failures\n\t\t\t\t\tif (passed) >= (stepsCount[res.Name] / 2) {\n\t\t\t\t\t\td.Capability = conformance.Capability_HALF\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tdetails = append(details, d)\n\t}\n\n\treturn &conformance.Response{\n\t\tCasespassed: strconv.Itoa(totalSteps - totalFailures),\n\t\tPasspercent: strconv.FormatFloat(float64(totalSteps-totalFailures)/float64(totalSteps)*100, 'f', 2, 64),\n\t\tMesh: req.Mesh,\n\t\tDetails: details,\n\t}, nil\n}", "func TestMain(m *testing.M) {\n\tos.Exit(testscript.RunMain(m, map[string]func() int{\n\t\t\"main\": main1,\n\t}))\n}", "func runTest(m *testing.M) int {\n\t// In order to get a Mongo session we need the name of the database we\n\t// are using. The web framework middleware is using this by convention.\n\tdbName, err := cfg.String(\"MONGO_DB\")\n\tif err != nil {\n\t\tfmt.Println(\"MongoDB is not configured\")\n\t\treturn 1\n\t}\n\n\tdb, err := db.NewMGO(\"context\", dbName)\n\tif err != nil {\n\t\tfmt.Println(\"Unable to get Mongo session\")\n\t\treturn 1\n\t}\n\n\tdefer db.CloseMGO(\"context\")\n\n\ttstdata.Generate(db)\n\tdefer tstdata.Drop(db)\n\n\tloadQuery(db, \"basic.json\")\n\tloadQuery(db, \"basic_var.json\")\n\tdefer qfix.Remove(db, \"QTEST_O\")\n\n\tloadScript(db, \"basic_script_pre.json\")\n\tloadScript(db, \"basic_script_pst.json\")\n\tdefer sfix.Remove(db, \"STEST_O\")\n\n\tloadMasks(db, \"basic.json\")\n\tdefer mfix.Remove(db, \"test_xenia_data\")\n\n\treturn m.Run()\n}", "func (f *VRFTest) Run() error {\n\tif err := f.createChainlinkJobs(); err != nil {\n\t\treturn err\n\t}\n\tvar ctx context.Context\n\tvar testCtxCancel context.CancelFunc\n\tif f.TestOptions.TestDuration.Seconds() > 0 {\n\t\tctx, testCtxCancel = context.WithTimeout(context.Background(), f.TestOptions.TestDuration)\n\t} else {\n\t\tctx, testCtxCancel = context.WithCancel(context.Background())\n\t}\n\tdefer testCtxCancel()\n\tcancelPerfEvents := f.watchPerfEvents()\n\tcurrentRound := 0\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tlog.Info().Msg(\"Test finished\")\n\t\t\ttime.Sleep(f.TestOptions.GracefulStopDuration)\n\t\t\tcancelPerfEvents()\n\t\t\treturn nil\n\t\tdefault:\n\t\t\tlog.Info().Int(\"RoundID\", currentRound).Msg(\"New round\")\n\t\t\tif err := f.requestRandomness(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := f.waitRoundFulfilled(currentRound + 1); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif f.TestOptions.NumberOfRounds != 0 && currentRound >= f.TestOptions.NumberOfRounds {\n\t\t\t\tlog.Info().Msg(\"Final round is reached\")\n\t\t\t\ttestCtxCancel()\n\t\t\t}\n\t\t\tcurrentRound++\n\t\t}\n\t}\n}", "func TestRun(t *testing.T) {\n\tsandbox, cleanup := cmdtest.TestSetupWithSandbox(t, false)\n\tdefer cleanup()\n\n\t// first add the test repo index\n\t_, err := cmdtest.AddLocalRepo(sandbox, \"LocalTestRepo\", filepath.Join(sandbox.TestDataPath, \"dev.local-index.yaml\"))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tstacksList := cmdtest.GetEnvStacksList()\n\n\tif stacksList == \"dev.local/starter\" {\n\t\tt.Skip()\n\t}\n\n\t// appsody init nodejs-express\n\t_, err = cmdtest.RunAppsody(sandbox, \"init\", \"nodejs-express\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// appsody run\n\trunChannel := make(chan error)\n\tgo func() {\n\t\t_, err = cmdtest.RunAppsody(sandbox, 
\"run\")\n\t\trunChannel <- err\n\t\tclose(runChannel)\n\t}()\n\n\t// defer the appsody stop to close the docker container\n\tdefer func() {\n\t\t_, err = cmdtest.RunAppsody(sandbox, \"stop\")\n\t\tif err != nil {\n\t\t\tt.Logf(\"Ignoring error running appsody stop: %s\", err)\n\t\t}\n\t\t// wait for the appsody command/goroutine to finish\n\t\trunErr := <-runChannel\n\t\tif runErr != nil {\n\t\t\tt.Logf(\"Ignoring error from the appsody command: %s\", runErr)\n\t\t}\n\t}()\n\n\thealthCheckFrequency := 2 // in seconds\n\thealthCheckTimeout := 60 // in seconds\n\thealthCheckWait := 0\n\thealthCheckOK := false\n\tfor !(healthCheckOK || healthCheckWait >= healthCheckTimeout) {\n\t\tselect {\n\t\tcase err = <-runChannel:\n\t\t\t// appsody run exited, probably with an error\n\t\t\tt.Fatalf(\"appsody run quit unexpectedly: %s\", err)\n\t\tcase <-time.After(time.Duration(healthCheckFrequency) * time.Second):\n\t\t\t// check the health endpoint\n\t\t\thealthCheckWait += healthCheckFrequency\n\t\t\tresp, err := http.Get(\"http://localhost:3000/health\")\n\t\t\tif err != nil {\n\t\t\t\tt.Logf(\"Health check error. Ignore and retry: %s\", err)\n\t\t\t} else {\n\t\t\t\tresp.Body.Close()\n\t\t\t\tif resp.StatusCode != 200 {\n\t\t\t\t\tt.Logf(\"Health check response code %d. Ignore and retry.\", resp.StatusCode)\n\t\t\t\t} else {\n\t\t\t\t\tt.Logf(\"Health check OK\")\n\t\t\t\t\t// may want to check body\n\t\t\t\t\thealthCheckOK = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif !healthCheckOK {\n\t\tt.Errorf(\"Did not receive an OK health check within %d seconds.\", healthCheckTimeout)\n\t}\n}", "func (t *SelfTester) RunSelfTest() ([]string, []string, map[string]*serializers.EventSerializer, error) {\n\tif err := t.BeginWaitingForEvent(); err != nil {\n\t\treturn nil, nil, nil, fmt.Errorf(\"failed to run self test: %w\", err)\n\t}\n\tdefer t.EndWaitingForEvent()\n\n\tt.lastTimestamp = time.Now()\n\n\t// launch the self tests\n\tvar success []string\n\tvar fails []string\n\ttestEvents := make(map[string]*serializers.EventSerializer)\n\n\tfor _, selftest := range FileSelfTests {\n\t\tdef := selftest.GetRuleDefinition(t.targetFilePath)\n\n\t\tpredicate, err := selftest.GenerateEvent(t.targetFilePath)\n\t\tif err != nil {\n\t\t\tfails = append(fails, def.ID)\n\t\t\tlog.Errorf(\"Self test failed: %s\", def.ID)\n\t\t\tcontinue\n\t\t}\n\t\tevent, err2 := t.expectEvent(predicate)\n\t\ttestEvents[def.ID] = event\n\t\tif err2 != nil {\n\t\t\tfails = append(fails, def.ID)\n\t\t\tlog.Errorf(\"Self test failed: %s\", def.ID)\n\t\t} else {\n\t\t\tsuccess = append(success, def.ID)\n\t\t}\n\t}\n\n\t// save the results for get status command\n\tt.success = success\n\tt.fails = fails\n\n\treturn success, fails, testEvents, nil\n}", "func (suite *AddCommandTestSuite) TestExecuteWhenTrackFound() {\n\n}", "func Run(name string, t testing.TB, f func(testing.TB)) {\n\tif tt, ok := t.(*testing.T); ok {\n\t\ttt.Run(name, func(ttt *testing.T) { f(ttt) })\n\t\treturn\n\t}\n\tif tb, ok := t.(*testing.B); ok {\n\t\ttb.Run(name, func(ttb *testing.B) { f(ttb) })\n\t\treturn\n\t}\n\tt.Error(\"invalid test harness\")\n\tt.FailNow()\n}", "func RunSubtests(ctx *Context) {\n\tfor name, fn := range tests {\n\t\tctx.Run(name, fn)\n\t}\n}", "func RunSingleFixtureTest(file string, t *testing.T) {\n\tvar fixtureSteps []FixtureStep\n\tbyteValue := ReadFile(file, t)\n\n\terr := json.Unmarshal([]byte(byteValue), &fixtureSteps)\n\tt.WithFields(testing.Fields{\n\t\t\"raw_json\": string(byteValue),\n\t}).MustNil(err, \"error decoding fixture 
steps\")\n\n\tt.Run(file, func(t *testing.T) {\n\t\tif FixtureTestOpts.IsParallel {\n\t\t\tt.Parallel()\n\t\t}\n\n\t\tfor idx := range fixtureSteps {\n\t\t\tUpdateWorkQueueStatus(file, idx, fixtureSteps, InProgress, t)\n\t\t}\n\t})\n}", "func TestMain(m *testing.M) {\n\tlog.SetOutput(os.Stdout)\n\tos.Exit(m.Run())\n}", "func TestMain(m *testing.M) {\n\tlog.SetOutput(os.Stdout)\n\tos.Exit(m.Run())\n}", "func TestMain(m *testing.M) {\n\trest.StartMockupServer()\n\tos.Exit(m.Run())\n}", "func TestMain(m *testing.M) {\n\tflag.Parse()\n\tos.Exit(m.Run())\n}", "func (g Go) Test(ctx context.Context) error {\n\tmg.CtxDeps(ctx, g.CheckVersion)\n\treturn sh.RunV(\"go\", append([]string{\"test\"}, strings.Split(TestArgs, \" \")...)...)\n}", "func (t Test) Unit() error {\n\tmg.Deps(t.GenerateModules)\n\treturn sh.RunWithV(ENV, \"go\", \"test\", \"-v\", \"-short\", \"-coverprofile=coverage.txt\", \"-covermode=atomic\", \"./...\")\n}", "func RunTests(opts Options) {\n\tif opts.Cleanup {\n\t\terr := CleanupTests(opts.Driver, opts.DSN, opts.Verbose)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Cleanup failed: %s\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tos.Exit(0)\n\t}\n\t_ = flag.Set(\"test.run\", opts.Match)\n\tif opts.Verbose {\n\t\t_ = flag.Set(\"test.v\", \"true\")\n\t}\n\ttests := []testing.InternalTest{\n\t\t{\n\t\t\tName: \"MainTest\",\n\t\t\tF: func(t *testing.T) {\n\t\t\t\tTest(t, opts.Driver, opts.DSN, opts.Suites, opts.RW)\n\t\t\t},\n\t\t},\n\t}\n\n\tmainStart(tests)\n}", "func TestMain(m *testing.M) {\n\tflag.BoolVar(&realTest, \"real\", false, \"Test with real uHunt API server\")\n\tflag.Parse()\n\tos.Exit(m.Run())\n}", "func TestMain(m *testing.M) {\n\tos.Exit(testMain(m))\n}", "func TestMain(m *testing.M) {\n\tos.Exit(testMain(m))\n}", "func TestMain(m *testing.M) {\n\tos.Exit(testMain(m))\n}", "func (s *ScenarioRunnerSuite) TestRun(t sweet.T) {\n\tpaths := []string{\"/t1\", \"/t2\", \"/t3\"}\n\treqBodies := []string{`{\"req\": \"r1\"}`, `{\"req\": \"r2\"}`, `{\"req\": \"r3\"}`}\n\trespBodies := []string{`{\"resp\": \"r1\"}`, `{\"resp\": \"r2\"}`, `{\"resp\": \"r3\"}`}\n\n\tserver := ghttp.NewServer()\n\tserver.AppendHandlers(ghttp.RespondWith(http.StatusOK, respBodies[0]))\n\tserver.AppendHandlers(ghttp.RespondWith(http.StatusOK, respBodies[1]))\n\tserver.AppendHandlers(ghttp.RespondWith(http.StatusOK, respBodies[2]))\n\n\tscenario := &config.Scenario{\n\t\tTests: []*config.Test{\n\t\t\t&config.Test{\n\t\t\t\tRequest: &config.Request{\n\t\t\t\t\tURL: testTemplate(server.URL() + paths[0]),\n\t\t\t\t\tBody: testTemplate(reqBodies[0]),\n\t\t\t\t},\n\t\t\t\tResponse: &config.Response{\n\t\t\t\t\tStatus: testPattern(\"2..\"),\n\t\t\t\t},\n\t\t\t\tEnabled: true,\n\t\t\t},\n\t\t\t&config.Test{\n\t\t\t\tRequest: &config.Request{\n\t\t\t\t\tURL: testTemplate(server.URL() + paths[1]),\n\t\t\t\t\tBody: testTemplate(reqBodies[1]),\n\t\t\t\t},\n\t\t\t\tResponse: &config.Response{\n\t\t\t\t\tStatus: testPattern(\"2..\"),\n\t\t\t\t},\n\t\t\t\tEnabled: true,\n\t\t\t},\n\t\t\t&config.Test{\n\t\t\t\tRequest: &config.Request{\n\t\t\t\t\tURL: testTemplate(server.URL() + paths[2]),\n\t\t\t\t\tBody: testTemplate(reqBodies[2]),\n\t\t\t\t},\n\t\t\t\tResponse: &config.Response{\n\t\t\t\t\tStatus: testPattern(\"2..\"),\n\t\t\t\t},\n\t\t\t\tEnabled: true,\n\t\t\t},\n\t\t},\n\t}\n\n\trunner := NewScenarioRunner(scenario, logging.NilLogger, logging.VerbosityLevelNone, nil)\n\trunner.Run(http.DefaultClient, 
map[string]interface{}{})\n\tExpect(runner.Resolved()).To(BeTrue())\n\tExpect(runner.Errored()).To(BeFalse())\n\tExpect(runner.Failed()).To(BeFalse())\n\n\tExpect(server.ReceivedRequests()).To(HaveLen(3))\n\tExpect(server.ReceivedRequests()[0].URL.Path).To(Equal(paths[0]))\n\tExpect(server.ReceivedRequests()[1].URL.Path).To(Equal(paths[1]))\n\tExpect(server.ReceivedRequests()[2].URL.Path).To(Equal(paths[2]))\n\n\tfor i, result := range runner.Results() {\n\t\tExpect(result.Index).To(Equal(i))\n\t\tExpect(result.Disabled).To(BeFalse())\n\t\tExpect(result.Skipped).To(BeFalse())\n\t\tExpect(result.Request.URL.Path).To(Equal(paths[i]))\n\t\tExpect(result.RequestBody).To(Equal(reqBodies[i]))\n\t\tExpect(result.Response.StatusCode).To(Equal(http.StatusOK))\n\t\tExpect(result.ResponseBody).To(Equal(respBodies[i]))\n\t\tExpect(result.RequestMatchErrors).To(HaveLen(0))\n\t\tExpect(result.Err).To(BeNil())\n\t}\n}" ]
[ "0.78072757", "0.74127185", "0.733165", "0.72698194", "0.72393966", "0.7207766", "0.70724136", "0.70590204", "0.69538665", "0.68821836", "0.68679804", "0.6842752", "0.6730383", "0.67301863", "0.6718925", "0.6653832", "0.66490334", "0.6636336", "0.662904", "0.6625863", "0.66137713", "0.65735805", "0.65671086", "0.6550095", "0.65284956", "0.65223753", "0.65213495", "0.6500113", "0.6487818", "0.64858246", "0.6481236", "0.6475876", "0.64600885", "0.6449342", "0.64389896", "0.64389896", "0.64389896", "0.6438042", "0.6436945", "0.64242953", "0.64242953", "0.6407624", "0.6386636", "0.63853484", "0.6379424", "0.6379424", "0.63793266", "0.63711274", "0.6370928", "0.6366782", "0.63564575", "0.63489586", "0.63369244", "0.6333775", "0.6327005", "0.6325735", "0.6319616", "0.63089275", "0.6304526", "0.6294847", "0.62899995", "0.6283981", "0.628343", "0.628116", "0.6278807", "0.6268872", "0.6247989", "0.6235533", "0.62247866", "0.62151587", "0.62151587", "0.62151587", "0.6209572", "0.62034255", "0.62005305", "0.6198643", "0.6191122", "0.6184009", "0.6179438", "0.61728203", "0.615294", "0.61464626", "0.6137694", "0.6120872", "0.61159885", "0.61156124", "0.6110245", "0.6106215", "0.610384", "0.61031", "0.61031", "0.6099301", "0.6098078", "0.60839057", "0.6081867", "0.60771483", "0.607559", "0.607248", "0.607248", "0.607248", "0.6054415" ]
0.0
-1
RunTest executes a single test
func (r FakeTestRunner) RunTest(ctx context.Context, test Test) (result *v1alpha3.TestStatus, err error) { return r.TestStatus, r.Error }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func TestRun(t *testing.T) {\n\tRun()\n}", "func (m *Main) RunTest(name, command string, run func(t *Test) error) error {\n\tt := m.NewTest(name, command, run)\n\treturn t.Run()\n}", "func TestRunMain(t *testing.T) {\n\tmain()\n}", "func (envManager *TestEnvManager) RunTest(m runnable) (ret int) {\n\tdefer envManager.TearDown()\n\tif err := envManager.StartUp(); err != nil {\n\t\tlog.Printf(\"Failed to setup framework: %s\", err)\n\t\tret = 1\n\t} else {\n\t\tlog.Printf(\"\\nStart testing ......\")\n\t\tret = m.Run()\n\t}\n\treturn ret\n}", "func (t *Test) Run() error {\n\treturn t.Wrap(t.run)\n}", "func (test Test) Run(t *testing.T) {\n\tt.Logf(\"Starting test %v\", t.Name())\n\tt.Helper()\n\t// Double negative cannot be helped, this is intended to mitigate test failures where a global\n\t// resource is manipulated, e.g.: the default AWS security group.\n\tif !test.RunOptions.NoParallel {\n\t\tt.Parallel()\n\t}\n\tt.Run(\"Python\", func(t *testing.T) {\n\t\trunOpts := integration.ProgramTestOptions{}\n\t\tif test.RunOptions != nil {\n\t\t\trunOpts = *test.RunOptions\n\t\t}\n\t\tconvertOpts := test.Options\n\t\tif test.Python != nil {\n\t\t\tconvertOpts = convertOpts.With(*test.Python)\n\t\t}\n\n\t\ttargetTest := targetTest{\n\t\t\trunOpts: &runOpts,\n\t\t\tconvertOpts: &convertOpts,\n\t\t\tprojectName: test.ProjectName,\n\t\t\tlanguage: \"python\",\n\t\t\truntime: \"python\",\n\t\t}\n\t\ttargetTest.Run(t)\n\t})\n\tt.Run(\"TypeScript\", func(t *testing.T) {\n\t\trunOpts := integration.ProgramTestOptions{}\n\t\tif test.RunOptions != nil {\n\t\t\trunOpts = *test.RunOptions\n\t\t}\n\t\tconvertOpts := test.Options\n\t\tif test.TypeScript != nil {\n\t\t\tconvertOpts = convertOpts.With(*test.TypeScript)\n\t\t}\n\n\t\ttargetTest := targetTest{\n\t\t\trunOpts: &runOpts,\n\t\t\tconvertOpts: &convertOpts,\n\t\t\tprojectName: test.ProjectName,\n\t\t\tlanguage: \"typescript\",\n\t\t\truntime: \"nodejs\",\n\t\t}\n\t\ttargetTest.Run(t)\n\t})\n}", "func runSingleTest(s *search.Search, sl *search.Limits, t *Test) {\n\t// reset search and search limits\n\ts.NewGame()\n\tsl.Mate = 0\n\t// create position\n\tp, _ := position.NewPositionFen(t.fen)\n\tswitch t.tType {\n\tcase DM:\n\t\tdirectMateTest(s, sl, p, t)\n\tcase BM:\n\t\tbestMoveTest(s, sl, p, t)\n\tcase AM:\n\t\tavoidMoveMateTest(s, sl, p, t)\n\tdefault:\n\t\tlog.Warningf(\"Unknown Test type: %d\", t.tType)\n\t}\n}", "func (s *FakeJujuRunnerSuite) TestRun(c *gc.C) {\n\ts.runner.Run()\n\ts.runner.Stop()\n\tresult := s.runner.Wait()\n \n\tc.Assert(result.String(), gc.Equals, \"OK: 1 passed\")\n\tc.Assert(result.Succeeded, gc.Equals, 1)\n\tc.Assert(result.RunError, gc.IsNil)\n\tc.Assert(\n\t\tstrings.Contains(s.output.String(), \"Starting service\"), gc.Equals, true)\n}", "func RunTest(t *testing.T, name string, f Func, testCases []TestCase) {\n\tt.Run(name, func(t *testing.T) {\n\t\tfor _, test := range testCases {\n\t\t\tif actual := f(test.Input); actual != test.Expected {\n\t\t\t\tt.Errorf(\"\\nfor n=%d, expected: %t, actual: %t\", test.Input, test.Expected, actual)\n\t\t\t}\n\t\t}\n\t})\n}", "func RunTest(client pb.GNMIClient, testCase *common.TestCase, timeout time.Duration, stateUpdateDelay time.Duration) error {\n\tif client == nil {\n\t\treturn errors.New(\"gNMI client is not available\")\n\t}\n\tif testCase == nil {\n\t\treturn errors.New(\"empty test case\")\n\t}\n\tif len(testCase.OPs) == 0 {\n\t\t// Succeed if no operation specified in this test case.\n\t\treturn nil\n\t}\n\t// Determine the test case type.\n\tswitch testCase.OPs[0].Type 
{\n\tcase common.OPReplace, common.OPUpdate, common.OPDelete:\n\t\t// This is a config test.\n\t\treturn runConfigTest(client, testCase, timeout, stateUpdateDelay)\n\tcase common.OPGet:\n\t\t// This is a state fetching test.\n\t\treturn runStateTest(client, testCase, timeout)\n\tcase common.OPSubscribe:\n\t\treturn errors.New(\"not support telemetry streaming test cases\")\n\tdefault:\n\t\treturn fmt.Errorf(\"invalid operation type %s\", testCase.OPs[0].Type)\n\t}\n}", "func TestRun(t *testing.T) {\n\tsuite.Run(t, new(CategoryTestSuite))\n\tsuite.Run(t, new(ProductTestSuite))\n}", "func RunUnitTest(cobraCmd *cobra.Command, args []string) {\n\terr := CommandWithStdout(\"go\", \"test\", \"./...\").Run()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func (c *actionTests) actionRun(t *testing.T) {\n\te2e.EnsureImage(t, c.env)\n\n\ttests := []struct {\n\t\tname string\n\t\targv []string\n\t\texit int\n\t}{\n\t\t{\n\t\t\tname: \"NoCommand\",\n\t\t\targv: []string{c.env.ImagePath},\n\t\t\texit: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"True\",\n\t\t\targv: []string{c.env.ImagePath, \"true\"},\n\t\t\texit: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"False\",\n\t\t\targv: []string{c.env.ImagePath, \"false\"},\n\t\t\texit: 1,\n\t\t},\n\t\t{\n\t\t\tname: \"ScifTestAppGood\",\n\t\t\targv: []string{\"--app\", \"testapp\", c.env.ImagePath},\n\t\t\texit: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"ScifTestAppBad\",\n\t\t\targv: []string{\"--app\", \"fakeapp\", c.env.ImagePath},\n\t\t\texit: 1,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tc.env.RunSingularity(\n\t\t\tt,\n\t\t\te2e.AsSubtest(tt.name),\n\t\t\te2e.WithProfile(e2e.UserProfile),\n\t\t\te2e.WithCommand(\"run\"),\n\t\t\te2e.WithArgs(tt.argv...),\n\t\t\te2e.ExpectExit(tt.exit),\n\t\t)\n\t}\n}", "func (t testCommand) Run() error {\n\tif t.shouldFail {\n\t\treturn errors.New(\"I AM ERROR\")\n\t}\n\treturn nil\n}", "func Run(t *testing.T, s suite.TestingSuite) {\n\tsuite.Run(t, s)\n}", "func RunTest(flags *Flags) error {\n\tswitch flags.Mode {\n\tcase constants.ManagerMode:\n\t\treturn workermanager.New().RunTest()\n\tcase constants.WorkerMode:\n\t\tslackURL := flags.SlackURL\n\t\tvar slacks []string\n\t\tif len(slackURL) > 0 {\n\t\t\tslacks = append(slacks, slackURL)\n\t\t}\n\t\treturn worker.NewWorker().RunTest(flags.Type, slacks)\n\t}\n\n\treturn nil\n}", "func TestMain(m *testing.M) {\n\tprintln(\"do stuff before all tests\")\n\tm.Run()\n\tprintln(\"do stuff after all tests\")\n}", "func RunTest(ctx context.Context, target, location string, nodeIDs []int, limit int, debug, outputJSON bool, runTest runFunc, runOutput runOutputFunc) error {\n\trunReq := &perfops.RunRequest{\n\t\tTarget: target,\n\t\tLocation: location,\n\t\tNodes: nodeIDs,\n\t\tLimit: limit,\n\t}\n\n\tf := NewFormatter(debug && !outputJSON)\n\tf.StartSpinner()\n\ttestID, err := runTest(ctx, runReq)\n\tf.StopSpinner()\n\tif err != nil {\n\t\treturn err\n\t}\n\tres := &RunOutputResult{}\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-time.After(200 * time.Millisecond):\n\t\t\t}\n\t\t\toutput, err := runOutput(ctx, testID)\n\t\t\tres.SetOutput(output, err)\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n\n\tif outputJSON {\n\t\tf.StartSpinner()\n\t}\n\tvar o *perfops.RunOutput\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(50 * time.Millisecond):\n\t\t}\n\t\tif o, err = res.Output(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !outputJSON && o != nil {\n\t\t\tPrintOutput(f, o)\n\t\t}\n\t\tif o != nil && o.IsFinished() {\n\t\t\tbreak\n\t\t}\n\t}\n\tif outputJSON 
{\n\t\tf.StopSpinner()\n\t\tPrintOutputJSON(o)\n\t}\n\treturn nil\n}", "func (runner *suiteRunner) run() *Result {\n if runner.tracker.result.RunError == nil && len(runner.tests) > 0 {\n runner.tracker.start()\n if runner.checkFixtureArgs() {\n if runner.runFixture(runner.setUpSuite) {\n for i := 0; i != len(runner.tests); i++ {\n c := runner.runTest(runner.tests[i])\n if c.status == fixturePanickedSt {\n runner.missTests(runner.tests[i+1:])\n break\n }\n }\n } else {\n runner.missTests(runner.tests)\n }\n runner.runFixture(runner.tearDownSuite)\n } else {\n runner.missTests(runner.tests)\n }\n runner.tracker.waitAndStop()\n runner.tempDir.removeAll()\n }\n return &runner.tracker.result\n}", "func RunTest(t *testing.T, dir string, opts ...TestOptionsFunc) {\n\ttest := Test{}\n\t// Apply common defaults.\n\ttest.ProjectName = filepath.Base(dir)\n\ttest.Options.Compile = nil\n\ttest.Options.FilterName = \"name\"\n\ttest.RunOptions = &integration.ProgramTestOptions{\n\t\tDir: dir,\n\t\tExpectRefreshChanges: true,\n\t}\n\tfor _, opt := range opts {\n\t\topt(t, &test)\n\t}\n\n\ttest.Run(t)\n}", "func TestMain(m *testing.M) {\n\tos.Exit(runTest(m))\n}", "func Test1IsATest(t *testing.T) {\n}", "func (scenTest *GetStartedFunctionsScenarioTest) RunSubTest(stubber *testtools.AwsmStubber) {\n\tmockQuestioner := demotools.MockQuestioner{Answers: scenTest.Answers}\n\tscenario := NewGetStartedFunctionsScenario(*stubber.SdkConfig, &mockQuestioner, &scenTest.helper)\n\tscenario.isTestRun = true\n\tscenario.Run()\n}", "func Test() error {\n\treturn sh.RunWith(map[string]string{\"GORACE\": \"halt_on_error=1\"},\n\t\t\"go\", \"test\", \"-race\", \"-v\", \"./...\")\n}", "func (o *Options) Run() error {\n\terr := o.Validate()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to validate\")\n\t}\n\n\ttest := &v1alpha1.TestRun{}\n\terr = o.PopulateTest(test)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to populate the TestRun resource\")\n\t}\n\n\to.TestRun, err = o.TestClient.JxtestV1alpha1().TestRuns(o.Namespace).Create(test)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to create the TestRun CRD\")\n\t}\n\treturn nil\n}", "func (ts *TestSuite) RunTests() {\n\n\tif len(ts.Tests) == 0 {\n\t\tout.Printf(\"No tests to run\\n\")\n\t\treturn\n\t}\n\n\tstartTime := time.Now()\n\n\t// setup search\n\ts := search.NewSearch()\n\tsl := search.NewSearchLimits()\n\tsl.MoveTime = ts.Time\n\tsl.Depth = ts.Depth\n\tif sl.MoveTime > 0 {\n\t\tsl.TimeControl = true\n\t}\n\n\tout.Printf(\"Running Test Suite\\n\")\n\tout.Printf(\"==================================================================\\n\")\n\tout.Printf(\"EPD File: %s\\n\", ts.FilePath)\n\tout.Printf(\"SearchTime: %d ms\\n\", ts.Time.Milliseconds())\n\tout.Printf(\"MaxDepth: %d\\n\", ts.Depth)\n\tout.Printf(\"Date: %s\\n\", time.Now().Local())\n\tout.Printf(\"No of tests: %d\\n\", len(ts.Tests))\n\tout.Println()\n\n\t// execute all tests and store results in the\n\t// test instance\n\tfor i, t := range ts.Tests {\n\t\tout.Printf(\"Test %d of %d\\nTest: %s -- Target Result %s\\n\", i+1, len(ts.Tests), t.line, t.targetMoves.StringUci())\n\t\tstartTime2 := time.Now()\n\t\trunSingleTest(s, sl, t)\n\t\telapsedTime := time.Since(startTime2)\n\t\tt.nodes = s.NodesVisited()\n\t\tt.time = s.LastSearchResult().SearchTime\n\t\tt.nps = util.Nps(s.NodesVisited(), s.LastSearchResult().SearchTime)\n\t\tout.Printf(\"Test finished in %d ms with result %s (%s) - nps: %d\\n\\n\",\n\t\t\telapsedTime.Milliseconds(), t.rType.String(), t.actual.StringUci(), 
t.nps)\n\t}\n\n\t// sum up result for report\n\ttr := &SuiteResult{}\n\tfor _, t := range ts.Tests {\n\t\ttr.Counter++\n\t\tswitch t.rType {\n\t\tcase NotTested:\n\t\t\ttr.NotTestedCounter++\n\t\tcase Skipped:\n\t\t\ttr.SkippedCounter++\n\t\tcase Failed:\n\t\t\ttr.FailedCounter++\n\t\tcase Success:\n\t\t\ttr.SuccessCounter++\n\t\t}\n\t\ttr.Nodes += t.nodes\n\t\ttr.Time += t.time\n\t}\n\tts.LastResult = tr\n\n\telapsed := time.Since(startTime)\n\n\t// print report\n\tout.Printf(\"Results for Test Suite\\n\", ts.FilePath)\n\tout.Printf(\"------------------------------------------------------------------------------------------------------------------------------------\\n\")\n\tout.Printf(\"EPD File: %s\\n\", ts.FilePath)\n\tout.Printf(\"SearchTime: %d ms\\n\", ts.Time.Milliseconds())\n\tout.Printf(\"MaxDepth: %d\\n\", ts.Depth)\n\tout.Printf(\"Date: %s\\n\", time.Now().Local())\n\tout.Printf(\"====================================================================================================================================\\n\")\n\tout.Printf(\" %-4s | %-10s | %-8s | %-8s | %-15s | %s | %s\\n\", \" Nr.\", \"Result\", \"Move\", \"Value\", \"Expected Result\", \"Fen\", \"Id\")\n\tout.Printf(\"====================================================================================================================================\\n\")\n\tfor i, t := range ts.Tests {\n\t\tif t.tType == DM {\n\t\t\tout.Printf(\" %-4d | %-10s | %-8s | %-8s | %s%-15d | %s | %s\\n\",\n\t\t\t\ti+1, t.rType.String(), t.actual.StringUci(), t.value.String(), \"dm \", t.mateDepth, t.fen, t.id)\n\t\t} else {\n\t\t\tout.Printf(\" %-4d | %-10s | %-8s | %-8s | %s %-15s | %s | %s\\n\",\n\t\t\t\ti+1, t.rType.String(), t.actual.StringUci(), t.value.String(), t.tType.String(), t.targetMoves.StringUci(), t.fen, t.id)\n\t\t}\n\t}\n\tout.Printf(\"====================================================================================================================================\\n\")\n\tout.Printf(\"Summary:\\n\")\n\tout.Printf(\"EPD File: %s\\n\", ts.FilePath)\n\tout.Printf(\"SearchTime: %d ms\\n\", ts.Time.Milliseconds())\n\tout.Printf(\"MaxDepth: %d\\n\", ts.Depth)\n\tout.Printf(\"Date: %s\\n\", time.Now().Local())\n\tout.Printf(\"Successful: %-3d (%d %%)\\n\", tr.SuccessCounter, 100*tr.SuccessCounter/tr.Counter)\n\tout.Printf(\"Failed: %-3d (%d %%)\\n\", tr.FailedCounter, 100*tr.FailedCounter/tr.Counter)\n\tout.Printf(\"Skipped: %-3d (%d %%)\\n\", tr.SkippedCounter, 100*tr.SkippedCounter/tr.Counter)\n\tout.Printf(\"Not tested: %-3d (%d %%)\\n\", tr.NotTestedCounter, 100*tr.NotTestedCounter/tr.Counter)\n\tout.Printf(\"Test time: %s\\n\", elapsed)\n\tout.Printf(\"Configuration: %s\\n\", config.Settings.String())\n}", "func TestOne(t *testing.T) {\n\ttest(t, 42)\n}", "func (t *Test) Run(fn func(ctx TestContext)) {\n\tstart := time.Now()\n\n\tscopes.CI.Infof(\"=== BEGIN: Test: '%s[%s]' ===\", rt.SuiteContext().Settings().TestID, t.t.Name())\n\tdefer func() {\n\t\tend := time.Now()\n\t\tscopes.CI.Infof(\"=== DONE: Test: '%s[%s] (%v)' ===\", rt.SuiteContext().Settings().TestID, t.t.Name(), end.Sub(start))\n\t}()\n\n\tctx := NewContext(t.t, t.labels...)\n\tdefer ctx.Done(t.t)\n\tfn(ctx)\n}", "func TestMain(t *testing.T) {\n}", "func (t *Tester) Test() error {\n\tif err := t.pretestSetup(); err != nil {\n\t\treturn err\n\t}\n\n\te2eTestArgs := []string{\n\t\t\"--host=\" + t.host,\n\t\t\"--provider=\" + t.provider,\n\t\t\"--kubeconfig=\" + t.kubeconfigPath,\n\t\t\"--ginkgo.flakeAttempts=\" + t.flakeAttempts,\n\t\t\"--ginkgo.skip=\" + 
t.skipRegex,\n\t\t\"--ginkgo.focus=\" + t.focusRegex,\n\t}\n\tginkgoArgs := append([]string{\n\t\t\"--nodes=\" + t.parallel,\n\t\te2eTestPath,\n\t\t\"--\"}, e2eTestArgs...)\n\n\tlog.Printf(\"Running ginkgo test as %s %+v\", binary, ginkgoArgs)\n\tcmd := exec.Command(binary, ginkgoArgs...)\n\texec.InheritOutput(cmd)\n\treturn cmd.Run()\n}", "func TestMain(m *testing.M) {\n\n\tos.Exit(m.Run())\n}", "func (t *Test) Run(tc *TestSuite) error {\n\n\tmqutil.Logger.Print(\"\\n--- \" + t.Name)\n\tfmt.Printf(\"\\nRunning test case: %s\\n\", t.Name)\n\terr := t.ResolveParameters(tc)\n\tif err != nil {\n\t\tfmt.Printf(\"... Fail\\n... %s\\n\", err.Error())\n\t\treturn err\n\t}\n\n\treq := resty.R()\n\tif len(tc.ApiToken) > 0 {\n\t\treq.SetAuthToken(tc.ApiToken)\n\t} else if len(tc.Username) > 0 {\n\t\treq.SetBasicAuth(tc.Username, tc.Password)\n\t}\n\n\tpath := GetBaseURL(t.db.Swagger) + t.SetRequestParameters(req)\n\tvar resp *resty.Response\n\n\tt.startTime = time.Now()\n\tswitch t.Method {\n\tcase mqswag.MethodGet:\n\t\tresp, err = req.Get(path)\n\tcase mqswag.MethodPost:\n\t\tresp, err = req.Post(path)\n\tcase mqswag.MethodPut:\n\t\tresp, err = req.Put(path)\n\tcase mqswag.MethodDelete:\n\t\tresp, err = req.Delete(path)\n\tcase mqswag.MethodPatch:\n\t\tresp, err = req.Patch(path)\n\tcase mqswag.MethodHead:\n\t\tresp, err = req.Head(path)\n\tcase mqswag.MethodOptions:\n\t\tresp, err = req.Options(path)\n\tdefault:\n\t\treturn mqutil.NewError(mqutil.ErrInvalid, fmt.Sprintf(\"Unknown method in test %s: %v\", t.Name, t.Method))\n\t}\n\tt.stopTime = time.Now()\n\tfmt.Printf(\"... call completed: %f seconds\\n\", t.stopTime.Sub(t.startTime).Seconds())\n\n\tif err != nil {\n\t\tt.err = mqutil.NewError(mqutil.ErrHttp, err.Error())\n\t} else {\n\t\tmqutil.Logger.Print(resp.Status())\n\t\tmqutil.Logger.Println(string(resp.Body()))\n\t}\n\terr = t.ProcessResult(resp)\n\treturn err\n}", "func (t *Test) Run(ctx context.Context, opts ...TestOption) (*TestResult, error) {\n\tparsedOpts := &testOptions{\n\t\tvars: &starlark.Dict{},\n\t}\n\tfor _, opt := range opts {\n\t\topt.applyTest(parsedOpts)\n\t}\n\n\tthread := &starlark.Thread{\n\t\tPrint: skyPrint,\n\t}\n\tthread.SetLocal(\"context\", ctx)\n\n\tassertModule := assertmodule.AssertModule()\n\ttestCtx := &starlarkstruct.Module{\n\t\tName: \"skycfg_test_ctx\",\n\t\tMembers: starlark.StringDict(map[string]starlark.Value{\n\t\t\t\"vars\": parsedOpts.vars,\n\t\t\t\"assert\": assertModule,\n\t\t}),\n\t}\n\targs := starlark.Tuple([]starlark.Value{testCtx})\n\n\tresult := TestResult{\n\t\tTestName: t.Name(),\n\t}\n\n\tstartTime := time.Now()\n\t_, err := starlark.Call(thread, t.callable, args, nil)\n\tresult.Duration = time.Since(startTime)\n\tif err != nil {\n\t\t// if there is no assertion error, there was something wrong with the execution itself\n\t\tif len(assertModule.Failures) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// there should only be one failure, because each test run gets its own *TestContext\n\t\t// and each assertion failure halts execution.\n\t\tif len(assertModule.Failures) > 1 {\n\t\t\tpanic(\"A test run should only have one assertion failure. 
Something went wrong with the test infrastructure.\")\n\t\t}\n\t\tresult.Failure = assertModule.Failures[0]\n\t}\n\n\treturn &result, nil\n}", "func TestMain(m *testing.M) {\n\tos.Exit(m.Run())\n}", "func TestMain(m *testing.M) {\n\tos.Exit(m.Run())\n}", "func TestMain(m *testing.M) {\n\tos.Exit(m.Run())\n}", "func (s *IntegrationSuite) TestRun(c *C) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\t// Execute e2e workflow\n\tlog.Info().Print(\"Running e2e integration test.\", field.M{\"app\": s.name, \"testName\": c.TestName()})\n\n\t// Check config\n\terr := s.app.Init(ctx)\n\tif err != nil {\n\t\tlog.Info().Print(\"Skipping integration test.\", field.M{\"app\": s.name, \"reason\": err.Error()})\n\t\ts.skip = true\n\t\tc.Skip(err.Error())\n\t}\n\n\t// Create namespace\n\terr = createNamespace(s.cli, s.namespace)\n\tc.Assert(err, IsNil)\n\n\t// Create profile\n\tif s.profile == nil {\n\t\tlog.Info().Print(\"Skipping integration test. Could not create profile. Please check if required credentials are set.\", field.M{\"app\": s.name})\n\t\ts.skip = true\n\t\tc.Skip(\"Could not create a Profile\")\n\t}\n\tprofileName := s.createProfile(c, ctx)\n\n\t// Install db\n\terr = s.app.Install(ctx, s.namespace)\n\tc.Assert(err, IsNil)\n\n\t// Check if ready\n\tok, err := s.app.IsReady(ctx)\n\tc.Assert(err, IsNil)\n\tc.Assert(ok, Equals, true)\n\n\t// Create blueprint\n\tbp := s.bp.Blueprint()\n\tc.Assert(bp, NotNil)\n\t_, err = s.crCli.Blueprints(kontroller.namespace).Create(ctx, bp, metav1.CreateOptions{})\n\tc.Assert(err, IsNil)\n\n\tvar configMaps, secrets map[string]crv1alpha1.ObjectReference\n\ttestEntries := 3\n\t// Add test entries to DB\n\tif a, ok := s.app.(app.DatabaseApp); ok {\n\t\t// wait for application to be actually ready\n\t\terr = pingAppAndWait(ctx, a)\n\t\tc.Assert(err, IsNil)\n\n\t\terr = a.Reset(ctx)\n\t\tc.Assert(err, IsNil)\n\n\t\terr = a.Initialize(ctx)\n\t\tc.Assert(err, IsNil)\n\n\t\t// Add few entries\n\t\tfor i := 0; i < testEntries; i++ {\n\t\t\tc.Assert(a.Insert(ctx), IsNil)\n\t\t}\n\n\t\tcount, err := a.Count(ctx)\n\t\tc.Assert(err, IsNil)\n\t\tc.Assert(count, Equals, testEntries)\n\t}\n\n\t// Get Secret and ConfigMap object references\n\tif a, ok := s.app.(app.ConfigApp); ok {\n\t\tconfigMaps = a.ConfigMaps()\n\t\tsecrets = a.Secrets()\n\t}\n\n\t// Validate Blueprint\n\tvalidateBlueprint(c, *bp, configMaps, secrets)\n\n\t// Create ActionSet specs\n\tas := newActionSet(bp.GetName(), profileName, kontroller.namespace, s.app.Object(), configMaps, secrets)\n\t// Take backup\n\tbackup := s.createActionset(ctx, c, as, \"backup\", nil)\n\tc.Assert(len(backup), Not(Equals), 0)\n\n\t// Save timestamp for PITR\n\tvar restoreOptions map[string]string\n\tif b, ok := s.bp.(app.PITRBlueprinter); ok {\n\t\tpitr := b.FormatPITR(time.Now())\n\t\tlog.Info().Print(\"Saving timestamp for PITR\", field.M{\"pitr\": pitr})\n\t\trestoreOptions = map[string]string{\n\t\t\t\"pitr\": pitr,\n\t\t}\n\t\t// Add few more entries with timestamp > pitr\n\t\ttime.Sleep(time.Second)\n\t\tif a, ok := s.app.(app.DatabaseApp); ok {\n\t\t\tc.Assert(a.Insert(ctx), IsNil)\n\t\t\tc.Assert(a.Insert(ctx), IsNil)\n\n\t\t\tcount, err := a.Count(ctx)\n\t\t\tc.Assert(err, IsNil)\n\t\t\tc.Assert(count, Equals, testEntries+2)\n\t\t}\n\t}\n\n\t// Reset DB\n\tif a, ok := s.app.(app.DatabaseApp); ok {\n\t\terr = a.Reset(ctx)\n\t\tc.Assert(err, IsNil)\n\t}\n\n\t// Restore backup\n\tpas, err := s.crCli.ActionSets(kontroller.namespace).Get(ctx, backup, metav1.GetOptions{})\n\tc.Assert(err, 
IsNil)\n\ts.createActionset(ctx, c, pas, \"restore\", restoreOptions)\n\n\t// Verify data\n\tif a, ok := s.app.(app.DatabaseApp); ok {\n\t\t// wait for application to be actually ready\n\t\terr = pingAppAndWait(ctx, a)\n\t\tc.Assert(err, IsNil)\n\n\t\tcount, err := a.Count(ctx)\n\t\tc.Assert(err, IsNil)\n\t\tc.Assert(count, Equals, testEntries)\n\t}\n\n\t// Delete snapshots\n\ts.createActionset(ctx, c, pas, \"delete\", nil)\n}", "func runTest(ctx context.Context, c autotest.Config, a *autotest.AutoservArgs, w io.Writer) (*Result, error) {\n\tr, err := runTask(ctx, c, a, w)\n\tif !r.Started {\n\t\treturn r, err\n\t}\n\tp := filepath.Join(a.ResultsDir, autoservPidFile)\n\tif i, err2 := readTestsFailed(p); err2 != nil {\n\t\tif err == nil {\n\t\t\terr = err2\n\t\t}\n\t} else {\n\t\tr.TestsFailed = i\n\t}\n\tif err2 := appendJobFinished(a.ResultsDir); err == nil {\n\t\terr = err2\n\t}\n\treturn r, err\n}", "func TestMain(t *testing.T) {\n\tvar ran bool\n\trun = func() {\n\t\tran = true\n\t}\n\tmain()\n\tif !ran {\n\t\tt.Error(\"Expected Run() to be called, but it wasn't\")\n\t}\n}", "func TestMain(t *testing.T) {\n\tvar ran bool\n\trun = func() {\n\t\tran = true\n\t}\n\tmain()\n\tif !ran {\n\t\tt.Error(\"Expected Run() to be called, but it wasn't\")\n\t}\n}", "func (controller TestController) RunTest() (setupErr error, testErr error) {\n\ttests := controller.testSuite.GetTests()\n\tlogrus.Debugf(\"Test configs: %v\", tests)\n\ttest, found := tests[controller.testName]\n\tif !found {\n\t\treturn stacktrace.NewError(\"Nonexistent test: %v\", controller.testName), nil\n\t}\n\n\tnetworkLoader, err := test.GetNetworkLoader()\n\tif err != nil {\n\t\treturn stacktrace.Propagate(err, \"Could not get network loader\"), nil\n\t}\n\n\tlogrus.Info(\"Connecting to Docker environment...\")\n\t// Initialize a Docker client\n\tdockerClient, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())\n\tif err != nil {\n\t\treturn stacktrace.Propagate(err,\"Failed to initialize Docker client from environment.\"), nil\n\t}\n\tdockerManager, err := docker.NewDockerManager(logrus.StandardLogger(), dockerClient)\n\tif err != nil {\n\t\treturn stacktrace.Propagate(err, \"An error occurred when constructing the Docker manager\"), nil\n\t}\n\tlogrus.Info(\"Connected to Docker environment\")\n\n\tlogrus.Infof(\"Configuring test network in Docker network %v...\", controller.networkId)\n\talreadyTakenIps := map[string]bool{\n\t\tcontroller.gatewayIp: true,\n\t\tcontroller.testControllerIp: true,\n\t}\n\tfreeIpTracker, err := networks.NewFreeIpAddrTracker(logrus.StandardLogger(), controller.subnetMask, alreadyTakenIps)\n\tif err != nil {\n\t\treturn stacktrace.Propagate(err, \"An error occurred creating the free IP address tracker\"), nil\n\t}\n\n\tbuilder := networks.NewServiceNetworkBuilder(\n\t\t\tdockerManager,\n\t\t\tcontroller.networkId,\n\t\t\tfreeIpTracker,\n\t\t\tcontroller.testVolumeName,\n\t\t\tcontroller.testVolumeFilepath)\n\tif err := networkLoader.ConfigureNetwork(builder); err != nil {\n\t\treturn stacktrace.Propagate(err, \"Could not configure test network in Docker network %v\", controller.networkId), nil\n\t}\n\tnetwork := builder.Build()\n\tdefer func() {\n\t\tlogrus.Info(\"Stopping test network...\")\n\t\terr := network.RemoveAll(CONTAINER_STOP_TIMEOUT)\n\t\tif err != nil {\n\t\t\tlogrus.Error(\"An error occurred stopping the network\")\n\t\t\tfmt.Fprintln(logrus.StandardLogger().Out, err)\n\t\t} else {\n\t\t\tlogrus.Info(\"Successfully stopped the test 
network\")\n\t\t}\n\t}()\n\tlogrus.Info(\"Test network configured\")\n\n\tlogrus.Info(\"Initializing test network...\")\n\tavailabilityCheckers, err := networkLoader.InitializeNetwork(network);\n\tif err != nil {\n\t\treturn stacktrace.Propagate(err, \"An error occurred initialized the network to its starting state\"), nil\n\t}\n\tlogrus.Info(\"Test network initialized\")\n\n\t// Second pass: wait for all services to come up\n\tlogrus.Info(\"Waiting for test network to become available...\")\n\tfor serviceId, availabilityChecker := range availabilityCheckers {\n\t\tlogrus.Debugf(\"Waiting for service %v to become available...\", serviceId)\n\t\tif err := availabilityChecker.WaitForStartup(); err != nil {\n\t\t\treturn stacktrace.Propagate(err, \"An error occurred waiting for service with ID %v to start up\", serviceId), nil\n\t\t}\n\t\tlogrus.Debugf(\"Service %v is available\", serviceId)\n\t}\n\tlogrus.Info(\"Test network is available\")\n\n\tlogrus.Info(\"Executing test...\")\n\tuntypedNetwork, err := networkLoader.WrapNetwork(network)\n\tif err != nil {\n\t\treturn stacktrace.Propagate(err, \"Error occurred wrapping network in user-defined network type\"), nil\n\t}\n\n\ttestResultChan := make(chan error)\n\n\tgo func() {\n\t\ttestResultChan <- runTest(test, untypedNetwork)\n\t}()\n\n\t// Time out the test so a poorly-written test doesn't run forever\n\ttestTimeout := test.GetExecutionTimeout()\n\tvar timedOut bool\n\tvar testResultErr error\n\tselect {\n\tcase testResultErr = <- testResultChan:\n\t\tlogrus.Tracef(\"Test returned result before timeout: %v\", testResultErr)\n\t\ttimedOut = false\n\tcase <- time.After(testTimeout):\n\t\tlogrus.Tracef(\"Hit timeout %v before getting a result from the test\", testTimeout)\n\t\ttimedOut = true\n\t}\n\n\tlogrus.Tracef(\"After running test w/timeout: resultErr: %v, timedOut: %v\", testResultErr, timedOut)\n\n\tif timedOut {\n\t\treturn nil, stacktrace.NewError(\"Timed out after %v waiting for test to complete\", testTimeout)\n\t}\n\n\tlogrus.Info(\"Test execution completed\")\n\n\tif testResultErr != nil {\n\t\treturn nil, stacktrace.Propagate(testResultErr, \"An error occurred when running the test\")\n\t}\n\n\treturn nil, nil\n}", "func TestMain(m *testing.M) {\n\tsetup()\n\tcode := m.Run() \n os.Exit(code)\n}", "func (r *runner) UpdateTestRun(ctrl controller.Interface, testRun *v1alpha1.TestRun) error {\n\n\tif testRun.Status.Status == v1alpha1.TestRunComplete {\n\t\tlog.Printf(\" | '%v/%v' is already Complete - Skipping\", testRun.Namespace, testRun.Name)\n\t\treturn nil\n\t}\n\n\tif testRun.Status.Status == \"\" {\n\t\terr := initializeStatus(ctrl, testRun)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tstats := runStats{0, 0}\n\tfor _, record := range testRun.Status.Records {\n\t\tif record.EndTime != nil {\n\t\t\tstats.CompletedCount++\n\t\t\tif record.Result != string(corev1.PodSucceeded) {\n\t\t\t\tstats.FailCount++\n\t\t\t}\n\t\t}\n\t}\n\tif stats.CompletedCount == len(testRun.Status.Records) {\n\t\treturn testRunComplete(ctrl, testRun, stats)\n\t}\n\n\tlog.Printf(\"Running '%v/%v'\", testRun.Namespace, testRun.Name)\n\n\tlog.Printf(\" | %v/%v\", testRun.Namespace, testRun.Name)\n\n\ttests, err := getTestsForTestRun(ctrl, testRun)\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error getting list of tests: %s\", err.Error())\n\t}\n\tlog.Printf(\" | Test Count: %v\", len(tests))\n\n\tJobsSlots := getJobSlots(testRun)\n\n\treturn runNextNTests(ctrl, testRun, tests, JobsSlots)\n}", "func TestMain(m *testing.M) {\n\tcode := 
m.Run()\n\tos.Exit(code)\n}", "func TestMain(m *testing.M) {\n\tcode := m.Run()\n\tos.Exit(code)\n}", "func TestSimpleRun(t *testing.T) {\n\tctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)\n\tdefer cancel()\n\n\tns := \"sonobuoy-\" + strings.ToLower(t.Name())\n\tdefer cleanup(t, ns)\n\n\targs := fmt.Sprintf(\"run --image-pull-policy IfNotPresent --wait -p testImage/yaml/job-junit-passing-singlefile.yaml -n %v\", ns)\n\terr, _, stderr := runSonobuoyCommandWithContext(ctx, t, args)\n\n\tif err != nil {\n\t\tt.Errorf(\"Sonobuoy exited with an error: %q\\n\", err)\n\t\tt.Log(stderr.String())\n\t}\n}", "func TestMain(m *testing.M) {\n\tgin.SetMode(gin.TestMode)\n\tos.Exit(m.Run()) // run the tests, then exit\n}", "func (r FakeTestRunner) RunTest(ctx context.Context, test v1alpha3.TestConfiguration, podSec bool) (result *v1alpha3.TestStatus, err error) {\n\tselect {\n\tcase <-time.After(r.Sleep):\n\t\treturn r.TestStatus, r.Error\n\tcase <-ctx.Done():\n\t\treturn nil, ctx.Err()\n\t}\n}", "func (c *TestController) RunTest(id TestID) error {\n\n\t// Check if the test is already running.\n\tif job := c.lookupJob(id); job != nil {\n\t\treturn fmt.Errorf(\"test %s already running\", job.Name)\n\t}\n\n\t// First we need determine the runtime configuration.\n\t//\n\t// If we find multiple configurations, we don't ask the user to choose,\n\t// but simply use the last candidate. The assumption is: The latest\n\t// configuration has a higher probability to be the complete one.\n\tcandidates := c.suites.Owners(protocol.DocumentURI(id.URI))\n\tif len(candidates) == 0 {\n\t\treturn fmt.Errorf(\"cannot run %s: no configuration found\", id.URI)\n\t}\n\tif len(candidates) > 1 {\n\t\tlog.Printf(\"multiple configurations found for %s: %v\\n\", id.URI, candidates)\n\t}\n\tconfig := candidates[0].Config\n\tlog.Printf(\"using configuration from %s\\n\", config.Root)\n\n\t// TODO(5nord): Use project.ApplyPreset to retrieve the configuration,\n\t// like expected verdict for the job.\n\tjob := &control.Job{\n\t\tName: id.Name,\n\t\tConfig: config,\n\t}\n\n\tc.mu.Lock()\n\tc.running[id] = job\n\tc.mu.Unlock()\n\n\tc.jobs <- job\n\n\treturn nil\n}", "func Test() error {\n\treturn sh.RunV(\"go\", \"test\", \"-v\", \"-cover\", \"./...\", \"-coverprofile=coverage.out\")\n}", "func (runner *suiteRunner) runTest(method *reflect.FuncValue) *C {\n c := runner.forkTest(method)\n <-c.done\n return c\n}", "func TestMain(m *testing.M) {\n\tflag.Parse()\n\t// if *databaseTest {\n\tif isIntegrationTest() {\n\t\tsetupTestDB()\n\t}\n\texitCode := m.Run()\n\tos.Exit(exitCode)\n}", "func runTest(test TestCase) TestResult {\n\t// cut = command under test\n\tcut := cmd.NewCommand(test.Command.Cmd)\n\tcut.SetTimeout(test.Command.Timeout)\n\tcut.Dir = test.Command.Dir\n\tfor k, v := range test.Command.Env {\n\t\tcut.AddEnv(k, v)\n\t}\n\n\tif err := cut.Execute(); err != nil {\n\t\tlog.Println(test.Title, \" failed \", err.Error())\n\t\ttest.Result = CommandResult{\n\t\t\tError: err,\n\t\t}\n\n\t\treturn TestResult{\n\t\t\tTestCase: test,\n\t\t}\n\t}\n\n\tlog.Println(\"title: '\"+test.Title+\"'\", \" Command: \", cut.Cmd)\n\tlog.Println(\"title: '\"+test.Title+\"'\", \" Directory: \", cut.Dir)\n\tlog.Println(\"title: '\"+test.Title+\"'\", \" Env: \", cut.Env)\n\n\t// Write test result\n\ttest.Result = CommandResult{\n\t\tExitCode: cut.ExitCode(),\n\t\tStdout: strings.Replace(cut.Stdout(), \"\\r\\n\", \"\\n\", -1),\n\t\tStderr: strings.Replace(cut.Stderr(), \"\\r\\n\", \"\\n\", -1),\n\t}\n\n\tlog.Println(\"title: 
'\"+test.Title+\"'\", \" ExitCode: \", test.Result.ExitCode)\n\tlog.Println(\"title: '\"+test.Title+\"'\", \" Stdout: \", test.Result.Stdout)\n\tlog.Println(\"title: '\"+test.Title+\"'\", \" Stderr: \", test.Result.Stderr)\n\n\treturn Validate(test)\n}", "func runScenarioTest(assert *asserts.Asserts, param scenarioParam) {\n\tapplog.Infof(param.String())\n\tmonitoring.Reset()\n\n\t// Prepare test.\n\tdone := make(chan bool)\n\tprovider := config.NewMapConfigurationProvider()\n\tconfig := config.New(provider)\n\n\tconfig.Set(\"backend\", \"single\")\n\n\tassert.Nil(ebus.Init(config), \"single node backend started\")\n\n\tStartShopAgent()\n\tStartWarehouseAgent()\n\tStartManufacturerAgent()\n\tStartDeliveryAgent()\n\tStartWaitAgent(done)\n\n\t// Run orders.\n\tfor on := 0; on < param.Orders; on++ {\n\t\torder := generateOrder(on)\n\t\terr := ebus.Emit(order, \"OrderReceived\")\n\t\tassert.Nil(err, \"order emitted\")\n\t}\n\n\tselect {\n\tcase <-done:\n\t\tapplog.Infof(\"order processing done\")\n\tcase <-time.After(param.Timeout):\n\t\tassert.Fail(\"timeout during wait for processed orders\")\n\t}\n\n\t// Finalize test.\n\terr := ebus.Stop()\n\tassert.Nil(err, \"stopped the bus\")\n\ttime.Sleep(time.Second)\n\tmonitoring.MeasuringPointsPrintAll()\n}", "func (Tests) Run(ctx context.Context) error {\n\targ := BuildDockerComposeArgs(ProjectName, ProjectType, \"test\", DockerComposeTestFile)\n\targ = append(arg, \"run\")\n\targ = append(arg,\n\t\t\"--rm\",\n\t\t\"--use-aliases\",\n\t)\n\targ = append(arg, \"app\", \"go\", \"test\", \"-mod=vendor\", \"-v\", \"-cover\")\n\tif err := Exec(ComposeBin, append(arg, \"./service\")...); err != nil {\n\t\treturn err\n\t}\n\tif err := Exec(ComposeBin, append(arg, \"./...\")...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (s Suite) Run(t *testing.T) bool {\n\tt.Helper()\n\treturn s(\"\", nil, func(c *config) { c.t = t })\n}", "func Test(t *testing.T, command Runner, testCases []Case) {\n\tt.Helper()\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.Name, func(t *testing.T) {\n\t\t\tt.Helper() // TODO: make Helper working for subtests: issue #24128\n\n\t\t\tstdout := &bytes.Buffer{}\n\t\t\tstderr := &bytes.Buffer{}\n\n\t\t\tcommand.SetStdout(stdout)\n\t\t\tcommand.SetStderr(stderr)\n\n\t\t\tm := newMatch(t, tc.wantFail)\n\n\t\t\tif tc.WantFile != \"\" {\n\t\t\t\tif !m.removeFile(tc.WantFile) {\n\t\t\t\t\ttc.WantFile = \"\" // stop testing File match\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tvar gotErr string\n\t\t\tgotPanic := m.run(func() {\n\t\t\t\tif err := command.Run(tc.Args); err != nil {\n\t\t\t\t\tgotErr = err.Error()\n\t\t\t\t}\n\t\t\t})\n\n\t\t\tif tc.WantFile != \"\" {\n\t\t\t\tif gotFile, ext, ok := m.getFile(tc.WantFile); ok {\n\t\t\t\t\tm.match(\"File golden\"+ext, gotFile, \"golden\"+ext)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tm.match(\"WantStdout\", stdout.String(), tc.WantStdout)\n\t\t\tm.match(\"WantStderr\", stderr.String(), tc.WantStderr)\n\t\t\tm.match(\"WantPanic\", gotPanic, tc.WantPanic)\n\t\t\tm.match(\"WantErr\", gotErr, tc.WantErr)\n\t\t\tm.equal(\"WantExitCode\", command.ExitCode(), tc.WantExitCode)\n\n\t\t\tm.done()\n\t\t})\n\t}\n}", "func TestRunTestAllReal(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping test in short mode.\")\n\t}\n\n\ttaskData := agent.TaskData{\n\t\tStringValues: map[string]string{\n\t\t\tCFG_TEST_TYPE: CFG_TYPE_ALL,\n\t\t\tCFG_SERVER_HOST: \"speedtest.nyc.rr.com:8080\",\n\t\t\tCFG_SERVER_ID: \"16976\",\n\t\t},\n\t\tIntValues: map[string]int{\n\t\t\tCFG_SERVER_ID: 16976,\n\t\t\tCFG_TIME_OUT: 
5,\n\t\t},\n\t\tFloatValues: map[string]float64{CFG_MAX_SECONDS: 6},\n\t\tIntSlices: map[string][]int{\n\t\t\tCFG_DOWNLOAD_SIZES: {245388, 505544},\n\t\t\tCFG_UPLOAD_SIZES: {32768, 65536},\n\t\t},\n\t}\n\n\tspdTestRunner := SpeedTestRunner{}\n\n\tspTestResults, err := spdTestRunner.Run(taskData)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected Error: \\n%s\", err.Error())\n\t}\n\n\tresults := spTestResults.Latency.Seconds()\n\n\tif results <= 0 {\n\t\tt.Errorf(\"Error: Expected a positive Latency result, but got: %f\", results)\n\t} else {\n\t\tfmt.Printf(\"\\nLatency test results for server %d ... %f\\n\", taskData.IntValues[CFG_SERVER_ID], results)\n\t}\n\n\tresults = spTestResults.Download\n\tif results <= 0 {\n\t\tt.Errorf(\"Error: Expected a positive Download result, but got: %f\", results)\n\t} else {\n\t\tfmt.Printf(\"\\nDownload test results for server %d ... %f\\n\", taskData.IntValues[CFG_SERVER_ID], results)\n\t}\n\n\tresults = spTestResults.Upload\n\tif results <= 0 {\n\t\tt.Errorf(\"Error: Expected a positive Upload result, but got: %f\", results)\n\t} else {\n\t\tfmt.Printf(\"\\nUpload test results for server %d ... %f\\n\", taskData.IntValues[CFG_SERVER_ID], results)\n\t}\n}", "func TestExecute(t *testing.T) {\n\tctx := context.Background()\n\n\t// Clear pre-existing golden files to avoid leaving stale ones around.\n\tif *updateGoldens {\n\t\tfiles, err := filepath.Glob(filepath.Join(*goldensDir, \"*.golden.json\"))\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tfor _, f := range files {\n\t\t\tif err := os.Remove(f); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}\n\t}\n\n\ttestCases := []struct {\n\t\tname string\n\t\tflags testsharderFlags\n\t\ttestSpecs []build.TestSpec\n\t\ttestDurations []build.TestDuration\n\t\ttestList []build.TestListEntry\n\t\tmodifiers []testsharder.TestModifier\n\t\tpackageRepos []build.PackageRepo\n\t\taffectedTests []string\n\t}{\n\t\t{\n\t\t\tname: \"no tests\",\n\t\t},\n\t\t{\n\t\t\tname: \"mixed device types\",\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"foo\"),\n\t\t\t\thostTestSpec(\"bar\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"multiply\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\ttargetDurationSecs: 5,\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"foo\"),\n\t\t\t\tfuchsiaTestSpec(\"bar\"),\n\t\t\t},\n\t\t\tmodifiers: []testsharder.TestModifier{\n\t\t\t\t{\n\t\t\t\t\tName: \"foo\",\n\t\t\t\t\tTotalRuns: 50,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"bar\",\n\t\t\t\t},\n\t\t\t},\n\t\t\ttestDurations: []build.TestDuration{\n\t\t\t\t{\n\t\t\t\t\tName: \"*\",\n\t\t\t\t\tMedianDuration: time.Millisecond,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"affected tests\",\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"affected-hermetic\"),\n\t\t\t\tfuchsiaTestSpec(\"not-affected\"),\n\t\t\t},\n\t\t\ttestList: []build.TestListEntry{\n\t\t\t\ttestListEntry(\"affected-hermetic\", true),\n\t\t\t\ttestListEntry(\"not-affected\", false),\n\t\t\t},\n\t\t\taffectedTests: []string{\n\t\t\t\tpackageURL(\"affected-hermetic\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"affected nonhermetic tests\",\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"affected-nonhermetic\"),\n\t\t\t\tfuchsiaTestSpec(\"not-affected\"),\n\t\t\t},\n\t\t\taffectedTests: []string{\n\t\t\t\tpackageURL(\"affected-nonhermetic\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"target test count\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\ttargetTestCount: 2,\n\t\t\t},\n\t\t\ttestSpecs: 
[]build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"foo1\"),\n\t\t\t\tfuchsiaTestSpec(\"foo2\"),\n\t\t\t\tfuchsiaTestSpec(\"foo3\"),\n\t\t\t\tfuchsiaTestSpec(\"foo4\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"sharding by time\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\ttargetDurationSecs: int((4 * time.Minute).Seconds()),\n\t\t\t\tperTestTimeoutSecs: int((10 * time.Minute).Seconds()),\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"slow\"),\n\t\t\t\tfuchsiaTestSpec(\"fast1\"),\n\t\t\t\tfuchsiaTestSpec(\"fast2\"),\n\t\t\t\tfuchsiaTestSpec(\"fast3\"),\n\t\t\t},\n\t\t\ttestDurations: []build.TestDuration{\n\t\t\t\t{\n\t\t\t\t\tName: \"*\",\n\t\t\t\t\tMedianDuration: 2 * time.Second,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: packageURL(\"slow\"),\n\t\t\t\t\tMedianDuration: 5 * time.Minute,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"max shards per env\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\t// Given expected test durations of 4 minutes for each test it's\n\t\t\t\t// impossible to satisfy both the target shard duration and the\n\t\t\t\t// max shards per environment, so the target shard duration\n\t\t\t\t// should effectively be ignored.\n\t\t\t\ttargetDurationSecs: int((5 * time.Minute).Seconds()),\n\t\t\t\tmaxShardsPerEnvironment: 2,\n\t\t\t\tskipUnaffected: true,\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"affected1\"),\n\t\t\t\tfuchsiaTestSpec(\"affected2\"),\n\t\t\t\tfuchsiaTestSpec(\"affected3\"),\n\t\t\t\tfuchsiaTestSpec(\"affected4\"),\n\t\t\t\tfuchsiaTestSpec(\"unaffected1\"),\n\t\t\t\tfuchsiaTestSpec(\"unaffected2\"),\n\t\t\t\tfuchsiaTestSpec(\"nonhermetic1\"),\n\t\t\t\tfuchsiaTestSpec(\"nonhermetic2\"),\n\t\t\t},\n\t\t\ttestDurations: []build.TestDuration{\n\t\t\t\t{\n\t\t\t\t\tName: \"*\",\n\t\t\t\t\tMedianDuration: 4 * time.Minute,\n\t\t\t\t},\n\t\t\t},\n\t\t\taffectedTests: []string{\n\t\t\t\tpackageURL(\"affected1\"),\n\t\t\t\tpackageURL(\"affected2\"),\n\t\t\t\tpackageURL(\"affected3\"),\n\t\t\t\tpackageURL(\"affected4\"),\n\t\t\t},\n\t\t\ttestList: []build.TestListEntry{\n\t\t\t\ttestListEntry(\"affected1\", true),\n\t\t\t\ttestListEntry(\"affected2\", true),\n\t\t\t\ttestListEntry(\"affected3\", true),\n\t\t\t\ttestListEntry(\"affected4\", true),\n\t\t\t\ttestListEntry(\"unaffected1\", true),\n\t\t\t\ttestListEntry(\"unaffected2\", true),\n\t\t\t\ttestListEntry(\"nonhermetic1\", false),\n\t\t\t\ttestListEntry(\"nonhermetic2\", false),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"hermetic deps\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\thermeticDeps: true,\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"foo\"),\n\t\t\t\tfuchsiaTestSpec(\"bar\"),\n\t\t\t\tfuchsiaTestSpec(\"baz\"),\n\t\t\t},\n\t\t\tpackageRepos: []build.PackageRepo{\n\t\t\t\t{\n\t\t\t\t\tPath: \"pkg_repo1\",\n\t\t\t\t\tBlobs: filepath.Join(\"pkg_repo1\", \"blobs\"),\n\t\t\t\t\tTargets: filepath.Join(\"pkg_repo1\", \"targets.json\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"ffx deps\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\tffxDeps: true,\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"foo\"),\n\t\t\t\tfuchsiaTestSpec(\"bar\"),\n\t\t\t\tfuchsiaTestSpec(\"baz\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"multiply affected test\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\taffectedTestsMultiplyThreshold: 3,\n\t\t\t\ttargetDurationSecs: int(2 * time.Minute.Seconds()),\n\t\t\t},\n\t\t\ttestSpecs: 
[]build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"multiplied-affected-test\"),\n\t\t\t\tfuchsiaTestSpec(\"affected-test\"),\n\t\t\t\tfuchsiaTestSpec(\"unaffected-test\"),\n\t\t\t},\n\t\t\ttestDurations: []build.TestDuration{\n\t\t\t\t{\n\t\t\t\t\tName: \"*\",\n\t\t\t\t\tMedianDuration: time.Second,\n\t\t\t\t},\n\t\t\t},\n\t\t\taffectedTests: []string{\n\t\t\t\tpackageURL(\"multiplied-affected-test\"),\n\t\t\t\tpackageURL(\"affected-test\"),\n\t\t\t},\n\t\t\tmodifiers: []testsharder.TestModifier{\n\t\t\t\t{\n\t\t\t\t\tName: \"multiplied-affected-test\",\n\t\t\t\t\tTotalRuns: 100,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"test list with tags\",\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"hermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"nonhermetic-test\"),\n\t\t\t},\n\t\t\ttestList: []build.TestListEntry{\n\t\t\t\ttestListEntry(\"hermetic-test\", true),\n\t\t\t\ttestListEntry(\"nonhermetic-test\", false),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"skip unaffected tests\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\tskipUnaffected: true,\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"affected-hermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"unaffected-hermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"affected-nonhermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"unaffected-nonhermetic-test\"),\n\t\t\t},\n\t\t\ttestList: []build.TestListEntry{\n\t\t\t\ttestListEntry(\"affected-hermetic-test\", true),\n\t\t\t\ttestListEntry(\"unaffected-hermetic-test\", true),\n\t\t\t\ttestListEntry(\"affected-nonhermetic-test\", false),\n\t\t\t\ttestListEntry(\"unaffected-nonhermetic-test\", false),\n\t\t\t},\n\t\t\taffectedTests: []string{\n\t\t\t\tfuchsiaTestSpec(\"affected-hermetic-test\").Name,\n\t\t\t\tfuchsiaTestSpec(\"affected-nonhermetic-test\").Name,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"run all tests if no affected tests\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\tskipUnaffected: true,\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"affected-hermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"unaffected-hermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"affected-nonhermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"unaffected-nonhermetic-test\"),\n\t\t\t},\n\t\t\ttestList: []build.TestListEntry{\n\t\t\t\ttestListEntry(\"affected-hermetic-test\", true),\n\t\t\t\ttestListEntry(\"unaffected-hermetic-test\", true),\n\t\t\t\ttestListEntry(\"affected-nonhermetic-test\", false),\n\t\t\t\ttestListEntry(\"unaffected-nonhermetic-test\", false),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"multiply unaffected hermetic tests\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\tskipUnaffected: true,\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"unaffected-hermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"affected-nonhermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"unaffected-hermetic-multiplied-test\"),\n\t\t\t},\n\t\t\ttestList: []build.TestListEntry{\n\t\t\t\ttestListEntry(\"unaffected-hermetic-test\", true),\n\t\t\t\ttestListEntry(\"affected-nonhermetic-test\", false),\n\t\t\t\ttestListEntry(\"unaffected-hermetic-multiplied-test\", true),\n\t\t\t},\n\t\t\taffectedTests: []string{\n\t\t\t\tfuchsiaTestSpec(\"affected-nonhermetic-test\").Name,\n\t\t\t},\n\t\t\tmodifiers: []testsharder.TestModifier{\n\t\t\t\t{\n\t\t\t\t\tName: \"unaffected-hermetic-multiplied-test\",\n\t\t\t\t\tTotalRuns: 100,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"various modifiers\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\ttargetDurationSecs: 5,\n\t\t\t},\n\t\t\ttestSpecs: 
[]build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"foo\"),\n\t\t\t\tfuchsiaTestSpec(\"bar\"),\n\t\t\t\tfuchsiaTestSpec(\"baz\"),\n\t\t\t},\n\t\t\tmodifiers: []testsharder.TestModifier{\n\t\t\t\t// default modifier\n\t\t\t\t{\n\t\t\t\t\tName: \"*\",\n\t\t\t\t\tTotalRuns: -1,\n\t\t\t\t\tMaxAttempts: 2,\n\t\t\t\t},\n\t\t\t\t// multiplier\n\t\t\t\t{\n\t\t\t\t\tName: \"foo\",\n\t\t\t\t\tMaxAttempts: 1,\n\t\t\t\t},\n\t\t\t\t// change maxAttempts (but multiplier takes precedence)\n\t\t\t\t{\n\t\t\t\t\tName: \"foo\",\n\t\t\t\t\tTotalRuns: -1,\n\t\t\t\t\tMaxAttempts: 1,\n\t\t\t\t},\n\t\t\t\t// change maxAttempts, set affected\n\t\t\t\t{\n\t\t\t\t\tName: \"bar\",\n\t\t\t\t\tAffected: true,\n\t\t\t\t\tTotalRuns: -1,\n\t\t\t\t\tMaxAttempts: 1,\n\t\t\t\t},\n\t\t\t},\n\t\t\ttestList: []build.TestListEntry{\n\t\t\t\ttestListEntry(\"foo\", false),\n\t\t\t\ttestListEntry(\"bar\", true),\n\t\t\t\ttestListEntry(\"baz\", false),\n\t\t\t},\n\t\t\ttestDurations: []build.TestDuration{\n\t\t\t\t{\n\t\t\t\t\tName: \"*\",\n\t\t\t\t\tMedianDuration: time.Millisecond,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tgoldenBasename := strings.ReplaceAll(tc.name, \" \", \"_\") + \".golden.json\"\n\t\t\tgoldenFile := filepath.Join(*goldensDir, goldenBasename)\n\n\t\t\tif *updateGoldens {\n\t\t\t\ttc.flags.outputFile = goldenFile\n\t\t\t} else {\n\t\t\t\ttc.flags.outputFile = filepath.Join(t.TempDir(), goldenBasename)\n\t\t\t}\n\n\t\t\ttc.flags.buildDir = t.TempDir()\n\t\t\tif len(tc.modifiers) > 0 {\n\t\t\t\ttc.flags.modifiersPath = writeTempJSONFile(t, tc.modifiers)\n\t\t\t}\n\t\t\tif len(tc.affectedTests) > 0 {\n\t\t\t\t// Add a newline to the end of the file to test that it still calculates the\n\t\t\t\t// correct number of affected tests even with extra whitespace.\n\t\t\t\ttc.flags.affectedTestsPath = writeTempFile(t, strings.Join(tc.affectedTests, \"\\n\")+\"\\n\")\n\t\t\t}\n\t\t\tif tc.flags.ffxDeps {\n\t\t\t\tsdkManifest := map[string]interface{}{\n\t\t\t\t\t\"atoms\": []interface{}{},\n\t\t\t\t}\n\t\t\t\tsdkManifestPath := filepath.Join(tc.flags.buildDir, \"sdk\", \"manifest\", \"core\")\n\t\t\t\tif err := os.MkdirAll(filepath.Dir(sdkManifestPath), os.ModePerm); err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t\tif err := jsonutil.WriteToFile(sdkManifestPath, sdkManifest); err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Write test-list.json.\n\t\t\tif err := jsonutil.WriteToFile(\n\t\t\t\tfilepath.Join(tc.flags.buildDir, testListPath),\n\t\t\t\tbuild.TestList{Data: tc.testList, SchemaID: \"experimental\"},\n\t\t\t); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\twriteDepFiles(t, tc.flags.buildDir, tc.testSpecs)\n\t\t\tfor _, repo := range tc.packageRepos {\n\t\t\t\tif err := os.MkdirAll(filepath.Join(tc.flags.buildDir, repo.Path), 0o700); err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tm := &fakeModules{\n\t\t\t\ttestSpecs: tc.testSpecs,\n\t\t\t\ttestDurations: tc.testDurations,\n\t\t\t\tpackageRepositories: tc.packageRepos,\n\t\t\t}\n\t\t\tif err := execute(ctx, tc.flags, m); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\tif !*updateGoldens {\n\t\t\t\twant := readShards(t, goldenFile)\n\t\t\t\tgot := readShards(t, tc.flags.outputFile)\n\t\t\t\tif diff := cmp.Diff(want, got); diff != \"\" {\n\t\t\t\t\tt.Errorf(strings.Join([]string{\n\t\t\t\t\t\t\"Golden file mismatch!\",\n\t\t\t\t\t\t\"To fix, run `tools/integration/testsharder/update_goldens.sh\",\n\t\t\t\t\t\tdiff,\n\t\t\t\t\t}, 
\"\\n\"))\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}", "func TestMain(t *testing.T) { TestingT(t) }", "func TestMain(m *testing.M) {\n\ttestsuite.RevelTestHelper(m, \"dev\", run.Run)\n}", "func TestMain(m *testing.M) {\n\tDropTestData(0)\n\tanswer := m.Run()\n\tDropTestData(0)\n\tos.Exit(answer)\n}", "func TestMain(m *testing.M) {\n\t// Code here runs before testing starts\n\tmux = GetMux()\n\t// Run tests\n\texitCode := m.Run()\n\t// Code here runs after testing finishes\n\tos.Exit(exitCode)\n}", "func TestMain(m *testing.M) {\n\n\t// Run Setup\n\tSetup()\n\n\t// Run all the tests\n\treturnCode := m.Run()\n\n\t// Run teardown\n\tTearDown()\n\n\t// Pass on the exit codes\n\tos.Exit(returnCode)\n}", "func TestMain(m *testing.M) {\n\tframework.Run(\"pilot_test\", m)\n}", "func TestMain(m *testing.M) {\n\tflag.Parse()\n\n\tresult := m.Run()\n\n\tos.Exit(result)\n}", "func RunTest(ctx context.Context, fn testFuncType, isGuest bool) error {\n\t// We lose connectivity along the way here, and if that races with the\n\t// recover_duts network-recovery hooks, it may interrupt us.\n\tunlock, err := network.LockCheckNetworkHook(ctx)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed locking the check network hook\")\n\t}\n\tdefer unlock()\n\n\tvar env TestEnv\n\n\tdefer tearDown(ctx, &env)\n\n\tif err := setUp(ctx, &env, isGuest); err != nil {\n\t\treturn errors.Wrap(err, \"failed starting the test\")\n\t}\n\n\treturn fn(ctx, &env)\n}", "func TestMain(m *testing.M) {\n\trt, _ := framework.Run(\"echo_test\", m)\n\tos.Exit(rt)\n}", "func TestMain(m *testing.M) {\n\tfstest.TestMain(m)\n}", "func TestMain(m *testing.M) {\n\tfstest.TestMain(m)\n}", "func TestMain(m *testing.M) {\n\tfstest.TestMain(m)\n}", "func (suite *AddCommandTestSuite) TestExecuteWithMultipleURLs() {\n\n}", "func TestMain(m *testing.M) {\n\t_Init();\n\tresult := m.Run();\n\t_TearDown();\n\tos.Exit(result);\n}", "func TestMain(m *testing.M) {\n\tsetUp()\n\tretCode := m.Run()\n\ttearDown()\n\tos.Exit(retCode)\n}", "func (sfs *SuiteFS) RunTests(t *testing.T, userName string, stFuncs ...SuiteTestFunc) {\n\tvfs := sfs.vfsSetup\n\n\t_, _ = sfs.User(t, userName)\n\tdefer sfs.User(t, sfs.initUser.Name())\n\n\tfor _, stFunc := range stFuncs {\n\t\tfuncName := runtime.FuncForPC(reflect.ValueOf(stFunc).Pointer()).Name()\n\t\tfuncName = funcName[strings.LastIndex(funcName, \".\")+1 : strings.LastIndex(funcName, \"-\")]\n\t\ttestDir := vfs.Join(sfs.rootDir, funcName)\n\n\t\tsfs.CreateTestDir(t, testDir)\n\n\t\tt.Run(funcName, func(t *testing.T) {\n\t\t\tstFunc(t, testDir)\n\t\t})\n\n\t\tsfs.RemoveTestDir(t, testDir)\n\t}\n}", "func runTestMain(m *testing.M) int {\n\tisLess, err := test_helpers.IsTarantoolVersionLess(2, 2, 0)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to extract Tarantool version: %s\", err)\n\t}\n\n\tif isLess {\n\t\tlog.Println(\"Skipping decimal tests...\")\n\t\tisDecimalSupported = false\n\t\treturn m.Run()\n\t} else {\n\t\tisDecimalSupported = true\n\t}\n\n\tinstance, err := test_helpers.StartTarantool(test_helpers.StartOpts{\n\t\tInitScript: \"config.lua\",\n\t\tListen: server,\n\t\tUser: opts.User,\n\t\tPass: opts.Pass,\n\t\tWaitStart: 100 * time.Millisecond,\n\t\tConnectRetry: 10,\n\t\tRetryTimeout: 500 * time.Millisecond,\n\t})\n\tdefer test_helpers.StopTarantoolWithCleanup(instance)\n\n\tif err != nil {\n\t\tlog.Printf(\"Failed to prepare test Tarantool: %s\", err)\n\t\treturn 1\n\t}\n\n\treturn m.Run()\n}", "func (o Scorecard) RunTests(ctx context.Context) (testOutput v1alpha3.Test, err error) {\n\n\terr = 
o.TestRunner.Initialize(ctx)\n\tif err != nil {\n\t\treturn testOutput, err\n\t}\n\n\ttests := o.selectTests()\n\tif len(tests) == 0 {\n\t\treturn testOutput, nil\n\t}\n\n\tfor _, test := range tests {\n\t\tresult, err := o.TestRunner.RunTest(ctx, test)\n\t\tif err != nil {\n\t\t\tresult = convertErrorToStatus(test.Name, err)\n\t\t}\n\t\ttestOutput.Status.Results = append(testOutput.Status.Results, result.Results...)\n\t}\n\n\tif !o.SkipCleanup {\n\t\terr = o.TestRunner.Cleanup(ctx)\n\t\tif err != nil {\n\t\t\treturn testOutput, err\n\t\t}\n\t}\n\treturn testOutput, nil\n}", "func RunTests(m *testing.M, version *int) {\n\tflag.IntVar(version, \"v\", 0, \"The anwork version that should be used with these tests\")\n\tflag.Parse()\n\n\tif *version == 0 {\n\t\tpanic(\"Version (-v) must be passed with a legitimate anwork version number\")\n\t}\n\n\tos.Exit(m.Run())\n}", "func (s *Service) RunTest(ctx context.Context, req *conformance.Request) (*conformance.Response, error) {\n\tvar config test_gen.ServiceMesh\n\n\tconfig = linkerdConfig\n\tswitch req.Mesh.Type {\n\tcase smp.ServiceMesh_LINKERD:\n\t\tconfig = linkerdConfig\n\t\treq.Mesh.Annotations[\"linkerd.io/inject\"] = \"enabled\"\n\tcase smp.ServiceMesh_APP_MESH:\n\t\tconfig = linkerdConfig\n\t\treq.Mesh.Labels[\"appmesh.k8s.aws/sidecarInjectorWebhook\"] = \"enabled\"\n\tcase smp.ServiceMesh_MAESH:\n\t\tconfig = maeshConfig\n\tcase smp.ServiceMesh_ISTIO:\n\t\tconfig = istioConfig\n\t\treq.Mesh.Labels[\"istio-injection\"] = \"enabled\"\n\tcase smp.ServiceMesh_OPEN_SERVICE_MESH:\n\t\tconfig = osmConfig\n\t\treq.Mesh.Labels[\"openservicemesh.io/monitored-by\"] = \"osm\"\n\tcase smp.ServiceMesh_KUMA:\n\t\treq.Mesh.Annotations[\"kuma.io/sidecar-injection\"] = \"enabled\"\n\tcase smp.ServiceMesh_NGINX_SERVICE_MESH:\n\t\treq.Mesh.Annotations[\"njector.nsm.nginx.com/auto-inject\"] = \"true\"\n\n\t}\n\n\tresult := test_gen.RunTest(config, req.Mesh.Annotations, req.Mesh.Labels)\n\ttotalSteps := 24\n\ttotalFailures := 0\n\tstepsCount := map[string]int{\n\t\t\"traffic-access\": 7,\n\t\t\"traffic-split\": 11,\n\t\t\"traffic-spec\": 6,\n\t}\n\tspecVersion := map[string]string{\n\t\t\"traffic-access\": \"v0.6.0/v1alpha3\",\n\t\t\"traffic-split\": \"v0.6.0/v1alpha4\",\n\t\t\"traffic-spec\": \"v0.6.0/v1alpha4\",\n\t}\n\n\tdetails := make([]*conformance.Detail, 0)\n\tfor _, res := range result.Testsuite[0].Testcase {\n\t\td := &conformance.Detail{\n\t\t\tSmispec: res.Name,\n\t\t\tSpecversion: specVersion[res.Name],\n\t\t\tAssertion: strconv.Itoa(stepsCount[res.Name]),\n\t\t\tDuration: res.Time,\n\t\t\tCapability: conformance.Capability_FULL,\n\t\t\tStatus: conformance.ResultStatus_PASSED,\n\t\t\tResult: &conformance.Result{\n\t\t\t\tResult: &conformance.Result_Message{\n\t\t\t\t\tMessage: \"All test passed\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tif len(res.Failure.Text) > 2 {\n\t\t\td.Result = &conformance.Result{\n\t\t\t\tResult: &conformance.Result_Error{\n\t\t\t\t\tError: &service.CommonError{\n\t\t\t\t\t\tCode: \"\",\n\t\t\t\t\t\tSeverity: \"\",\n\t\t\t\t\t\tShortDescription: res.Failure.Text,\n\t\t\t\t\t\tLongDescription: res.Failure.Message,\n\t\t\t\t\t\tProbableCause: \"\",\n\t\t\t\t\t\tSuggestedRemediation: \"\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\td.Status = conformance.ResultStatus_FAILED\n\t\t\td.Capability = conformance.Capability_NONE\n\n\t\t\t// A hacky way to see the testStep Failed, since KUDO only provides it in Failure.Message\n\t\t\tre := regexp.MustCompile(`[0-9]+`)\n\t\t\tif res.Failure.Message != \"\" {\n\t\t\t\tstepFailed := 
re.FindAllString(res.Failure.Message, 1)\n\t\t\t\tif len(stepFailed) != 0 {\n\t\t\t\t\tpassed, _ := strconv.Atoi(stepFailed[0])\n\t\t\t\t\tpassed = passed - 1\n\t\t\t\t\tfailures := stepsCount[res.Name] - passed\n\t\t\t\t\ttotalFailures += failures\n\t\t\t\t\tif (passed) >= (stepsCount[res.Name] / 2) {\n\t\t\t\t\t\td.Capability = conformance.Capability_HALF\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tdetails = append(details, d)\n\t}\n\n\treturn &conformance.Response{\n\t\tCasespassed: strconv.Itoa(totalSteps - totalFailures),\n\t\tPasspercent: strconv.FormatFloat(float64(totalSteps-totalFailures)/float64(totalSteps)*100, 'f', 2, 64),\n\t\tMesh: req.Mesh,\n\t\tDetails: details,\n\t}, nil\n}", "func TestMain(m *testing.M) {\n\tos.Exit(testscript.RunMain(m, map[string]func() int{\n\t\t\"main\": main1,\n\t}))\n}", "func runTest(m *testing.M) int {\n\t// In order to get a Mongo session we need the name of the database we\n\t// are using. The web framework middleware is using this by convention.\n\tdbName, err := cfg.String(\"MONGO_DB\")\n\tif err != nil {\n\t\tfmt.Println(\"MongoDB is not configured\")\n\t\treturn 1\n\t}\n\n\tdb, err := db.NewMGO(\"context\", dbName)\n\tif err != nil {\n\t\tfmt.Println(\"Unable to get Mongo session\")\n\t\treturn 1\n\t}\n\n\tdefer db.CloseMGO(\"context\")\n\n\ttstdata.Generate(db)\n\tdefer tstdata.Drop(db)\n\n\tloadQuery(db, \"basic.json\")\n\tloadQuery(db, \"basic_var.json\")\n\tdefer qfix.Remove(db, \"QTEST_O\")\n\n\tloadScript(db, \"basic_script_pre.json\")\n\tloadScript(db, \"basic_script_pst.json\")\n\tdefer sfix.Remove(db, \"STEST_O\")\n\n\tloadMasks(db, \"basic.json\")\n\tdefer mfix.Remove(db, \"test_xenia_data\")\n\n\treturn m.Run()\n}", "func (f *VRFTest) Run() error {\n\tif err := f.createChainlinkJobs(); err != nil {\n\t\treturn err\n\t}\n\tvar ctx context.Context\n\tvar testCtxCancel context.CancelFunc\n\tif f.TestOptions.TestDuration.Seconds() > 0 {\n\t\tctx, testCtxCancel = context.WithTimeout(context.Background(), f.TestOptions.TestDuration)\n\t} else {\n\t\tctx, testCtxCancel = context.WithCancel(context.Background())\n\t}\n\tdefer testCtxCancel()\n\tcancelPerfEvents := f.watchPerfEvents()\n\tcurrentRound := 0\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tlog.Info().Msg(\"Test finished\")\n\t\t\ttime.Sleep(f.TestOptions.GracefulStopDuration)\n\t\t\tcancelPerfEvents()\n\t\t\treturn nil\n\t\tdefault:\n\t\t\tlog.Info().Int(\"RoundID\", currentRound).Msg(\"New round\")\n\t\t\tif err := f.requestRandomness(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := f.waitRoundFulfilled(currentRound + 1); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif f.TestOptions.NumberOfRounds != 0 && currentRound >= f.TestOptions.NumberOfRounds {\n\t\t\t\tlog.Info().Msg(\"Final round is reached\")\n\t\t\t\ttestCtxCancel()\n\t\t\t}\n\t\t\tcurrentRound++\n\t\t}\n\t}\n}", "func TestRun(t *testing.T) {\n\tsandbox, cleanup := cmdtest.TestSetupWithSandbox(t, false)\n\tdefer cleanup()\n\n\t// first add the test repo index\n\t_, err := cmdtest.AddLocalRepo(sandbox, \"LocalTestRepo\", filepath.Join(sandbox.TestDataPath, \"dev.local-index.yaml\"))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tstacksList := cmdtest.GetEnvStacksList()\n\n\tif stacksList == \"dev.local/starter\" {\n\t\tt.Skip()\n\t}\n\n\t// appsody init nodejs-express\n\t_, err = cmdtest.RunAppsody(sandbox, \"init\", \"nodejs-express\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// appsody run\n\trunChannel := make(chan error)\n\tgo func() {\n\t\t_, err = cmdtest.RunAppsody(sandbox, 
\"run\")\n\t\trunChannel <- err\n\t\tclose(runChannel)\n\t}()\n\n\t// defer the appsody stop to close the docker container\n\tdefer func() {\n\t\t_, err = cmdtest.RunAppsody(sandbox, \"stop\")\n\t\tif err != nil {\n\t\t\tt.Logf(\"Ignoring error running appsody stop: %s\", err)\n\t\t}\n\t\t// wait for the appsody command/goroutine to finish\n\t\trunErr := <-runChannel\n\t\tif runErr != nil {\n\t\t\tt.Logf(\"Ignoring error from the appsody command: %s\", runErr)\n\t\t}\n\t}()\n\n\thealthCheckFrequency := 2 // in seconds\n\thealthCheckTimeout := 60 // in seconds\n\thealthCheckWait := 0\n\thealthCheckOK := false\n\tfor !(healthCheckOK || healthCheckWait >= healthCheckTimeout) {\n\t\tselect {\n\t\tcase err = <-runChannel:\n\t\t\t// appsody run exited, probably with an error\n\t\t\tt.Fatalf(\"appsody run quit unexpectedly: %s\", err)\n\t\tcase <-time.After(time.Duration(healthCheckFrequency) * time.Second):\n\t\t\t// check the health endpoint\n\t\t\thealthCheckWait += healthCheckFrequency\n\t\t\tresp, err := http.Get(\"http://localhost:3000/health\")\n\t\t\tif err != nil {\n\t\t\t\tt.Logf(\"Health check error. Ignore and retry: %s\", err)\n\t\t\t} else {\n\t\t\t\tresp.Body.Close()\n\t\t\t\tif resp.StatusCode != 200 {\n\t\t\t\t\tt.Logf(\"Health check response code %d. Ignore and retry.\", resp.StatusCode)\n\t\t\t\t} else {\n\t\t\t\t\tt.Logf(\"Health check OK\")\n\t\t\t\t\t// may want to check body\n\t\t\t\t\thealthCheckOK = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif !healthCheckOK {\n\t\tt.Errorf(\"Did not receive an OK health check within %d seconds.\", healthCheckTimeout)\n\t}\n}", "func (t *SelfTester) RunSelfTest() ([]string, []string, map[string]*serializers.EventSerializer, error) {\n\tif err := t.BeginWaitingForEvent(); err != nil {\n\t\treturn nil, nil, nil, fmt.Errorf(\"failed to run self test: %w\", err)\n\t}\n\tdefer t.EndWaitingForEvent()\n\n\tt.lastTimestamp = time.Now()\n\n\t// launch the self tests\n\tvar success []string\n\tvar fails []string\n\ttestEvents := make(map[string]*serializers.EventSerializer)\n\n\tfor _, selftest := range FileSelfTests {\n\t\tdef := selftest.GetRuleDefinition(t.targetFilePath)\n\n\t\tpredicate, err := selftest.GenerateEvent(t.targetFilePath)\n\t\tif err != nil {\n\t\t\tfails = append(fails, def.ID)\n\t\t\tlog.Errorf(\"Self test failed: %s\", def.ID)\n\t\t\tcontinue\n\t\t}\n\t\tevent, err2 := t.expectEvent(predicate)\n\t\ttestEvents[def.ID] = event\n\t\tif err2 != nil {\n\t\t\tfails = append(fails, def.ID)\n\t\t\tlog.Errorf(\"Self test failed: %s\", def.ID)\n\t\t} else {\n\t\t\tsuccess = append(success, def.ID)\n\t\t}\n\t}\n\n\t// save the results for get status command\n\tt.success = success\n\tt.fails = fails\n\n\treturn success, fails, testEvents, nil\n}", "func (suite *AddCommandTestSuite) TestExecuteWhenTrackFound() {\n\n}", "func Run(name string, t testing.TB, f func(testing.TB)) {\n\tif tt, ok := t.(*testing.T); ok {\n\t\ttt.Run(name, func(ttt *testing.T) { f(ttt) })\n\t\treturn\n\t}\n\tif tb, ok := t.(*testing.B); ok {\n\t\ttb.Run(name, func(ttb *testing.B) { f(ttb) })\n\t\treturn\n\t}\n\tt.Error(\"invalid test harness\")\n\tt.FailNow()\n}", "func RunSubtests(ctx *Context) {\n\tfor name, fn := range tests {\n\t\tctx.Run(name, fn)\n\t}\n}", "func RunSingleFixtureTest(file string, t *testing.T) {\n\tvar fixtureSteps []FixtureStep\n\tbyteValue := ReadFile(file, t)\n\n\terr := json.Unmarshal([]byte(byteValue), &fixtureSteps)\n\tt.WithFields(testing.Fields{\n\t\t\"raw_json\": string(byteValue),\n\t}).MustNil(err, \"error decoding fixture 
steps\")\n\n\tt.Run(file, func(t *testing.T) {\n\t\tif FixtureTestOpts.IsParallel {\n\t\t\tt.Parallel()\n\t\t}\n\n\t\tfor idx := range fixtureSteps {\n\t\t\tUpdateWorkQueueStatus(file, idx, fixtureSteps, InProgress, t)\n\t\t}\n\t})\n}", "func TestMain(m *testing.M) {\n\tlog.SetOutput(os.Stdout)\n\tos.Exit(m.Run())\n}", "func TestMain(m *testing.M) {\n\tlog.SetOutput(os.Stdout)\n\tos.Exit(m.Run())\n}", "func TestMain(m *testing.M) {\n\trest.StartMockupServer()\n\tos.Exit(m.Run())\n}", "func TestMain(m *testing.M) {\n\tflag.Parse()\n\tos.Exit(m.Run())\n}", "func (g Go) Test(ctx context.Context) error {\n\tmg.CtxDeps(ctx, g.CheckVersion)\n\treturn sh.RunV(\"go\", append([]string{\"test\"}, strings.Split(TestArgs, \" \")...)...)\n}", "func (t Test) Unit() error {\n\tmg.Deps(t.GenerateModules)\n\treturn sh.RunWithV(ENV, \"go\", \"test\", \"-v\", \"-short\", \"-coverprofile=coverage.txt\", \"-covermode=atomic\", \"./...\")\n}", "func RunTests(opts Options) {\n\tif opts.Cleanup {\n\t\terr := CleanupTests(opts.Driver, opts.DSN, opts.Verbose)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Cleanup failed: %s\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tos.Exit(0)\n\t}\n\t_ = flag.Set(\"test.run\", opts.Match)\n\tif opts.Verbose {\n\t\t_ = flag.Set(\"test.v\", \"true\")\n\t}\n\ttests := []testing.InternalTest{\n\t\t{\n\t\t\tName: \"MainTest\",\n\t\t\tF: func(t *testing.T) {\n\t\t\t\tTest(t, opts.Driver, opts.DSN, opts.Suites, opts.RW)\n\t\t\t},\n\t\t},\n\t}\n\n\tmainStart(tests)\n}", "func TestMain(m *testing.M) {\n\tflag.BoolVar(&realTest, \"real\", false, \"Test with real uHunt API server\")\n\tflag.Parse()\n\tos.Exit(m.Run())\n}", "func TestMain(m *testing.M) {\n\tos.Exit(testMain(m))\n}", "func TestMain(m *testing.M) {\n\tos.Exit(testMain(m))\n}", "func TestMain(m *testing.M) {\n\tos.Exit(testMain(m))\n}", "func (s *ScenarioRunnerSuite) TestRun(t sweet.T) {\n\tpaths := []string{\"/t1\", \"/t2\", \"/t3\"}\n\treqBodies := []string{`{\"req\": \"r1\"}`, `{\"req\": \"r2\"}`, `{\"req\": \"r3\"}`}\n\trespBodies := []string{`{\"resp\": \"r1\"}`, `{\"resp\": \"r2\"}`, `{\"resp\": \"r3\"}`}\n\n\tserver := ghttp.NewServer()\n\tserver.AppendHandlers(ghttp.RespondWith(http.StatusOK, respBodies[0]))\n\tserver.AppendHandlers(ghttp.RespondWith(http.StatusOK, respBodies[1]))\n\tserver.AppendHandlers(ghttp.RespondWith(http.StatusOK, respBodies[2]))\n\n\tscenario := &config.Scenario{\n\t\tTests: []*config.Test{\n\t\t\t&config.Test{\n\t\t\t\tRequest: &config.Request{\n\t\t\t\t\tURL: testTemplate(server.URL() + paths[0]),\n\t\t\t\t\tBody: testTemplate(reqBodies[0]),\n\t\t\t\t},\n\t\t\t\tResponse: &config.Response{\n\t\t\t\t\tStatus: testPattern(\"2..\"),\n\t\t\t\t},\n\t\t\t\tEnabled: true,\n\t\t\t},\n\t\t\t&config.Test{\n\t\t\t\tRequest: &config.Request{\n\t\t\t\t\tURL: testTemplate(server.URL() + paths[1]),\n\t\t\t\t\tBody: testTemplate(reqBodies[1]),\n\t\t\t\t},\n\t\t\t\tResponse: &config.Response{\n\t\t\t\t\tStatus: testPattern(\"2..\"),\n\t\t\t\t},\n\t\t\t\tEnabled: true,\n\t\t\t},\n\t\t\t&config.Test{\n\t\t\t\tRequest: &config.Request{\n\t\t\t\t\tURL: testTemplate(server.URL() + paths[2]),\n\t\t\t\t\tBody: testTemplate(reqBodies[2]),\n\t\t\t\t},\n\t\t\t\tResponse: &config.Response{\n\t\t\t\t\tStatus: testPattern(\"2..\"),\n\t\t\t\t},\n\t\t\t\tEnabled: true,\n\t\t\t},\n\t\t},\n\t}\n\n\trunner := NewScenarioRunner(scenario, logging.NilLogger, logging.VerbosityLevelNone, nil)\n\trunner.Run(http.DefaultClient, 
map[string]interface{}{})\n\tExpect(runner.Resolved()).To(BeTrue())\n\tExpect(runner.Errored()).To(BeFalse())\n\tExpect(runner.Failed()).To(BeFalse())\n\n\tExpect(server.ReceivedRequests()).To(HaveLen(3))\n\tExpect(server.ReceivedRequests()[0].URL.Path).To(Equal(paths[0]))\n\tExpect(server.ReceivedRequests()[1].URL.Path).To(Equal(paths[1]))\n\tExpect(server.ReceivedRequests()[2].URL.Path).To(Equal(paths[2]))\n\n\tfor i, result := range runner.Results() {\n\t\tExpect(result.Index).To(Equal(i))\n\t\tExpect(result.Disabled).To(BeFalse())\n\t\tExpect(result.Skipped).To(BeFalse())\n\t\tExpect(result.Request.URL.Path).To(Equal(paths[i]))\n\t\tExpect(result.RequestBody).To(Equal(reqBodies[i]))\n\t\tExpect(result.Response.StatusCode).To(Equal(http.StatusOK))\n\t\tExpect(result.ResponseBody).To(Equal(respBodies[i]))\n\t\tExpect(result.RequestMatchErrors).To(HaveLen(0))\n\t\tExpect(result.Err).To(BeNil())\n\t}\n}" ]
[ "0.78072983", "0.7412214", "0.73320264", "0.72707105", "0.72383493", "0.7207434", "0.7073435", "0.7059288", "0.6953264", "0.68833536", "0.6867685", "0.6841037", "0.6731276", "0.67177874", "0.66537756", "0.664927", "0.6636479", "0.6629489", "0.6625154", "0.6613278", "0.6574058", "0.6566816", "0.65514356", "0.6528768", "0.65218616", "0.6521026", "0.65008014", "0.64875853", "0.64857423", "0.6481197", "0.6476141", "0.6460832", "0.6448644", "0.64392644", "0.64392644", "0.64392644", "0.6438805", "0.6436182", "0.64240444", "0.64240444", "0.6408941", "0.6386842", "0.6385224", "0.6379686", "0.6379686", "0.6378986", "0.6371934", "0.6370618", "0.6367753", "0.6356507", "0.6348798", "0.6337882", "0.6332744", "0.6327355", "0.6325144", "0.6318793", "0.6306997", "0.63044226", "0.62944084", "0.62899494", "0.6284789", "0.62838525", "0.62814957", "0.62787235", "0.62693006", "0.62483215", "0.62371093", "0.6225439", "0.6216454", "0.6216454", "0.6216454", "0.6208962", "0.6203981", "0.6200441", "0.61996984", "0.61912775", "0.61837316", "0.6179332", "0.617432", "0.61531496", "0.61479926", "0.6138547", "0.6121312", "0.6115611", "0.6114897", "0.61097026", "0.61066335", "0.61042815", "0.61032957", "0.61032957", "0.6100043", "0.6098389", "0.6084854", "0.60813284", "0.60766053", "0.60761315", "0.60729486", "0.60729486", "0.60729486", "0.60532" ]
0.6729761
13
waitForTestToComplete waits for a fixed amount of time while checking for a test pod to complete
func (r PodTestRunner) waitForTestToComplete(ctx context.Context, p *v1.Pod) (err error) { podCheck := wait.ConditionFunc(func() (done bool, err error) { var tmp *v1.Pod tmp, err = r.Client.CoreV1().Pods(p.Namespace).Get(ctx, p.Name, metav1.GetOptions{}) if err != nil { return true, fmt.Errorf("error getting pod %s %w", p.Name, err) } if tmp.Status.Phase == v1.PodSucceeded || tmp.Status.Phase == v1.PodFailed { return true, nil } return false, nil }) err = wait.PollImmediateUntil(time.Duration(1*time.Second), podCheck, ctx.Done()) return err }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r PodTestRunner) waitForTestToComplete(ctx context.Context, p *v1.Pod) (err error) {\n\n\tpodCheck := wait.ConditionFunc(func() (done bool, err error) {\n\t\tvar tmp *v1.Pod\n\t\ttmp, err = r.Client.CoreV1().Pods(p.Namespace).Get(ctx, p.Name, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn true, fmt.Errorf(\"error getting pod %s %w\", p.Name, err)\n\t\t}\n\t\tfor _, s := range tmp.Status.ContainerStatuses {\n\t\t\tif s.Name == \"scorecard-test\" {\n\t\t\t\tif s.State.Terminated != nil {\n\t\t\t\t\treturn true, nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn false, nil\n\t})\n\n\terr = wait.PollImmediateUntil(1*time.Second, podCheck, ctx.Done())\n\treturn err\n\n}", "func (o Scorecard) waitForTestsToComplete(tests []Test) (err error) {\n\twaitTimeInSeconds := int(o.WaitTime.Seconds())\n\tfor elapsedSeconds := 0; elapsedSeconds < waitTimeInSeconds; elapsedSeconds++ {\n\t\tallPodsCompleted := true\n\t\tfor _, test := range tests {\n\t\t\tp := test.TestPod\n\t\t\tvar tmp *v1.Pod\n\t\t\ttmp, err = o.Client.CoreV1().Pods(p.Namespace).Get(context.TODO(), p.Name, metav1.GetOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"error getting pod %s %w\", p.Name, err)\n\t\t\t}\n\t\t\tif tmp.Status.Phase != v1.PodSucceeded {\n\t\t\t\tallPodsCompleted = false\n\t\t\t}\n\n\t\t}\n\t\tif allPodsCompleted {\n\t\t\treturn nil\n\t\t}\n\t\ttime.Sleep(1 * time.Second)\n\t}\n\treturn fmt.Errorf(\"error - wait time of %d seconds has been exceeded\", o.WaitTime)\n\n}", "func waitForPodSuccess(c *client.Client, podName string, contName string, tryFor time.Duration) error {\n\ttrySecs := int(tryFor.Seconds())\n\tfor i := 0; i <= trySecs; i += 5 {\n\t\tif i > 0 {\n\t\t\ttime.Sleep(5 * time.Second)\n\t\t}\n\t\tpod, err := c.Pods(api.NamespaceDefault).Get(podName)\n\t\tif err != nil {\n\t\t\tLogf(\"Get pod failed, ignoring for 5s: %v\", err)\n\t\t\tcontinue\n\t\t}\n\t\t// Cannot use pod.Status.Phase == api.PodSucceeded/api.PodFailed due to #2632\n\t\tci, ok := pod.Status.Info[contName]\n\t\tif !ok {\n\t\t\tLogf(\"No Status.Info for container %s in pod %s yet\", contName, podName)\n\t\t} else {\n\t\t\tif ci.State.Termination != nil {\n\t\t\t\tif ci.State.Termination.ExitCode == 0 {\n\t\t\t\t\tBy(\"Saw pod success\")\n\t\t\t\t\treturn nil\n\t\t\t\t} else {\n\t\t\t\t\tLogf(\"Saw pod failure: %+v\", ci.State.Termination)\n\t\t\t\t}\n\t\t\t\tLogf(\"Waiting for pod %q status to be success or failure\", podName)\n\t\t\t} else {\n\t\t\t\tLogf(\"Nil State.Termination for container %s in pod %s so far\", contName, podName)\n\t\t\t}\n\t\t}\n\t}\n\treturn fmt.Errorf(\"Gave up waiting for pod %q status to be success or failure after %d seconds\", podName, trySecs)\n}", "func (r *volumeReactor) waitTest(test controllerTest) error {\n\t// start with 10 ms, multiply by 2 each step, 10 steps = 10.23 seconds\n\tbackoff := wait.Backoff{\n\t\tDuration: 10 * time.Millisecond,\n\t\tJitter: 0,\n\t\tFactor: 2,\n\t\tSteps: 10,\n\t}\n\terr := wait.ExponentialBackoff(backoff, func() (done bool, err error) {\n\t\t// Finish all operations that are in progress\n\t\tr.ctrl.runningOperations.WaitForCompletion()\n\n\t\t// Return 'true' if the reactor reached the expected state\n\t\terr1 := r.checkClaims(test.expectedClaims)\n\t\terr2 := r.checkVolumes(test.expectedVolumes)\n\t\tif err1 == nil && err2 == nil {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t})\n\treturn err\n}", "func (r *volumeReactor) waitTest(test controllerTest) error {\n\t// start with 10 ms, multiply by 2 each step, 10 steps = 10.23 
seconds\n\tbackoff := wait.Backoff{\n\t\tDuration: 10 * time.Millisecond,\n\t\tJitter: 0,\n\t\tFactor: 2,\n\t\tSteps: 10,\n\t}\n\terr := wait.ExponentialBackoff(backoff, func() (done bool, err error) {\n\t\t// Finish all operations that are in progress\n\t\tr.ctrl.runningOperations.WaitForCompletion()\n\n\t\t// Return 'true' if the reactor reached the expected state\n\t\terr1 := r.CheckClaims(test.expectedClaims)\n\t\terr2 := r.CheckVolumes(test.expectedVolumes)\n\t\tif err1 == nil && err2 == nil {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t})\n\treturn err\n}", "func waitForFailure(f *framework.Framework, name string, timeout time.Duration) {\n\tgomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, fmt.Sprintf(\"%s or %s\", v1.PodSucceeded, v1.PodFailed), timeout,\n\t\tfunc(pod *v1.Pod) (bool, error) {\n\t\t\tswitch pod.Status.Phase {\n\t\t\tcase v1.PodFailed:\n\t\t\t\treturn true, nil\n\t\t\tcase v1.PodSucceeded:\n\t\t\t\treturn true, fmt.Errorf(\"pod %q successed with reason: %q, message: %q\", name, pod.Status.Reason, pod.Status.Message)\n\t\t\tdefault:\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t},\n\t)).To(gomega.Succeed(), \"wait for pod %q to fail\", name)\n}", "func (t *JobUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {\n\t<-done\n\tBy(\"Ensuring active pods == parallelism\")\n\trunning, err := framework.CheckForAllJobPodsRunning(f.ClientSet, t.namespace, t.job.Name, 2)\n\tExpect(err).NotTo(HaveOccurred())\n\tExpect(running).To(BeTrue())\n}", "func (td *OsmTestData) WaitForPodsRunningReady(ns string, timeout time.Duration, nExpectedRunningPods int, labelSelector *metav1.LabelSelector) error {\n\ttd.T.Logf(\"Wait up to %v for %d pods ready in ns [%s]...\", timeout, nExpectedRunningPods, ns)\n\n\tlistOpts := metav1.ListOptions{\n\t\tFieldSelector: \"status.phase=Running\",\n\t}\n\n\tif labelSelector != nil {\n\t\tlabelMap, _ := metav1.LabelSelectorAsMap(labelSelector)\n\t\tlistOpts.LabelSelector = labels.SelectorFromSet(labelMap).String()\n\t}\n\n\tfor start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) {\n\t\tpods, err := td.Client.CoreV1().Pods(ns).List(context.TODO(), listOpts)\n\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to list pods\")\n\t\t}\n\n\t\tif len(pods.Items) < nExpectedRunningPods {\n\t\t\ttime.Sleep(time.Second)\n\t\t\tcontinue\n\t\t}\n\n\t\tnReadyPods := 0\n\t\tfor _, pod := range pods.Items {\n\t\t\tfor _, cond := range pod.Status.Conditions {\n\t\t\t\tif cond.Type == corev1.PodReady && cond.Status == corev1.ConditionTrue {\n\t\t\t\t\tnReadyPods++\n\t\t\t\t\tif nReadyPods == nExpectedRunningPods {\n\t\t\t\t\t\ttd.T.Logf(\"Finished waiting for NS [%s].\", ns)\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(time.Second)\n\t}\n\n\tpods, err := td.Client.CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to list pods\")\n\t}\n\ttd.T.Log(\"Pod Statuses in namespace\", ns)\n\tfor _, pod := range pods.Items {\n\t\tstatus, _ := json.MarshalIndent(pod.Status, \"\", \" \")\n\t\ttd.T.Logf(\"Pod %s:\\n%s\", pod.Name, status)\n\t}\n\n\treturn fmt.Errorf(\"not all pods were Running & Ready in NS %s after %v\", ns, timeout)\n}", "func waitForCompletion(sensor SensorInterface, i2c *i2c.I2C) (timeout bool, err error) {\n\tfor i := 0; i < 10; i++ {\n\t\tflag, err := sensor.IsBusy(i2c)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif flag == false {\n\t\t\treturn 
false, nil\n\t\t}\n\t\ttime.Sleep(5 * time.Millisecond)\n\t}\n\treturn true, nil\n}", "func (d *deploymentTester) waitForDeploymentComplete() error {\n\treturn testutil.WaitForDeploymentComplete(d.c, d.deployment, d.t.Logf, pollInterval, pollTimeout)\n}", "func waitForPodsToBeInTerminatingPhase(sshClientConfig *ssh.ClientConfig, svcMasterIP string,\n\tpodName string, namespace string, timeout time.Duration) error {\n\tkubeConfigPath := GetAndExpectStringEnvVar(gcKubeConfigPath)\n\twaitErr := wait.PollImmediate(poll, timeout, func() (bool, error) {\n\t\tcmd := fmt.Sprintf(\"kubectl get pod %s --kubeconfig %s -n %s --no-headers|awk '{print $3}'\",\n\t\t\tpodName, kubeConfigPath, namespace)\n\t\tframework.Logf(\"Invoking command '%v' on host %v\", cmd,\n\t\t\tsvcMasterIP)\n\t\tcmdResult, err := sshExec(sshClientConfig, svcMasterIP,\n\t\t\tcmd)\n\t\tif err != nil || cmdResult.Code != 0 {\n\t\t\tfssh.LogResult(cmdResult)\n\t\t\treturn false, fmt.Errorf(\"couldn't execute command: %s on host: %v , error: %s\",\n\t\t\t\tcmd, svcMasterIP, err)\n\t\t}\n\n\t\tframework.Logf(\"result %v\", cmdResult)\n\t\tframework.Logf(\"stdout %s\", cmdResult.Stdout)\n\t\tpodPhase := strings.TrimSpace(cmdResult.Stdout)\n\t\tif podPhase == \"Terminating\" {\n\t\t\tframework.Logf(\"Pod %s is in terminating state\", podName)\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t})\n\treturn waitErr\n}", "func waitForApiServerToBeUp(svcMasterIp string, sshClientConfig *ssh.ClientConfig,\n\ttimeout time.Duration) error {\n\tkubeConfigPath := GetAndExpectStringEnvVar(gcKubeConfigPath)\n\twaitErr := wait.PollImmediate(poll, timeout, func() (bool, error) {\n\t\tcmd := fmt.Sprintf(\"kubectl get ns,sc --kubeconfig %s\",\n\t\t\tkubeConfigPath)\n\t\tframework.Logf(\"Invoking command '%v' on host %v\", cmd,\n\t\t\tsvcMasterIp)\n\t\tcmdResult, err := sshExec(sshClientConfig, svcMasterIp,\n\t\t\tcmd)\n\t\tframework.Logf(\"result %v\", cmdResult)\n\t\tif err != nil {\n\t\t\treturn false, nil\n\t\t}\n\t\tif err == nil {\n\t\t\tframework.Logf(\"Apiserver is fully up\")\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t})\n\treturn waitErr\n}", "func waitForDeploymentComplete(name, ns string, c kubernetes.Interface, t int) error {\n\tvar (\n\t\tdeployment *apps.Deployment\n\t\treason string\n\t\terr error\n\t)\n\ttimeout := time.Duration(t) * time.Minute\n\terr = wait.PollImmediate(poll, timeout, func() (bool, error) {\n\t\tdeployment, err = c.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\t// TODO need to check rolling update\n\n\t\t// When the deployment status and its underlying resources reach the\n\t\t// desired state, we're done\n\t\tif deployment.Status.Replicas == deployment.Status.ReadyReplicas {\n\t\t\treturn true, nil\n\t\t}\n\t\te2elog.Logf(\"deployment status: expected replica count %d running replica count %d\", deployment.Status.Replicas, deployment.Status.ReadyReplicas)\n\t\treason = fmt.Sprintf(\"deployment status: %#v\", deployment.Status.String())\n\t\treturn false, nil\n\t})\n\n\tif errors.Is(err, wait.ErrWaitTimeout) {\n\t\terr = fmt.Errorf(\"%s\", reason)\n\t}\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error waiting for deployment %q status to match expectation: %w\", name, err)\n\t}\n\treturn nil\n}", "func (r *HumioClusterReconciler) waitForNewPod(ctx context.Context, hnp *HumioNodePool, previousPodList []corev1.Pod, expectedPod *corev1.Pod) error {\n\t// We must check only pods that were running prior to the new pod 
being created, and we must only include pods that\n\t// were running the same revision as the newly created pod. This is because there may be pods under the previous\n\t// revision that were still terminating when the new pod was created\n\tvar expectedPodCount int\n\tfor _, pod := range previousPodList {\n\t\tif pod.Annotations[podHashAnnotation] == expectedPod.Annotations[podHashAnnotation] {\n\t\t\texpectedPodCount++\n\t\t}\n\t}\n\t// This will account for the newly created pod\n\texpectedPodCount++\n\n\tfor i := 0; i < waitForPodTimeoutSeconds; i++ {\n\t\tvar podsMatchingRevisionCount int\n\t\tlatestPodList, err := kubernetes.ListPods(ctx, r, hnp.GetNamespace(), hnp.GetNodePoolLabels())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, pod := range latestPodList {\n\t\t\tif pod.Annotations[podHashAnnotation] == expectedPod.Annotations[podHashAnnotation] {\n\t\t\t\tpodsMatchingRevisionCount++\n\t\t\t}\n\t\t}\n\t\tr.Log.Info(fmt.Sprintf(\"validating new pod was created. expected pod count %d, current pod count %d\", expectedPodCount, podsMatchingRevisionCount))\n\t\tif podsMatchingRevisionCount >= expectedPodCount {\n\t\t\treturn nil\n\t\t}\n\t\ttime.Sleep(time.Second * 1)\n\t}\n\treturn fmt.Errorf(\"timed out waiting to validate new pod was created\")\n}", "func waitUntilPodStatus(provider *vkAWS.FargateProvider, podName string, desiredStatus v1.PodPhase) error {\n\tctx := context.Background()\n\tcontext.WithTimeout(ctx, time.Duration(time.Second*60))\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tdefault:\n\t\t\tstatus, err := provider.GetPodStatus(\"default\", podName)\n\t\t\tif err != nil {\n\t\t\t\tif strings.Contains(err.Error(), \"is not found\") {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif status.Phase == desiredStatus {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\ttime.Sleep(3 * time.Second)\n\t\t}\n\t}\n}", "func waitForPortInTest(port int, t *testing.T) {\n\ttimeout := time.After(1 * time.Second)\n\tfor {\n\t\tselect {\n\t\tcase <-timeout:\n\t\t\tt.Fatalf(\"Expected server to start < 1s.\")\n\t\tcase <-time.After(50 * time.Millisecond):\n\t\t\t_, err := net.Dial(\"tcp\", fmt.Sprintf(\":%d\", port))\n\t\t\tif err == nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func watchForFinishedTestRuns(cfg config.Config, ac *cache.AgentCache) error {\n\tagentUUID, err := ac.GetKeyValue(\"uuid\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor {\n\n\t\ttime.Sleep(5000 * time.Millisecond)\n\n\t\ttestruns, err := ac.GetFinishedTestRuns()\n\t\tif err != nil {\n\t\t\tlog.Error(\"Problem retrieving finished test runs\")\n\t\t\treturn errors.New(\"Problem retrieving finished test runs\")\n\t\t}\n\n\t\tfor testUUID, testData := range testruns {\n\n\t\t\tlog.Debug(\"Found ripe testrun: \", testUUID)\n\n\t\t\ttc, err := comms.NewToDDComms(cfg)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tutdr := responses.NewUploadTestData(agentUUID, testUUID, testData)\n\t\t\ttc.Package.SendResponse(utdr)\n\n\t\t}\n\n\t}\n}", "func waitForScanStatus(t *testing.T, f *framework.Framework, namespace, name string, targetStaus complianceoperatorv1alpha1.ComplianceScanStatusPhase) error {\n\texampleComplianceScan := &complianceoperatorv1alpha1.ComplianceScan{}\n\tvar lastErr error\n\t// retry and ignore errors until timeout\n\ttimeouterr := wait.Poll(retryInterval, timeout, func() (bool, error) {\n\t\tlastErr = f.Client.Get(goctx.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, exampleComplianceScan)\n\t\tif lastErr != nil 
{\n\t\t\tif apierrors.IsNotFound(lastErr) {\n\t\t\t\tt.Logf(\"Waiting for availability of %s compliancescan\\n\", name)\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\tt.Logf(\"Retrying. Got error: %v\\n\", lastErr)\n\t\t\treturn false, nil\n\t\t}\n\n\t\tif exampleComplianceScan.Status.Phase == targetStaus {\n\t\t\treturn true, nil\n\t\t}\n\t\tt.Logf(\"Waiting for run of %s compliancescan (%s)\\n\", name, exampleComplianceScan.Status.Phase)\n\t\treturn false, nil\n\t})\n\t// Error in function call\n\tif lastErr != nil {\n\t\treturn lastErr\n\t}\n\t// Timeout\n\tif timeouterr != nil {\n\t\treturn timeouterr\n\t}\n\tt.Logf(\"ComplianceScan ready (%s)\\n\", exampleComplianceScan.Status.Phase)\n\treturn nil\n}", "func TestWait_timeout(t *testing.T) {\n\tdefer check(t)\n\tcontent := \"hello world!\"\n\treq := &showcasepb.WaitRequest{\n\t\tEnd: &showcasepb.WaitRequest_Ttl{\n\t\t\tTtl: &durationpb.Duration{Seconds: 1},\n\t\t},\n\t\tResponse: &showcasepb.WaitRequest_Success{\n\t\t\tSuccess: &showcasepb.WaitResponse{Content: content},\n\t\t},\n\t}\n\tctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)\n\tdefer cancel()\n\n\top, err := echo.Wait(ctx, req)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tresp, err := op.Wait(ctx)\n\tif err == nil {\n\t\tt.Errorf(\"Wait() = %+v, want error\", resp)\n\t}\n}", "func (d *deploymentTester) waitForReadyReplicas() error {\n\tif err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {\n\t\tdeployment, err := d.c.AppsV1().Deployments(d.deployment.Namespace).Get(context.TODO(), d.deployment.Name, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn false, fmt.Errorf(\"failed to get deployment %q: %v\", d.deployment.Name, err)\n\t\t}\n\t\treturn deployment.Status.ReadyReplicas == *deployment.Spec.Replicas, nil\n\t}); err != nil {\n\t\treturn fmt.Errorf(\"failed to wait for .readyReplicas to equal .replicas: %v\", err)\n\t}\n\treturn nil\n}", "func waitForPods(cs *framework.ClientSet, expectedTotal, min, max int32) error {\n\terr := wait.PollImmediate(1*time.Second, 5*time.Minute, func() (bool, error) {\n\t\td, err := cs.AppsV1Interface.Deployments(\"openshift-machine-config-operator\").Get(context.TODO(), \"etcd-quorum-guard\", metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\t// By this point the deployment should exist.\n\t\t\tfmt.Printf(\" error waiting for etcd-quorum-guard deployment to exist: %v\\n\", err)\n\t\t\treturn true, err\n\t\t}\n\t\tif d.Status.Replicas < 1 {\n\t\t\tfmt.Println(\"operator deployment has no replicas\")\n\t\t\treturn false, nil\n\t\t}\n\t\tif d.Status.Replicas == expectedTotal &&\n\t\t\td.Status.AvailableReplicas >= min &&\n\t\t\td.Status.AvailableReplicas <= max {\n\t\t\tfmt.Printf(\" Deployment is ready! 
%d %d\\n\", d.Status.Replicas, d.Status.AvailableReplicas)\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor pod, info := range pods {\n\t\tif info.status == \"Running\" {\n\t\t\tnode := info.node\n\t\t\tif node == \"\" {\n\t\t\t\treturn fmt.Errorf(\"Pod %s not associated with a node\", pod)\n\t\t\t}\n\t\t\tif _, ok := nodes[node]; !ok {\n\t\t\t\treturn fmt.Errorf(\"pod %s running on %s, not a master\", pod, node)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func waitForOK(service *Service, url string, timeout time.Duration) (int, error) {\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tokFunc := func(status int, response []byte) (bool, error) {\n\t\tok, err := isOK(status)\n\t\tif err != nil {\n\t\t\treturn false, fmt.Errorf(\"failed to query %s at %s: %w\", service.Description(), url, err)\n\t\t}\n\t\treturn ok, err\n\t}\n\treturn wait(service, okFunc, func() *http.Request { return req }, timeout)\n}", "func WaitForDeletion(pod *corev1.Pod, timeout time.Duration) error {\n\treturn wait.PollImmediate(time.Second, timeout, func() (bool, error) {\n\t\tif err := testclient.Client.Get(context.TODO(), client.ObjectKeyFromObject(pod), pod); errors.IsNotFound(err) {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t})\n}", "func waitForImage(ctx context.Context, cl client.Client, timeout time.Duration, ns, key, val, container, expected string) error {\n\tpods := &corev1.PodList{}\n\terr := wait.PollImmediate(1*time.Second, timeout, func() (bool, error) {\n\t\tif err := cl.List(ctx, pods, client.MatchingLabels{key: val}, client.InNamespace(ns)); err != nil {\n\t\t\treturn false, nil\n\t\t}\n\t\tfor _, p := range pods.Items {\n\t\t\tfor _, c := range p.Spec.Containers {\n\t\t\t\tif c.Name == container && c.Image != expected {\n\t\t\t\t\treturn false, nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true, nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (m *MeshReconciler) waitForPod(client runtimeclient.Client, namespace string, labels map[string]string, containerImageWithTag string) error {\n\tm.logger.Debug(\"waiting for pod\")\n\n\tbackoffConfig := backoff.ConstantBackoffConfig{\n\t\tDelay: time.Duration(backoffDelaySeconds) * time.Second,\n\t\tMaxRetries: backoffMaxretries,\n\t}\n\tbackoffPolicy := backoff.NewConstantBackoffPolicy(backoffConfig)\n\n\terr := backoff.Retry(func() error {\n\t\tvar pods corev1.PodList\n\t\to := &runtimeclient.ListOptions{}\n\t\truntimeclient.InNamespace(namespace).ApplyToList(o)\n\t\truntimeclient.MatchingLabels(labels).ApplyToList(o)\n\n\t\terr := client.List(context.Background(), &pods, o)\n\t\tif err != nil {\n\t\t\treturn errors.WrapIf(err, \"could not list pods\")\n\t\t}\n\n\t\tfor _, pod := range pods.Items {\n\t\t\tif containerImageWithTag != \"\" {\n\t\t\t\tmatch := false\n\t\t\t\tfor _, container := range pod.Spec.Containers {\n\t\t\t\t\tif container.Image == containerImageWithTag {\n\t\t\t\t\t\tmatch = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif !match {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tif pod.Status.Phase == corev1.PodRunning {\n\t\t\t\treadyContainers := 0\n\t\t\t\tfor _, cs := range pod.Status.ContainerStatuses {\n\t\t\t\t\tif cs.Ready {\n\t\t\t\t\t\treadyContainers++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif readyContainers == len(pod.Status.ContainerStatuses) {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn errors.New(\"could not find running and healthy pods\")\n\t}, 
backoffPolicy)\n\n\treturn err\n}", "func waitForCompletion(done chan bool) bool {\n\ttimer := time.NewTimer(totalWaitTime)\n\tdefer timer.Stop()\n\tselect {\n\tcase <-done:\n\t\treturn true\n\tcase <-timer.C:\n\t\treturn false\n\t}\n}", "func (d *deploymentTester) waitForDeploymentCompleteAndMarkPodsReady() error {\n\tvar wg sync.WaitGroup\n\n\t// Manually mark updated Deployment pods as ready in a separate goroutine\n\twg.Add(1)\n\tgo d.markUpdatedPodsReady(&wg)\n\n\t// Wait for the Deployment status to complete using soft check, while Deployment pods are becoming ready\n\terr := d.waitForDeploymentComplete()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to wait for Deployment status %s: %v\", d.deployment.Name, err)\n\t}\n\n\t// Wait for goroutine to finish\n\twg.Wait()\n\n\treturn nil\n}", "func waitForWebhook(ctx context.Context, client dynclient.Client) error {\n\tcondFn := clientutil.PodsReadyCondition(ctx, client, dynclient.ListOptions{\n\t\tNamespace: resources.MachineControllerNameSpace,\n\t\tLabelSelector: labels.SelectorFromSet(map[string]string{\n\t\t\tappLabelKey: resources.MachineControllerWebhookName,\n\t\t}),\n\t})\n\n\treturn fail.KubeClient(wait.PollUntilContextTimeout(ctx, 5*time.Second, 3*time.Minute, false, condFn.WithContext()), \"waiting for machine-controller webhook to became ready\")\n}", "func (m *Manager) waitForFinish() {\n\tm.state.wg.Wait()\n}", "func PodWait(ctx context.Context, t *testing.T, profile string, ns string, selector string, timeout time.Duration) ([]string, error) {\n\tt.Helper()\n\tclient, err := kapi.Client(profile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// For example: kubernetes.io/minikube-addons=gvisor\n\tlistOpts := meta.ListOptions{LabelSelector: selector}\n\tminUptime := 5 * time.Second\n\tpodStart := time.Time{}\n\tfoundNames := map[string]bool{}\n\tlastMsg := \"\"\n\n\tstart := time.Now()\n\tt.Logf(\"(dbg) %s: waiting %s for pods matching %q in namespace %q ...\", t.Name(), timeout, selector, ns)\n\tf := func(ctx context.Context) (bool, error) {\n\t\tpods, err := client.CoreV1().Pods(ns).List(ctx, listOpts)\n\t\tif err != nil {\n\t\t\tt.Logf(\"%s: WARNING: pod list for %q %q returned: %v\", t.Name(), ns, selector, err)\n\t\t\t// Don't return the error upwards so that this is retried, in case the apiserver is rescheduled\n\t\t\tpodStart = time.Time{}\n\t\t\treturn false, nil\n\t\t}\n\t\tif len(pods.Items) == 0 {\n\t\t\tpodStart = time.Time{}\n\t\t\treturn false, nil\n\t\t}\n\n\t\tfor _, pod := range pods.Items {\n\t\t\tfoundNames[pod.ObjectMeta.Name] = true\n\t\t\tmsg := podStatusMsg(pod)\n\t\t\t// Prevent spamming logs with identical messages\n\t\t\tif msg != lastMsg {\n\t\t\t\tt.Log(msg)\n\t\t\t\tlastMsg = msg\n\t\t\t}\n\t\t\t// Successful termination of a short-lived process, will not be restarted\n\t\t\tif pod.Status.Phase == core.PodSucceeded {\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t\t// Long-running process state\n\t\t\tif pod.Status.Phase != core.PodRunning {\n\t\t\t\tif !podStart.IsZero() {\n\t\t\t\t\tt.Logf(\"%s: WARNING: %s was running %s ago - may be unstable\", t.Name(), selector, time.Since(podStart))\n\t\t\t\t}\n\t\t\t\tpodStart = time.Time{}\n\t\t\t\treturn false, nil\n\t\t\t}\n\n\t\t\tif podStart.IsZero() {\n\t\t\t\tpodStart = time.Now()\n\t\t\t}\n\n\t\t\tif time.Since(podStart) > minUptime {\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t}\n\t\treturn false, nil\n\t}\n\n\terr = wait.PollUntilContextTimeout(ctx, 1*time.Second, timeout, true, f)\n\tnames := []string{}\n\tfor n := range foundNames {\n\t\tnames = 
append(names, n)\n\t}\n\n\tif err == nil {\n\t\tt.Logf(\"(dbg) %s: %s healthy within %s\", t.Name(), selector, time.Since(start))\n\t\treturn names, nil\n\t}\n\n\tt.Logf(\"***** %s: pod %q failed to start within %s: %v ****\", t.Name(), selector, timeout, err)\n\tshowPodLogs(ctx, t, profile, ns, names)\n\treturn names, fmt.Errorf(\"%s: %v\", fmt.Sprintf(\"%s within %s\", selector, timeout), err)\n}", "func (tb *Testbed) Wait(timeout time.Duration) error {\n\tdefer tb.cancel()\n\tnow := time.Now()\n\tselect {\n\tcase <-tb.donec:\n\t\treturn nil\n\tcase to := <-time.After(timeout):\n\t\twaited := to.Sub(now)\n\t\treturn errors.New(\"timeout after \" + waited.String())\n\t}\n}", "func waitForPodNotPending(c *client.Client, ns, podName string, tryFor time.Duration) error {\n\ttrySecs := int(tryFor.Seconds())\n\tfor i := 0; i <= trySecs; i += 5 {\n\t\tif i > 0 {\n\t\t\ttime.Sleep(5 * time.Second)\n\t\t}\n\t\tpod, err := c.Pods(ns).Get(podName)\n\t\tif err != nil {\n\t\t\tLogf(\"Get pod %s in namespace %s failed, ignoring for 5s: %v\", podName, ns, err)\n\t\t\tcontinue\n\t\t}\n\t\tif pod.Status.Phase != api.PodPending {\n\t\t\tLogf(\"Saw pod %s in namespace %s out of pending state (found %q)\", podName, ns, pod.Status.Phase)\n\t\t\treturn nil\n\t\t}\n\t\tLogf(\"Waiting for status of pod %s in namespace %s to be !%q (found %q) (%v secs)\", podName, ns, api.PodPending, pod.Status.Phase, i)\n\t}\n\treturn fmt.Errorf(\"Gave up waiting for status of pod %s in namespace %s to go out of pending after %d seconds\", podName, ns, trySecs)\n}", "func TestWaitUntilAllNodesReady(t *testing.T) {\n\tt.Parallel()\n\n\toptions := NewKubectlOptions(\"\", \"\", \"default\")\n\n\tWaitUntilAllNodesReady(t, options, 12, 5*time.Second)\n\n\tnodes := GetNodes(t, options)\n\tnodeNames := map[string]bool{}\n\tfor _, node := range nodes {\n\t\tnodeNames[node.Name] = true\n\t}\n\n\treadyNodes := GetReadyNodes(t, options)\n\treadyNodeNames := map[string]bool{}\n\tfor _, node := range readyNodes {\n\t\treadyNodeNames[node.Name] = true\n\t}\n\n\tassert.Equal(t, nodeNames, readyNodeNames)\n}", "func waitForConvergence(t *testing.T, maxIterations int, testNodes ...*testNode) {\n\twaitForConvergenceNodes(t, maxIterations, testNodesToNodes(testNodes)...)\n}", "func waitForSystemTime() {\n\ttime.Sleep(150 * time.Millisecond)\n}", "func (envManager *TestEnvManager) WaitUntilReady() (bool, error) {\n\tlog.Println(\"Start checking components' status\")\n\tretry := u.Retrier{\n\t\tBaseDelay: 1 * time.Second,\n\t\tMaxDelay: 10 * time.Second,\n\t\tRetries: 8,\n\t}\n\n\tready := false\n\tretryFn := func(_ context.Context, i int) error {\n\t\tfor _, comp := range envManager.testEnv.GetComponents() {\n\t\t\tif alive, err := comp.IsAlive(); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to comfirm compoment %s is alive %v\", comp.GetName(), err)\n\t\t\t} else if !alive {\n\t\t\t\treturn fmt.Errorf(\"component %s is not alive\", comp.GetName())\n\t\t\t}\n\t\t}\n\n\t\tready = true\n\t\tlog.Println(\"All components are ready\")\n\t\treturn nil\n\t}\n\n\t_, err := retry.Retry(context.Background(), retryFn)\n\treturn ready, err\n}", "func waitForReadyNodes(desiredCount int, timeout time.Duration, requiredConsecutiveSuccesses int) error {\n\tstop := time.Now().Add(timeout)\n\n\tconsecutiveSuccesses := 0\n\tfor {\n\t\tif time.Now().After(stop) {\n\t\t\tbreak\n\t\t}\n\n\t\tnodes, err := kubectlGetNodes(\"\")\n\t\tif err != nil {\n\t\t\tlog.Printf(\"kubectl get nodes failed, sleeping: %v\", err)\n\t\t\tconsecutiveSuccesses = 0\n\t\t\ttime.Sleep(30 
* time.Second)\n\t\t\tcontinue\n\t\t}\n\t\treadyNodes := countReadyNodes(nodes)\n\t\tif readyNodes >= desiredCount {\n\t\t\tconsecutiveSuccesses++\n\t\t\tif consecutiveSuccesses >= requiredConsecutiveSuccesses {\n\t\t\t\tlog.Printf(\"%d ready nodes found, %d sequential successes - cluster is ready\",\n\t\t\t\t\treadyNodes,\n\t\t\t\t\tconsecutiveSuccesses)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tlog.Printf(\"%d ready nodes found, waiting for %d sequential successes (success count = %d)\",\n\t\t\t\treadyNodes,\n\t\t\t\trequiredConsecutiveSuccesses,\n\t\t\t\tconsecutiveSuccesses)\n\t\t\ttime.Sleep(2 * time.Second)\n\t\t} else {\n\t\t\tconsecutiveSuccesses = 0\n\t\t\tlog.Printf(\"%d (ready nodes) < %d (requested instances), sleeping\", readyNodes, desiredCount)\n\t\t\ttime.Sleep(30 * time.Second)\n\t\t}\n\t}\n\treturn fmt.Errorf(\"waiting for ready nodes timed out\")\n}", "func WaitForRunnerCompletion(experimentsDetails *types.ExperimentDetails, clients environment.ClientSets) error {\n\terr := retry.\n\t\tTimes(uint(experimentsDetails.Duration / experimentsDetails.Delay)).\n\t\tWait(time.Duration(experimentsDetails.Delay) * time.Second).\n\t\tTry(func(attempt uint) error {\n\t\t\trunner, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Get(experimentsDetails.EngineName+\"-runner\", metav1.GetOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Errorf(\"Unable to get the runner pod, due to %v\", err)\n\t\t\t}\n\n\t\t\tif string(runner.Status.Phase) != \"Succeeded\" {\n\t\t\t\tlog.Infof(\"Runner pod status is %v\", runner.Status.Phase)\n\t\t\t\treturn errors.Errorf(\"Runner pod is not yet completed\")\n\t\t\t}\n\t\t\tlog.Infof(\"Runner pod status is %v\", runner.Status.Phase)\n\n\t\t\treturn nil\n\t\t})\n\n\treturn err\n}", "func WaitForPhase(podKey client.ObjectKey, phase corev1.PodPhase, timeout time.Duration) (*corev1.Pod, error) {\n\tupdatedPod := &corev1.Pod{}\n\terr := wait.PollImmediate(time.Second, timeout, func() (bool, error) {\n\t\tif err := testclient.Client.Get(context.TODO(), podKey, updatedPod); err != nil {\n\t\t\treturn false, nil\n\t\t}\n\n\t\tif updatedPod.Status.Phase == phase {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t})\n\treturn updatedPod, err\n}", "func (tfcm *TestFCM) wait(count int) {\n\ttime.Sleep(time.Duration(count) * tfcm.timeout)\n}", "func (d *deploymentTester) waitForDeploymentCompleteAndCheckRolling() error {\n\treturn testutil.WaitForDeploymentCompleteAndCheckRolling(d.c, d.deployment, d.t.Logf, pollInterval, pollTimeout)\n}", "func (d *deploymentTester) waitForDeploymentCompleteAndCheckRollingAndMarkPodsReady() error {\n\tvar wg sync.WaitGroup\n\n\t// Manually mark updated Deployment pods as ready in a separate goroutine\n\twg.Add(1)\n\tgo d.markUpdatedPodsReady(&wg)\n\t// Wait for goroutine to finish, for all return paths.\n\tdefer wg.Wait()\n\n\t// Wait for the Deployment status to complete while Deployment pods are becoming ready\n\terr := d.waitForDeploymentCompleteAndCheckRolling()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to wait for Deployment %s to complete: %v\", d.deployment.Name, err)\n\t}\n\n\treturn nil\n}", "func waitForInit() error {\n\tstart := time.Now()\n\tmaxEnd := start.Add(time.Minute)\n\tfor {\n\t\t// Check for existence of vpcCniInitDonePath\n\t\tif _, err := os.Stat(vpcCniInitDonePath); err == nil {\n\t\t\t// Delete the done file in case of a reboot of the node or restart of the container (force init container to run again)\n\t\t\tif err := os.Remove(vpcCniInitDonePath); err == nil 
{\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t// If file deletion fails, log and allow retry\n\t\t\tlog.Errorf(\"Failed to delete file: %s\", vpcCniInitDonePath)\n\t\t}\n\t\tif time.Now().After(maxEnd) {\n\t\t\treturn errors.Errorf(\"time exceeded\")\n\t\t}\n\t\ttime.Sleep(1 * time.Second)\n\t}\n}", "func WaitForPodSucceeded(ctx context.Context, pods corev1.PodInterface, podName string, timeout time.Duration) error {\n\tlog.Entry(ctx).Infof(\"Waiting for %s to be complete\", podName)\n\n\tw, err := pods.Watch(ctx, metav1.ListOptions{})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"initializing pod watcher: %s\", err)\n\t}\n\tdefer w.Stop()\n\n\treturn watchUntilTimeout(ctx, timeout, w, isPodSucceeded(podName))\n}", "func waitReady(project, name, region string) error {\n\twait := time.Minute * 4\n\tdeadline := time.Now().Add(wait)\n\tfor time.Now().Before(deadline) {\n\t\tsvc, err := getService(project, name, region)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to query Service for readiness: %w\", err)\n\t\t}\n\n\t\tfor _, cond := range svc.Status.Conditions {\n\t\t\tif cond.Type == \"Ready\" {\n\t\t\t\tif cond.Status == \"True\" {\n\t\t\t\t\treturn nil\n\t\t\t\t} else if cond.Status == \"False\" {\n\t\t\t\t\treturn fmt.Errorf(\"reason=%s message=%s\", cond.Reason, cond.Message)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(time.Second * 2)\n\t}\n\treturn fmt.Errorf(\"the service did not become ready in %s, check Cloud Console for logs to see why it failed\", wait)\n}", "func waitForGlusterContainer() error {\n\n\t//Check if docker gluster container is up and running\n\tfor {\n\t\tglusterServerContainerVal, err := helpers.GetSystemDockerNode(\"gluster-server\")\n\t\tif err != nil {\n\t\t\trwolog.Error(\"Error in checking docker gluster container for status \", err.Error())\n\t\t\treturn err\n\t\t}\n\n\t\tif len(glusterServerContainerVal) > 0 {\n\t\t\tbreak\n\t\t} else {\n\t\t\trwolog.Debug(\"Sleeping for 10 seconds to get gluster docker container up\")\n\t\t\ttime.Sleep(10 * time.Second)\n\t\t}\n\t}\n\treturn nil\n}", "func waitForEndpoint(ctx context.Context, c clientset.Interface, ns, name string) error {\n\t// registerTimeout is how long to wait for an endpoint to be registered.\n\tregisterTimeout := time.Minute\n\tfor t := time.Now(); time.Since(t) < registerTimeout; time.Sleep(framework.Poll) {\n\t\tendpoint, err := c.CoreV1().Endpoints(ns).Get(ctx, name, metav1.GetOptions{})\n\t\tif apierrors.IsNotFound(err) {\n\t\t\tframework.Logf(\"Endpoint %s/%s is not ready yet\", ns, name)\n\t\t\tcontinue\n\t\t}\n\t\tframework.ExpectNoError(err, \"Failed to get endpoints for %s/%s\", ns, name)\n\t\tif len(endpoint.Subsets) == 0 || len(endpoint.Subsets[0].Addresses) == 0 {\n\t\t\tframework.Logf(\"Endpoint %s/%s is not ready yet\", ns, name)\n\t\t\tcontinue\n\t\t}\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"failed to get endpoints for %s/%s\", ns, name)\n}", "func (rcc *rotateCertsCmd) waitForKubeSystemReadiness() error {\n\tlog.Info(\"Checking health of all kube-system pods\")\n\ttimeout := time.Duration(len(rcc.nodes)) * time.Duration(float64(time.Minute)*1.25)\n\tif rotateCertsDefaultTimeout > timeout {\n\t\ttimeout = rotateCertsDefaultTimeout\n\t}\n\tif err := ops.WaitForAllInNamespaceReady(rcc.kubeClient, metav1.NamespaceSystem, rotateCertsDefaultInterval, timeout, rcc.nodes); err != nil {\n\t\treturn errors.Wrap(err, \"waiting for kube-system containers to reach the Ready state within the timeout period\")\n\t}\n\treturn nil\n}", "func waitForGuestbookResponse(ctx context.Context, c 
clientset.Interface, cmd, arg, expectedResponse string, timeout time.Duration, ns string) bool {\n\tfor start := time.Now(); time.Since(start) < timeout && ctx.Err() == nil; time.Sleep(5 * time.Second) {\n\t\tres, err := makeRequestToGuestbook(ctx, c, cmd, arg, ns)\n\t\tif err == nil && res == expectedResponse {\n\t\t\treturn true\n\t\t}\n\t\tframework.Logf(\"Failed to get response from guestbook. err: %v, response: %s\", err, res)\n\t}\n\treturn false\n}", "func LoadTest(w stdio.Writer, state State, waitForCancel func(), t target.Target) TestResult {\n\tresult := TestResult{Cancelled: true}\n\n\t// apm-server warm up\n\ttime.Sleep(time.Second)\n\tif err := state.Ready(); err != nil {\n\t\tio.ReplyEither(w, err)\n\t\treturn result\n\t}\n\n\tuncompressed := int64(len(t.Body))\n\twork := t.GetWork(ioutil.Discard)\n\n\tdocsBefore := state.ElasticSearch().Count()\n\tstart := time.Now()\n\tgo work.Run()\n\tio.ReplyNL(w, io.Grey+fmt.Sprintf(\"started new work, payload size %s (uncompressed), %s (compressed) ...\",\n\t\tbyteCountDecimal(uncompressed), byteCountDecimal(int64(len(t.Body)))))\n\tio.Prompt(w)\n\n\tcancelled := make(chan struct{}, 1)\n\tgo func() {\n\t\twaitForCancel()\n\t\tcancelled <- struct{}{}\n\t}()\n\n\tselect {\n\tcase <-time.After(t.Config.RunTimeout):\n\t\twork.Stop()\n\t\telapsedTime := time.Now().Sub(start)\n\t\tcodes := work.StatusCodes()\n\t\t_, totalResponses := output.SortedTotal(codes)\n\t\tresult = TestResult{\n\t\t\tElapsed: elapsedTime,\n\t\t\tDuration: t.Config.RunTimeout,\n\t\t\tErrors: t.Config.NumErrors,\n\t\t\tTransactions: t.Config.NumTransactions,\n\t\t\tSpans: t.Config.NumSpans,\n\t\t\tFrames: t.Config.NumFrames,\n\t\t\tDocsPerRequest: int64(t.Config.NumErrors+t.Config.NumTransactions+(t.Config.NumTransactions*t.Config.NumSpans)) * work.Flushes(),\n\t\t\tAgents: t.Config.NumAgents,\n\t\t\tThrottle: int(t.Config.Throttle),\n\t\t\tStream: t.Config.Stream,\n\t\t\tGzipBodySize: int64(len(t.Body)),\n\t\t\tBodySize: uncompressed,\n\t\t\tPushed: uncompressed * work.Flushes(),\n\t\t\tGzipPushed: int64(len(t.Body)) * work.Flushes(),\n\t\t\tFlushes: work.Flushes(),\n\t\t\tReqTimeout: time.Duration(t.Config.RequestTimeout),\n\t\t\tElasticUrl: state.ElasticSearch().Url(),\n\t\t\tApmUrls: s.Join(state.ApmServer().Urls(), \",\"),\n\t\t\tApmHosts: s.Join(hosts(state.ApmServer().Urls()), \",\"),\n\t\t\tNumApm: len(state.ApmServer().Urls()),\n\t\t\tBranch: state.ApmServer().Branch(),\n\t\t\t// AcceptedResponses: codes[202],\n\t\t\tTotalResponses: totalResponses,\n\t\t\tActualDocs: state.ElasticSearch().Count() - docsBefore,\n\t\t}\n\n\t\tvar format string\n\t\tvar throttled string\n\t\tif t.Config.Stream {\n\t\t\tif result.Throttle < math.MaxInt16 {\n\t\t\t\tthrottled = fmt.Sprintf(\"throttled at %d events per second\", result.Throttle)\n\t\t\t}\n\t\t\tformat = \"%s\\nstreamed %d events per request with %d agent(s) %s\\n%s\"\n\t\t} else {\n\t\t\tif result.Throttle < math.MaxInt16 {\n\t\t\t\tthrottled = fmt.Sprintf(\"throttled at %d requests per second\", result.Throttle)\n\t\t\t}\n\t\t\tformat = \"%s\\nsent %d events per request with %d agent(s) %s\\n%s\"\n\t\t}\n\t\tio.ReplyNL(w, fmt.Sprintf(format, io.Yellow, result.DocsPerRequest, result.Agents, throttled, io.Grey))\n\t\toutput.PrintResults(work, elapsedTime.Seconds(), w)\n\n\tcase <-cancelled:\n\t\twork.Stop()\n\t}\n\treturn result\n}", "func (client *Client) WaitForAllTestResourcesReady() error {\n\tif err := client.WaitForChannelsReady(); err != nil {\n\t\treturn err\n\t}\n\tif err := 
client.WaitForSubscriptionsReady(); err != nil {\n\t\treturn err\n\t}\n\tif err := client.WaitForBrokersReady(); err != nil {\n\t\treturn err\n\t}\n\tif err := client.WaitForTriggersReady(); err != nil {\n\t\treturn err\n\t}\n\tif err := client.WaitForCronJobSourcesReady(); err != nil {\n\t\treturn err\n\t}\n\tif err := client.WaitForContainerSourcesReady(); err != nil {\n\t\treturn err\n\t}\n\tif err := pkgTest.WaitForAllPodsRunning(client.Kube, client.Namespace); err != nil {\n\t\treturn err\n\t}\n\t// FIXME(Fredy-Z): This hacky sleep is added to try mitigating the test flakiness.\n\t// Will delete it after we find the root cause and fix.\n\ttime.Sleep(10 * time.Second)\n\treturn nil\n}", "func executeQuestionTestWithTimeout(t *testing.T, test questionTest) {\n\ttimeout := time.After(2 * time.Second)\n\tdone := make(chan bool)\n\n\tgo func() {\n\t\texecuteQuestionTest(t, test)\n\t\tdone <- true\n\t}()\n\n\tselect {\n\tcase <-timeout:\n\t\tt.Fatal(\"Test timed-out\")\n\tcase <-done:\n\t}\n}", "func (rcc *rotateCertsCmd) waitForControlPlaneReadiness() error {\n\tlog.Info(\"Checking health of control plane components\")\n\tpods := make([]string, 0)\n\tfor _, n := range rcc.cs.Properties.GetMasterVMNameList() {\n\t\tfor _, c := range []string{kubeAddonManager, kubeAPIServer, kubeControllerManager, kubeScheduler} {\n\t\t\tpods = append(pods, fmt.Sprintf(\"%s-%s\", c, n))\n\t\t}\n\t}\n\tif err := ops.WaitForReady(rcc.kubeClient, metav1.NamespaceSystem, pods, rotateCertsDefaultInterval, rotateCertsDefaultTimeout, rcc.nodes); err != nil {\n\t\treturn errors.Wrap(err, \"waiting for control plane containers to reach the Ready state within the timeout period\")\n\t}\n\treturn nil\n}", "func (c *myClient) waitForEnvironmentsReady(p, t int, envList ...string) (err error) {\n\n\tlogger.Infof(\"Waiting up to %v seconds for the environments to be ready\", t)\n\ttimeOut := 0\n\tfor timeOut < t {\n\t\tlogger.Info(\"Waiting for the environments\")\n\t\ttime.Sleep(time.Duration(p) * time.Second)\n\t\ttimeOut = timeOut + p\n\t\tif err = c.checkForBusyEnvironments(envList); err == nil {\n\t\t\tbreak\n\t\t}\n\t}\n\tif timeOut >= t {\n\t\terr = fmt.Errorf(\"waitForEnvironmentsReady timed out\")\n\t}\n\treturn err\n}", "func TestReductionTimeout(t *testing.T) {\n\teb, _, streamer, _, _ := launchReductionTest(true, 2)\n\n\t// send a hash to start reduction\n\thash, _ := crypto.RandEntropy(32)\n\n\t// Because round updates are asynchronous (sent through a channel), we wait\n\t// for a bit to let the broker update its round.\n\ttime.Sleep(200 * time.Millisecond)\n\tsendSelection(1, hash, eb)\n\n\ttimer := time.After(1 * time.Second)\n\t<-timer\n\n\tstopChan := make(chan struct{})\n\ttime.AfterFunc(1*time.Second, func() {\n\t\tseenTopics := streamer.SeenTopics()\n\t\tfor _, topic := range seenTopics {\n\t\t\tif topic == topics.Agreement {\n\t\t\t\tt.Fatal(\"\")\n\t\t\t}\n\t\t}\n\n\t\tstopChan <- struct{}{}\n\t})\n\n\t<-stopChan\n}", "func (t *TestProcessor) WaitForClose(timeout time.Duration) error {\n\tprintln(\"Waiting for close\")\n\treturn nil\n}", "func main() {\n\tclient := pegasus.NewClient(pegasus.Config{MetaServers: []string{\"172.21.0.11:35601\"}})\n\tdefer client.Close()\n\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\tdefer cancel()\n\ttb, err := client.OpenTable(ctx, \"test\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tvar firstTimeoutTime time.Time\n\tfor {\n\t\tctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)\n\t\terr := tb.Set(ctx, 
[]byte(time.Now().Format(time.RFC3339Nano)), []byte(\"\"), []byte(\"value\"))\n\t\tcancel()\n\n\t\tif err != nil {\n\t\t\tpegalog.GetLogger().Print(err)\n\n\t\t\tif !strings.Contains(err.Error(), context.DeadlineExceeded.Error()) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif firstTimeoutTime.IsZero() {\n\t\t\t\tfirstTimeoutTime = time.Now()\n\t\t\t}\n\t\t\tif time.Since(firstTimeoutTime) > 3*time.Minute {\n\t\t\t\tpanic(\"unable to recover from failure in 3min\")\n\t\t\t}\n\t\t}\n\t\tif err == nil && !firstTimeoutTime.IsZero() && time.Since(firstTimeoutTime) > 10*time.Minute {\n\t\t\tfmt.Println(\"test passed\")\n\t\t\treturn\n\t\t}\n\t\ttime.Sleep(time.Millisecond * 500)\n\t}\n}", "func waitForOnePodReady(t testing.TB, ctx context.Context, namespace string, label string) {\n\tt.Helper()\n\tkubeClient := tu.GetKubeClient(t)\n\trequire.NoErrorWithinTRetryConstant(t, 1*time.Minute, func() error {\n\t\tpods, err := kubeClient.CoreV1().Pods(namespace).\n\t\t\tList(ctx, metav1.ListOptions{LabelSelector: label})\n\t\tif err != nil {\n\t\t\treturn errors.EnsureStack(err)\n\t\t}\n\n\t\tif len(pods.Items) < 1 {\n\t\t\treturn errors.Errorf(\"pod with label %s has not yet been restarted.\", label)\n\t\t}\n\t\tfor _, item := range pods.Items {\n\t\t\tif item.Status.Phase == v1.PodRunning {\n\t\t\t\tfor _, c := range item.Status.Conditions {\n\t\t\t\t\tif c.Type == v1.PodReady && c.Status == v1.ConditionTrue {\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn errors.Errorf(\"one pod with label %s is not yet running and ready.\", label)\n\t}, 5*time.Second)\n}", "func (k Kubectl) WaitPodsReady(timeout time.Duration) error {\n\treturn utils.CheckUntil(5*time.Second, timeout, func() (bool, error) {\n\t\tcmd := kindCommand(\"kubectl get -n authelia pods --no-headers\")\n\t\tcmd.Stdout = nil\n\t\tcmd.Stderr = nil\n\t\toutput, _ := cmd.Output()\n\n\t\tlines := strings.Split(string(output), \"\\n\")\n\n\t\tnonEmptyLines := make([]string, 0)\n\t\tfor _, line := range lines {\n\t\t\tif line != \"\" {\n\t\t\t\tnonEmptyLines = append(nonEmptyLines, line)\n\t\t\t}\n\t\t}\n\n\t\tfor _, line := range nonEmptyLines {\n\t\t\tif !strings.Contains(line, \"1/1\") {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\treturn true, nil\n\t})\n}", "func waitForConsistency() {\n\ttime.Sleep(500 * time.Millisecond)\n}", "func WaitForDeploymentComplete(c clientset.Interface, d *appsv1.Deployment) error {\n\treturn testutils.WaitForDeploymentComplete(c, d, framework.Logf, poll, pollLongTimeout)\n}", "func testWithTimeout(timeout time.Duration, run func() error) error {\n\ttimer := time.NewTimer(timeout)\n\tdefer timer.Stop()\n\n\tfor {\n\t\tif err := run(); err != nil {\n\t\t\tselect {\n\t\t\tcase <-timer.C:\n\t\t\t\treturn err\n\t\t\tdefault:\n\t\t\t\ttime.Sleep(time.Millisecond * 5)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n}", "func (k *kubelet) waitForNodeReady() error {\n\tkc, _ := k.config.AdminConfig.ToYAMLString() //nolint:errcheck // This is checked in Validate().\n\n\tc, err := client.NewClient([]byte(kc))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating kubernetes client: %w\", err)\n\t}\n\n\treturn c.WaitForNodeReady(k.config.Name)\n}", "func TestSucceedsAfterTimeout(t *testing.T){\n breaker := NewBreaker(2 * time.Second, 2, 2)\n\n breaker.halfOpen()\n\n _, err := breaker.Run(alwaysSucceedsFunc)\n\n evaluateCondition(t, err == nil, \"TestSucceedsAfterTimeout\")\n}", "func timeoutVerify(progressBarArray *[]*progressReader) {\n\tfor {\n\t\tfor _, ret := range *progressBarArray 
{\n\t\t\tdiff := time.Since(ret.lastRead)\n\t\t\tif diff > nanosecondsTimeout {\n\t\t\t\t(*(ret.reader)).Close()\n\t\t\t\tret.lastRead = time.Now()\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(time.Second)\n\t}\n}", "func WaitForDeletion(profileKey types.NamespacedName, timeout time.Duration) error {\n\treturn wait.PollImmediate(time.Second, timeout, func() (bool, error) {\n\t\tprof := &performancev2.PerformanceProfile{}\n\t\tif err := testclient.Client.Get(context.TODO(), profileKey, prof); errors.IsNotFound(err) {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t})\n}", "func waitForMachineController(ctx context.Context, client dynclient.Client) error {\n\tcondFn := clientutil.PodsReadyCondition(ctx, client, dynclient.ListOptions{\n\t\tNamespace: resources.MachineControllerNameSpace,\n\t\tLabelSelector: labels.SelectorFromSet(map[string]string{\n\t\t\tappLabelKey: resources.MachineControllerName,\n\t\t}),\n\t})\n\n\treturn fail.KubeClient(wait.PollUntilContextTimeout(ctx, 5*time.Second, 3*time.Minute, false, condFn.WithContext()), \"waiting for machine-controller to became ready\")\n}", "func Test_Complete_Flow(t *testing.T) {\n\tassert := assert.New(t)\n\n\tdao := dao.NewMetricDaoMemoryImpl(3) // Setting the TTL in 3 seconds for testing purpose\n\tsrv := service.NewMetricsServiceImpl(dao)\n\n\t// T0\n\terr := srv.AddMetric(\"metric1\", 1)\n\tassert.Nil(err)\n\n\terr = srv.AddMetric(\"metric2\", 3)\n\tassert.Nil(err)\n\n\terr = srv.AddMetric(\"metric2\", 1)\n\tassert.Nil(err)\n\n\terr = srv.AddMetric(\"metric3\", -2)\n\tassert.Nil(err)\n\n\t// Checking the metrics\n\tval, err := srv.SumMetric(\"metric1\")\n\tassert.Nil(err)\n\tassert.Equal(1, val)\n\n\tval, err = srv.SumMetric(\"metric2\")\n\tassert.Nil(err)\n\tassert.Equal(4, val)\n\n\tval, err = srv.SumMetric(\"metric3\")\n\tassert.Nil(err)\n\tassert.Equal(-2, val)\n\n\t// sleeping 3 secs\n\ttime.Sleep(time.Second * 2)\n\n\t// T1 - adding more values to the metrics\n\terr = srv.AddMetric(\"metric1\", 10)\n\tassert.Nil(err)\n\n\terr = srv.AddMetric(\"metric2\", -2)\n\tassert.Nil(err)\n\n\terr = srv.AddMetric(\"metric3\", 10)\n\tassert.Nil(err)\n\n\terr = srv.AddMetric(\"metric3\", 22)\n\tassert.Nil(err)\n\n\t// Checking the metrics again\n\tval, err = srv.SumMetric(\"metric1\")\n\tassert.Nil(err)\n\tassert.Equal(11, val)\n\n\tval, err = srv.SumMetric(\"metric2\")\n\tassert.Nil(err)\n\tassert.Equal(2, val)\n\n\tval, err = srv.SumMetric(\"metric3\")\n\tassert.Nil(err)\n\tassert.Equal(30, val)\n\n\t// sleeping 3 more seconds and the metrics added on T0 should be removed\n\ttime.Sleep(time.Second * 2)\n\n\t// T2 - Checking the metrics again\n\tval, err = srv.SumMetric(\"metric1\")\n\tassert.Nil(err)\n\tassert.Equal(10, val)\n\n\tval, err = srv.SumMetric(\"metric2\")\n\tassert.Nil(err)\n\tassert.Equal(-2, val)\n\n\tval, err = srv.SumMetric(\"metric3\")\n\tassert.Nil(err)\n\tassert.Equal(32, val)\n\n\t// sleeping 5 more seconds and there shouldn't be more metrics\n\ttime.Sleep(time.Second * 3)\n\n\tval, err = srv.SumMetric(\"metric1\")\n\tassert.Nil(err)\n\tassert.Equal(0, val)\n\n\tval, err = srv.SumMetric(\"metric2\")\n\tassert.Nil(err)\n\tassert.Equal(0, val)\n\n\tval, err = srv.SumMetric(\"metric3\")\n\tassert.Nil(err)\n\tassert.Equal(0, val)\n}", "func PodStatusCheck(experimentsDetails *types.ExperimentDetails, clients environment.ClientSets) error {\n\tPodList, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.AppNS).List(metav1.ListOptions{LabelSelector: experimentsDetails.AppLabel})\n\tif err != nil {\n\t\treturn 
errors.Errorf(\"fail to get the list of pods, due to %v\", err)\n\t}\n\tvar flag = false\n\tfor _, pod := range PodList.Items {\n\t\tif string(pod.Status.Phase) != \"Running\" {\n\t\t\tfor count := 0; count < 20; count++ {\n\t\t\t\tPodList, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.AppNS).List(metav1.ListOptions{LabelSelector: experimentsDetails.AppLabel})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn errors.Errorf(\"fail to get the list of pods, due to %v\", err)\n\t\t\t\t}\n\t\t\t\tfor _, pod := range PodList.Items {\n\t\t\t\t\tif string(pod.Status.Phase) != \"Running\" {\n\t\t\t\t\t\tlog.Infof(\"Currently, the experiment job pod is in %v State, Please Wait ...\", pod.Status.Phase)\n\t\t\t\t\t\ttime.Sleep(5 * time.Second)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tflag = true\n\t\t\t\t\t\tbreak\n\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif flag == true {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tif count == 19 {\n\t\t\t\t\treturn errors.Errorf(\"pod fails to come in running state, due to %v\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tlog.Info(\"[Status]: Pod is in Running state\")\n\n\treturn nil\n}", "func WaitForDeletion(cs *testclient.ClientSet, pod *corev1.Pod, timeout time.Duration) error {\n\treturn wait.PollImmediate(time.Second, timeout, func() (bool, error) {\n\t\t_, err := cs.Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})\n\t\tif errors.IsNotFound(err) {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t})\n}", "func waitForServer(client *http.Client, serverAddress string) error {\n\tvar err error\n\t// wait for the server to come up\n\tfor i := 0; i < 10; i++ {\n\t\ttime.Sleep(100 * time.Millisecond)\n\t\t_, err = client.Get(\"http://\" + serverAddress)\n\t\tif err == nil {\n\t\t\treturn nil // server is up now\n\t\t}\n\t}\n\treturn fmt.Errorf(\"timed out waiting for server %s to come up: %w\", serverAddress, err)\n}", "func TestClusterDelayedUpgrade(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping cluster test in short mode.\")\n\t}\n\tt.Parallel()\n\n\ttc := newTestCluster(t)\n\tdefer tc.tearDown()\n\n\ttc.addSequinses(3)\n\ttc.expectProgression(down, noVersion, v1, v2)\n\n\ttc.makeVersionAvailable(v1)\n\ttc.setup()\n\ttc.startTest()\n\n\ttime.Sleep(expectTimeout)\n\ttc.makeVersionAvailable(v2)\n\ttc.sequinses[0].hup()\n\n\ttime.Sleep(expectTimeout)\n\ttc.hup()\n\n\ttc.assertProgression()\n}", "func WaitUntilPodAvailable(t testing.TestingT, options *KubectlOptions, podName string, retries int, sleepBetweenRetries time.Duration) {\n\trequire.NoError(t, WaitUntilPodAvailableE(t, options, podName, retries, sleepBetweenRetries))\n}", "func waitForNPods(ps *podStore, expect int, timeout time.Duration) ([]string, error) {\n\t// Loop until we find expect pods or timeout is passed.\n\tvar pods []*api.Pod\n\tvar errLast error\n\tfound := wait.Poll(poll, timeout, func() (bool, error) {\n\t\tpods = ps.List()\n\t\tif len(pods) != expect {\n\t\t\terrLast = fmt.Errorf(\"expected to find %d pods but found only %d\", expect, len(pods))\n\t\t\tLogf(\"Error getting pods: %v\", errLast)\n\t\t\treturn false, nil\n\t\t}\n\t\treturn true, nil\n\t}) == nil\n\t// Extract the names of all found pods.\n\tpodNames := make([]string, len(pods))\n\tfor i, p := range pods {\n\t\tpodNames[i] = p.ObjectMeta.Name\n\t}\n\tif !found {\n\t\treturn podNames, fmt.Errorf(\"couldn't find %d pods within %v; last error: %v\",\n\t\t\texpect, timeout, errLast)\n\t}\n\treturn podNames, nil\n}", "func TestWaitUntilRunning(t *testing.T) {\n\tts := memorytopo.NewServer(\"cell1\")\n\tm := 
NewManager(ts)\n\n\t// Start it 3 times i.e. restart it 2 times.\n\tfor i := 1; i <= 3; i++ {\n\t\t// Run the manager in the background.\n\t\twg, _, cancel := StartManager(m)\n\n\t\t// Shut it down and wait for the shutdown to complete.\n\t\tcancel()\n\t\twg.Wait()\n\t}\n}", "func TestWait(t *testing.T) {\n\tdefer check(t)\n\tcontent := \"hello world!\"\n\treq := &showcasepb.WaitRequest{\n\t\tEnd: &showcasepb.WaitRequest_Ttl{\n\t\t\tTtl: &durationpb.Duration{Nanos: 100},\n\t\t},\n\t\tResponse: &showcasepb.WaitRequest_Success{\n\t\t\tSuccess: &showcasepb.WaitResponse{Content: content},\n\t\t},\n\t}\n\top, err := echo.Wait(context.Background(), req)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tresp, err := op.Wait(context.Background())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif resp.GetContent() != content {\n\t\tt.Errorf(\"Wait() = %q, want %q\", resp.GetContent(), content)\n\t}\n}", "func (v *ObservabilityVerifier) ExpectCompletingStatus(g Gomega) {}", "func (t *NvidiaGPUUpgradeTest) Test(ctx context.Context, f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {\n\t<-done\n\tginkgo.By(\"Verifying gpu job success\")\n\tscheduling.VerifyJobNCompletions(ctx, f, completions)\n\tif upgrade == upgrades.MasterUpgrade || upgrade == upgrades.ClusterUpgrade {\n\t\t// MasterUpgrade should be totally hitless.\n\t\tjob, err := e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, \"cuda-add\")\n\t\tframework.ExpectNoError(err)\n\t\tframework.ExpectEqual(job.Status.Failed, 0, \"Job pods failed during master upgrade: %v\", job.Status.Failed)\n\t}\n}", "func CheckAfterTest(d time.Duration) error {\n\thttp.DefaultTransport.(*http.Transport).CloseIdleConnections()\n\tvar bad string\n\t// Presence of these goroutines causes immediate test failure.\n\tbadSubstring := map[string]string{\n\t\t\").writeLoop(\": \"a Transport\",\n\t\t\"created by net/http/httptest.(*Server).Start\": \"an httptest.Server\",\n\t\t\"timeoutHandler\": \"a TimeoutHandler\",\n\t\t\"net.(*netFD).connect(\": \"a timing out dial\",\n\t\t\").noteClientGone(\": \"a closenotifier sender\",\n\t\t\").readLoop(\": \"a Transport\",\n\t\t\".grpc\": \"a gRPC resource\",\n\t\t\").sendCloseSubstream(\": \"a stream closing routine\",\n\t}\n\n\tvar stacks string\n\tbegin := time.Now()\n\tfor time.Since(begin) < d {\n\t\tbad = \"\"\n\t\tgoroutines := interestingGoroutines()\n\t\tif len(goroutines) == 0 {\n\t\t\treturn nil\n\t\t}\n\t\tstacks = strings.Join(goroutines, \"\\n\\n\")\n\n\t\tfor substr, what := range badSubstring {\n\t\t\tif strings.Contains(stacks, substr) {\n\t\t\t\tbad = what\n\t\t\t}\n\t\t}\n\t\t// Undesired goroutines found, but goroutines might just still be\n\t\t// shutting down, so give it some time.\n\t\truntime.Gosched()\n\t\ttime.Sleep(50 * time.Millisecond)\n\t}\n\treturn fmt.Errorf(\"appears to have leaked %s:\\n%s\", bad, stacks)\n}", "func TestRequeueAfterProgressDeadlineSeconds(t *testing.T) {\n\ttemplates := generateTemplates(\"bar\")\n\tex := newExperiment(\"foo\", templates, \"\")\n\tex.Status.TemplateStatuses = []v1alpha1.TemplateStatus{\n\t\tgenerateTemplatesStatus(\"bar\", 0, 0, v1alpha1.TemplateStatusProgressing, now()),\n\t}\n\tnow := metav1.Now()\n\tex.Status.TemplateStatuses[0].LastTransitionTime = &now\n\texCtx := newTestContext(ex)\n\trs1 := templateToRS(ex, ex.Spec.Templates[0], 0)\n\texCtx.templateRSs = map[string]*appsv1.ReplicaSet{\n\t\t\"bar\": rs1,\n\t}\n\tenqueueCalled := false\n\texCtx.enqueueExperimentAfter = func(obj interface{}, duration time.Duration) {\n\t\tenqueueCalled = 
true\n\t\t// ensures we are enqueued around 10 minutes\n\t\ttenMinutes := time.Second * time.Duration(600)\n\t\tdelta := math.Abs(float64(tenMinutes - duration))\n\t\tassert.True(t, delta < float64(100*time.Millisecond))\n\t}\n\texCtx.reconcile()\n\tassert.True(t, enqueueCalled)\n}", "func waitForHelmRunning(ctx context.Context, configPath string) error {\n\tfor {\n\t\tcmd := exec.Command(\"helm\", \"ls\", \"--kubeconfig\", configPath)\n\t\tvar out bytes.Buffer\n\t\tcmd.Stderr = &out\n\t\tcmd.Run()\n\t\tif out.String() == \"\" {\n\t\t\treturn nil\n\t\t}\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn errors.Wrap(ctx.Err(), \"timed out waiting for helm to become ready\")\n\t\tcase <-time.After(5 * time.Second):\n\t\t}\n\t}\n}", "func waitForPodCondition(kubeClient *unversioned.Client, ns, podName string, condition func(pod *api.Pod) (bool, error),\n\tinterval, timeout time.Duration) error {\n\terr := wait.PollImmediate(interval, timeout, func() (bool, error) {\n\t\tpod, err := kubeClient.Pods(ns).Get(podName)\n\t\tif err != nil {\n\t\t\tif apierrs.IsNotFound(err) {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\n\t\tdone, err := condition(pod)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif done {\n\t\t\treturn true, nil\n\t\t}\n\n\t\treturn false, nil\n\t})\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"timed out waiting to observe own status as Running\")\n\t}\n\n\treturn nil\n}", "func waitForSplitSecond() {\n\tcurNs := time.Now().Nanosecond()\n\tfor curNs > 500000000 {\n\t\ttime.Sleep(100 * time.Millisecond)\n\t\tcurNs = time.Now().Nanosecond()\n\t}\n}", "func testHealth(service *bridge.Service, client fargo.EurekaConnection, elbReg *fargo.Instance) {\n\tcontainerID := service.Origin.ContainerID\n\n\t// Get actual eureka status and lookup previous logical registration status\n\teurekaStatus := getELBStatus(client, elbReg)\n\tlog.Debugf(\"Eureka status check gave: %v\", eurekaStatus)\n\tlast := getPreviousStatus(containerID)\n\n\t// Work out an appropriate registration status given previous and current values\n\tstatusChange := determineNewEurekaStatus(containerID, eurekaStatus, last)\n\tsetPreviousStatus(containerID, statusChange.newStatus)\n\telbReg.Status = statusChange.registrationStatus\n\tlog.Debugf(\"Status health check returned prev: %v registration: %v\", last, elbReg.Status)\n}", "func waitForPoint(acc *testutil.Accumulator, t *testing.T) {\n\t// Give the kafka container up to 2 seconds to get the point to the consumer\n\tticker := time.NewTicker(5 * time.Millisecond)\n\tcounter := 0\n\t//nolint:gosimple // for-select used on purpose\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tcounter++\n\t\t\tif counter > 1000 {\n\t\t\t\tt.Fatal(\"Waited for 5s, point never arrived to consumer\")\n\t\t\t} else if acc.NFields() == 1 {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func WaitDemoApp(t ginkgo.GinkgoTInterface, options *k8s.KubectlOptions, replicas int) {\n\tk8s.WaitUntilNumPodsCreated(t, options, metav1.ListOptions{\n\t\tLabelSelector: \"app=\" + StatefulSetName,\n\t}, replicas, retries, sleepBetweenRetries)\n\n\tpods := k8s.ListPods(t, options, metav1.ListOptions{\n\t\tLabelSelector: \"app=\" + StatefulSetName,\n\t})\n\tfor index := range pods {\n\t\tk8s.WaitUntilPodAvailable(t, options, pods[index].Name, retries, sleepBetweenRetries)\n\t}\n}", "func waitForDeployment(getDeploymentFunc func() (*appsv1.Deployment, error), interval, timeout time.Duration) error {\n\treturn wait.PollImmediate(interval, timeout, func() (bool, error) {\n\t\tdeployment, err := 
getDeploymentFunc()\n\t\tif err != nil {\n\t\t\tif apierrors.IsNotFound(err) {\n\t\t\t\tframework.Logf(\"deployment not found, continue waiting: %s\", err)\n\t\t\t\treturn false, nil\n\t\t\t}\n\n\t\t\tframework.Logf(\"error while deploying, error %s\", err)\n\t\t\treturn false, err\n\t\t}\n\t\tframework.Logf(\"deployment status %s\", &deployment.Status)\n\t\treturn util.DeploymentComplete(deployment, &deployment.Status), nil\n\t})\n}", "func waitExamples() string {\n\treturn `$ pouch ps\nName ID Status Created Image Runtime\nfoo f6717e Up 2 seconds 3 seconds ago registry.hub.docker.com/library/busybox:latest runc\n$ pouch stop foo\n$ pouch ps -a\nName ID Status Created Image Runtime\nfoo f6717e Stopped (0) 1 minute 2 minutes ago registry.hub.docker.com/library/busybox:latest runc\n$ pouch wait foo\n0`\n}", "func (suite *APIContainerExecStartSuite) TestContainerExecStartPaused(c *check.C) {\n}", "func WaitUntilPodAvailableE(t testing.TestingT, options *KubectlOptions, podName string, retries int, sleepBetweenRetries time.Duration) error {\n\tstatusMsg := fmt.Sprintf(\"Wait for pod %s to be provisioned.\", podName)\n\tmessage, err := retry.DoWithRetryE(\n\t\tt,\n\t\tstatusMsg,\n\t\tretries,\n\t\tsleepBetweenRetries,\n\t\tfunc() (string, error) {\n\t\t\tpod, err := GetPodE(t, options, podName)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\tif !IsPodAvailable(pod) {\n\t\t\t\treturn \"\", NewPodNotAvailableError(pod)\n\t\t\t}\n\t\t\treturn \"Pod is now available\", nil\n\t\t},\n\t)\n\tif err != nil {\n\t\tlogger.Logf(t, \"Timedout waiting for Pod to be provisioned: %s\", err)\n\t\treturn err\n\t}\n\tlogger.Logf(t, message)\n\treturn nil\n}", "func (r *runner) UpdateTestRun(ctrl controller.Interface, testRun *v1alpha1.TestRun) error {\n\n\tif testRun.Status.Status == v1alpha1.TestRunComplete {\n\t\tlog.Printf(\" | '%v/%v' is already Complete - Skipping\", testRun.Namespace, testRun.Name)\n\t\treturn nil\n\t}\n\n\tif testRun.Status.Status == \"\" {\n\t\terr := initializeStatus(ctrl, testRun)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tstats := runStats{0, 0}\n\tfor _, record := range testRun.Status.Records {\n\t\tif record.EndTime != nil {\n\t\t\tstats.CompletedCount++\n\t\t\tif record.Result != string(corev1.PodSucceeded) {\n\t\t\t\tstats.FailCount++\n\t\t\t}\n\t\t}\n\t}\n\tif stats.CompletedCount == len(testRun.Status.Records) {\n\t\treturn testRunComplete(ctrl, testRun, stats)\n\t}\n\n\tlog.Printf(\"Running '%v/%v'\", testRun.Namespace, testRun.Name)\n\n\tlog.Printf(\" | %v/%v\", testRun.Namespace, testRun.Name)\n\n\ttests, err := getTestsForTestRun(ctrl, testRun)\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error getting list of tests: %s\", err.Error())\n\t}\n\tlog.Printf(\" | Test Count: %v\", len(tests))\n\n\tJobsSlots := getJobSlots(testRun)\n\n\treturn runNextNTests(ctrl, testRun, tests, JobsSlots)\n}", "func TestConsulScript_Exec_Timeout(t *testing.T) {\n\tt.Parallel() // run the slow tests in parallel\n\tserviceCheck := structs.ServiceCheck{\n\t\tName: \"sleeper\",\n\t\tInterval: time.Hour,\n\t\tTimeout: time.Second,\n\t}\n\texec := newBlockingScriptExec()\n\n\thb := newFakeHeartbeater()\n\tcheck := newScriptCheck(\"allocid\", \"testtask\", \"checkid\", &serviceCheck, exec, hb, testLogger(), nil)\n\thandle := check.run()\n\tdefer handle.cancel() // just-in-case cleanup\n\t<-exec.running\n\n\t// Check for UpdateTTL call\n\tselect {\n\tcase update := <-hb.updates:\n\t\tif update.status != api.HealthCritical {\n\t\t\tt.Errorf(\"expected %q due to timeout but 
received %q\", api.HealthCritical, update)\n\t\t}\n\tcase <-time.After(3 * time.Second):\n\t\tt.Fatalf(\"timed out waiting for script check to exit\")\n\t}\n\tif !exec.exited {\n\t\tt.Errorf(\"expected script executor to run and exit but it has not\")\n\t}\n\n\t// Cancel and watch for exit\n\thandle.cancel()\n\tselect {\n\tcase <-handle.wait():\n\t\t// ok!\n\tcase update := <-hb.updates:\n\t\tt.Errorf(\"unexpected UpdateTTL call on exit with status=%q\", update)\n\tcase <-time.After(3 * time.Second):\n\t\tt.Fatalf(\"timed out waiting for script check to exit\")\n\t}\n}", "func WaitForPodCompletion(ctx context.Context, cli kubernetes.Interface, namespace, name string) error {\n\tattachLog := true\n\tcontainerForLogs := \"\"\n\terr := poll.Wait(ctx, func(ctx context.Context) (bool, error) {\n\t\tp, err := cli.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\tattachLog = false\n\t\t\treturn true, err\n\t\t}\n\t\tcontainerForLogs = p.Spec.Containers[0].Name\n\t\tswitch p.Status.Phase {\n\t\tcase v1.PodFailed:\n\t\t\treturn false, errors.Errorf(\"Pod %s failed. Pod status: %s\", name, p.Status.String())\n\t\t}\n\t\treturn p.Status.Phase == v1.PodSucceeded, nil\n\t})\n\n\terrorMessage := \"Pod failed or did not transition into complete state\"\n\tif attachLog {\n\t\treturn getErrorFromLogs(ctx, cli, namespace, name, containerForLogs, err, errorMessage)\n\t}\n\treturn errors.Wrap(err, errorMessage)\n}", "func waitForLokiPodTermination(ctx context.Context, k8sClient client.Client, namespace string, log logr.Logger) error {\n\tlog.Info(\"Loki2vali: Verify that the pod loki-0 is not running, otherwise we cannot delete the PVC\", \"lokiNamespace\", namespace)\n\t// ensure that the context has a deadline\n\tctx, cancel := context.WithDeadline(ctx, time.Now().Add(30*time.Second))\n\tdefer cancel()\n\tdeadline, _ := ctx.Deadline()\n\tpod := &corev1.Pod{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"loki-0\",\n\t\t\tNamespace: namespace,\n\t\t},\n\t}\n\tif err := wait.PollUntilWithContext(ctx, 1*time.Second, func(context.Context) (done bool, err error) {\n\t\tif err := k8sClient.Get(ctx, client.ObjectKeyFromObject(pod), pod); err != nil {\n\t\t\tif apierrors.IsNotFound(err) {\n\t\t\t\tlog.Info(\"Loki2vali: pod loki-0 not found, continuing with the rename\", \"lokiNamespace\", namespace)\n\t\t\t\treturn true, nil\n\t\t\t} else {\n\t\t\t\treturn true, fmt.Errorf(\"Loki2vali: %v: Error retrieving pod loki-0, aborting: %w\", namespace, err)\n\t\t\t}\n\t\t}\n\t\tlog.Info(\"Loki2vali: Waiting for pod loki-0 to terminate\", \"lokiNamespace\", namespace, \"timeLeft\", time.Until(deadline))\n\t\treturn false, nil\n\t}); err != nil && err == wait.ErrWaitTimeout {\n\t\terr := fmt.Errorf(\"Loki2vali: %v: Timeout while waiting for the loki-0 pod to terminate\", namespace)\n\t\tlog.Info(\"Loki2vali:\", \"lokiError\", err)\n\t\treturn err\n\t} else {\n\t\treturn err\n\t}\n}", "func WaitForSyncComplete(ctx context.Context, napi api.FullNode) error {\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tcase <-time.After(3 * time.Second):\n\t\t\thead, err := napi.ChainHead(ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif time.Now().Unix()-int64(head.MinTimestamp()) < build.BlockDelay {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n}", "func waitForCertificateReady(configMap *corev1.ConfigMap) bool {\n\tlog := util.Logger()\n\tklient := util.KubeClient()\n\n\tinterval := time.Duration(3)\n\ttimeout := time.Duration(15)\n\n\terr := 
wait.PollUntilContextTimeout(ctx, interval*time.Second, timeout*time.Second, true, func(ctx context.Context) (bool, error) {\n\t\tif err := klient.Get(util.Context(), util.ObjectKey(configMap), configMap); err != nil {\n\t\t\tlog.Printf(\"⏳ Failed to get service ca certificate: %s\", err)\n\t\t\treturn false, nil\n\t\t}\n\t\tif configMap.Data[\"service-ca.crt\"] == \"\" {\n\t\t\treturn false, fmt.Errorf(\"service ca certificate not created\")\n\t\t}\n\n\t\treturn true, nil\n\t})\n\treturn (err == nil)\n}", "func WaitDUTActive(ctx context.Context, servoInst *servo.Servo, expectActive bool, timeout time.Duration) error {\n\tvar expectedDesc string\n\tif expectActive {\n\t\texpectedDesc = \"active\"\n\t} else {\n\t\texpectedDesc = \"inactive\"\n\t}\n\tif err := testing.Poll(ctx, func(ctx context.Context) error {\n\t\tactive, err := DUTActive(ctx, servoInst)\n\t\tif err != nil {\n\t\t\treturn testing.PollBreak(err)\n\t\t}\n\t\tif active != expectActive {\n\t\t\treturn errors.Errorf(\"DUT is not yet %s\", expectedDesc)\n\t\t}\n\t\treturn nil\n\t}, &testing.PollOptions{\n\t\tTimeout: timeout,\n\t\tInterval: time.Second,\n\t}); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to wait for DUT to become %s\", expectedDesc)\n\t}\n\treturn nil\n}", "func (c *Client) doWaitForStatus(eniID string, checkNum, checkInterval int, finalStatus string) error {\n\tfor i := 0; i < checkNum; i++ {\n\t\ttime.Sleep(time.Second * time.Duration(checkInterval))\n\t\tenis, err := c.queryENI(eniID, \"\", \"\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, eni := range enis {\n\t\t\tif *eni.NetworkInterfaceId == eniID {\n\t\t\t\tswitch *eni.State {\n\t\t\t\tcase ENI_STATUS_AVAILABLE:\n\t\t\t\t\tswitch finalStatus {\n\t\t\t\t\tcase ENI_STATUS_ATTACHED:\n\t\t\t\t\t\tif eni.Attachment != nil && eni.Attachment.InstanceId != nil {\n\t\t\t\t\t\t\tblog.Infof(\"eni %s is attached\", eniID)\n\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t}\n\t\t\t\t\t\tblog.Infof(\"eni %s is not attached\", eniID)\n\t\t\t\t\tcase ENI_STATUS_DETACHED:\n\t\t\t\t\t\tif eni.Attachment == nil {\n\t\t\t\t\t\t\tblog.Infof(\"eni %s is detached\", eniID)\n\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t}\n\t\t\t\t\t\tblog.Infof(\"eni %s is not detached\", eniID)\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tblog.Infof(\"eni %s is %s now\", eniID, *eni.State)\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\tcase ENI_STATUS_PENDING, ENI_STATUS_ATTACHING, ENI_STATUS_DETACHING, ENI_STATUS_DELETING:\n\t\t\t\t\tblog.Infof(\"eni %s is %s\", eniID, *eni.State)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tblog.Errorf(\"timeout when wait for eni %s\", eniID)\n\treturn fmt.Errorf(\"timeout when wait for eni %s\", eniID)\n}", "func WaitForPodReady(ctx context.Context, cli kubernetes.Interface, namespace, name string) error {\n\ttimeoutCtx, waitCancel := context.WithTimeout(ctx, GetPodReadyWaitTimeout())\n\tdefer waitCancel()\n\tattachLog := true\n\tcontainerForLogs := \"\"\n\terr := poll.Wait(timeoutCtx, func(ctx context.Context) (bool, error) {\n\t\tp, err := cli.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\tattachLog = false\n\t\t\treturn false, err\n\t\t}\n\t\tcontainerForLogs = p.Spec.Containers[0].Name\n\n\t\t// check if nodes are up and available\n\t\terr = checkNodesStatus(p, cli)\n\t\tif err != nil && !strings.Contains(err.Error(), errAccessingNode) {\n\t\t\tattachLog = false\n\t\t\treturn false, err\n\t\t}\n\n\t\t// check for memory or resource issues\n\t\tif p.Status.Phase == v1.PodPending {\n\t\t\tif 
p.Status.Reason == \"OutOfmemory\" || p.Status.Reason == \"OutOfcpu\" {\n\t\t\t\tattachLog = false\n\t\t\t\treturn false, errors.Errorf(\"Pod stuck in pending state, reason: %s\", p.Status.Reason)\n\t\t\t}\n\t\t}\n\n\t\t// check if pvc and pv are up and ready to mount\n\t\tif err := getVolStatus(timeoutCtx, p, cli, namespace); err != nil {\n\t\t\tattachLog = false\n\t\t\treturn false, err\n\t\t}\n\n\t\treturn p.Status.Phase != v1.PodPending && p.Status.Phase != \"\", nil\n\t})\n\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\terrorMessage := fmt.Sprintf(\"Pod did not transition into running state. Timeout:%v Namespace:%s, Name:%s\", GetPodReadyWaitTimeout(), namespace, name)\n\tif attachLog {\n\t\treturn getErrorFromLogs(ctx, cli, namespace, name, containerForLogs, err, errorMessage)\n\t}\n\n\treturn errors.Wrap(err, errorMessage)\n}", "func (c *InstallerController) timeToWaitBeforeInstallingNextPod(ctx context.Context, nodeStatuses []operatorv1.NodeStatus) time.Duration {\n\tif c.minReadyDuration == 0 {\n\t\treturn 0\n\t}\n\t// long enough that we would notice if something went really wrong. Short enough that a customer cluster will still function\n\tminDurationPodHasBeenReady := 600 * time.Second\n\tfor _, nodeStatus := range nodeStatuses {\n\t\tpod, err := c.podsGetter.Pods(c.targetNamespace).Get(ctx, mirrorPodNameForNode(c.staticPodName, nodeStatus.NodeName), metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\t// if we have an issue getting the static pod, just don't bother delaying for minReadySeconds at all\n\t\t\tcontinue\n\t\t}\n\t\tfor _, podCondition := range pod.Status.Conditions {\n\t\t\tif podCondition.Type != corev1.PodReady {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif podCondition.Status != corev1.ConditionTrue {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tdurationPodHasBeenReady := c.clock.Now().Sub(podCondition.LastTransitionTime.Time)\n\t\t\tif durationPodHasBeenReady < minDurationPodHasBeenReady {\n\t\t\t\tminDurationPodHasBeenReady = durationPodHasBeenReady\n\t\t\t}\n\t\t}\n\t}\n\t// if we've been ready longer than the minimum, don't wait\n\tif minDurationPodHasBeenReady > c.minReadyDuration {\n\t\treturn 0\n\t}\n\n\t// otherwise wait the balance\n\treturn c.minReadyDuration - minDurationPodHasBeenReady\n}" ]
[ "0.7927179", "0.72653604", "0.6584559", "0.60880834", "0.6087988", "0.6062425", "0.5917469", "0.59123796", "0.59050435", "0.59032446", "0.58534753", "0.5846773", "0.5770013", "0.57408786", "0.57073563", "0.56853384", "0.56759846", "0.5669011", "0.56403846", "0.56218016", "0.55970097", "0.5596225", "0.5586995", "0.55804664", "0.5570809", "0.5557242", "0.55402225", "0.5528433", "0.55077803", "0.5496877", "0.5478135", "0.54672843", "0.5449422", "0.54436547", "0.5443086", "0.5441915", "0.54336154", "0.5432275", "0.54302794", "0.5421677", "0.5413733", "0.54082614", "0.5378726", "0.5371019", "0.53549117", "0.53534013", "0.53461707", "0.5342356", "0.53278506", "0.53278047", "0.5313584", "0.5304138", "0.5295457", "0.528062", "0.5262516", "0.5253663", "0.5249203", "0.52424866", "0.5240472", "0.5239208", "0.52346545", "0.5222499", "0.5218435", "0.5215935", "0.52044636", "0.51974094", "0.5190698", "0.5189967", "0.5189803", "0.51810855", "0.5162798", "0.5162387", "0.51538104", "0.5151824", "0.5150239", "0.5143986", "0.5142657", "0.51402396", "0.5137145", "0.51363254", "0.5134087", "0.5129473", "0.51279324", "0.5125149", "0.5124852", "0.5121318", "0.5120713", "0.51201224", "0.5115033", "0.5113525", "0.5113166", "0.5111529", "0.5103263", "0.50935835", "0.50866646", "0.50844526", "0.5077812", "0.50750357", "0.50733745", "0.50684226" ]
0.76772004
1
DelayDuration returns delay duration in the form of time.Duration.
func (o *Options) DelayDuration() time.Duration { return time.Second * time.Duration(o.Delay) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (v RxDelay) Duration() time.Duration {\n\tswitch v {\n\tcase RX_DELAY_0, RX_DELAY_1:\n\t\treturn time.Second\n\tdefault:\n\t\treturn time.Duration(v) * time.Second\n\t}\n}", "func (d *Delay) TimeDuration() time.Duration {\n\treturn time.Duration(d.Duration*1000) * time.Millisecond\n}", "func (b *Backoff) Duration() time.Duration {\n\tb.setup()\n\n\tb.decayN()\n\n\tt := b.duration(b.n)\n\n\tif b.n < math.MaxUint64 {\n\t\tb.n++\n\t}\n\n\tif !b.noJitter {\n\t\tprngMu.Lock()\n\t\tt = time.Duration(prng.Int63n(int64(t)))\n\t\tprngMu.Unlock()\n\t}\n\n\treturn t\n}", "func (b *backoff) Duration() time.Duration {\n\tbackoff := float64(b.n) + 1\n\td := math.Min(b.InitialDelay.Seconds()*backoff, b.MaxDelay.Seconds())\n\tb.n++\n\treturn time.Duration(d) * time.Second\n}", "func (b *Backoff) Duration() time.Duration {\n\tbase := b.Min + b.delta\n\tpause := base\n\tif b.Jitter { // Add a number in the range [0, pause).\n\t\tpause += time.Duration(rand.Int63n(int64(pause)))\n\t}\n\n\tnextPause := time.Duration(float64(base) * b.Factor)\n\tif nextPause > b.Max || nextPause < b.Min { // Multiplication could overflow.\n\t\tnextPause = b.Max\n\t}\n\tb.delta = nextPause - b.Min\n\n\treturn pause\n}", "func getDelay() time.Duration {\n\tDelaySeconds := 7200\n\n\tif utils.DebugMode() {\n\t\tDelaySeconds = 20\n\t}\n\n\treturn time.Duration(DelaySeconds)\n}", "func (clus *Cluster) GetCaseDelayDuration() time.Duration {\n\treturn time.Duration(clus.Tester.CaseDelayMs) * time.Millisecond\n}", "func (o ApplicationOperationRetryBackoffOutput) Duration() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationOperationRetryBackoff) *string { return v.Duration }).(pulumi.StringPtrOutput)\n}", "func (o ApplicationSpecSyncPolicyRetryBackoffOutput) Duration() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationSpecSyncPolicyRetryBackoff) *string { return v.Duration }).(pulumi.StringPtrOutput)\n}", "func (timeout *Timeout) Duration() time.Duration {\n\treturn timeout.d\n}", "func (d duration) Duration() time.Duration {\n\treturn time.Duration(d)\n}", "func (o ApplicationStatusOperationStateOperationRetryBackoffOutput) Duration() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ApplicationStatusOperationStateOperationRetryBackoff) *string { return v.Duration }).(pulumi.StringPtrOutput)\n}", "func (b *ConstantBackoff) Duration() time.Duration {\n\tb.retry++\n\treturn time.Duration(b.Time) * b.TimeUnit\n}", "func (o ApplicationOperationRetryBackoffPtrOutput) Duration() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ApplicationOperationRetryBackoff) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Duration\n\t}).(pulumi.StringPtrOutput)\n}", "func (o ApplicationSpecSyncPolicyRetryBackoffPtrOutput) Duration() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ApplicationSpecSyncPolicyRetryBackoff) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Duration\n\t}).(pulumi.StringPtrOutput)\n}", "func (i *Interval) Delay() time.Duration {\n\treturn i.delay\n}", "func Delay(n *NetOp, statusCode int, pass int, duration time.Duration) time.Duration {\n\tm := fmt.Sprintf(\"Delay: HTTP error %v on %v. 
Sleeping %v seconds, pass %d of %d.\",\n\t\tstatusCode, n.Endpoint, duration.Seconds(), pass, MaxWaitIterations)\n\tlog.Println(m)\n\tn.Println(m)\n\ttime.Sleep(duration)\n\tduration = duration * Multiplier\n\treturn duration\n}", "func (o ApplicationStatusOperationStateOperationRetryBackoffPtrOutput) Duration() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ApplicationStatusOperationStateOperationRetryBackoff) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Duration\n\t}).(pulumi.StringPtrOutput)\n}", "func Duration(ds *dpb.Duration) time.Duration {\n\td, err := ptypes.Duration(ds)\n\tif err != nil {\n\t\treturn 0\n\t}\n\treturn d\n}", "func (e *Execution) Delay() time.Duration {\n\treturn e.ReceivedTime.Sub(e.ExecDate)\n}", "func (r *RandomLib) Duration(min, max int) time.Duration {\n\treturn time.Duration(r.Int(min, max))\n}", "func Duration(flag string, value time.Duration, description string) *time.Duration {\n\tvar v time.Duration\n\tDurationVar(&v, flag, value, description)\n\treturn &v\n}", "func (o PatchDeploymentOutput) Duration() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *PatchDeployment) pulumi.StringPtrOutput { return v.Duration }).(pulumi.StringPtrOutput)\n}", "func (decoder *Decoder) Duration() time.Duration {\n\treturn decoder.ByteOffsetToDur(decoder.dataSize)\n}", "func getDuration(seconds int) time.Duration {\n\treturn time.Duration(seconds) * time.Second\n}", "func (td *jsonDuration) Duration() time.Duration {\r\n\treturn time.Duration(*td)\r\n}", "func (b Backoff) Delay() time.Duration {\n\tif b.attempts == 0 {\n\t\treturn time.Duration(0)\n\t}\n\n\tbackoff := math.Pow(_backoffFactor, float64(b.attempts))\n\n\treturn time.Duration(backoff) * time.Second\n}", "func (d *Duration) Duration() time.Duration {\n\tif d == nil {\n\t\treturn 0\n\t}\n\treturn (time.Duration(d.Seconds) * time.Second) + (time.Duration(d.Nanos) * time.Nanosecond)\n}", "func (pomo *Pomo) GetDuration() string {\n\n\t// if pomo is off do not output anything\n\tif pomo.Status == OFF {\n\t\treturn \"\"\n\t}\n\n\t// if pomo run out of time that was set\n\t// make a blinking animation and send ntification\n\tif pomo.Time < 0 {\n\n\t\t// if user not notified\n\t\tif !pomo.Notified {\n\n\t\t\t// notify the user\n\t\t\tgo notifyUser(NOTIFICATION_MESSAGE)\n\n\t\t\tpomo.Notified = true\n\t\t}\n\n\t\t// emoji_id is a number between 0 and 1\n\t\temoji_id := (pomo.Time.Milliseconds() / 1000 % 2) * (-1)\n\n\t\treturn fmt.Sprintf(\"%s%s\\n\", pomo.Blink[emoji_id], pomo.Time)\n\t}\n\n\treturn fmt.Sprintf(\"%s%s\\n\", pomo.Emoji, pomo.Time)\n}", "func (b *Backoff) duration(n uint64) (t time.Duration) {\n\t// Saturate pow\n\tpow := time.Duration(math.MaxInt64)\n\tif n < 63 {\n\t\tpow = 1 << n\n\t}\n\n\tt = b.interval * pow\n\tif t/pow != b.interval || t > b.maxDuration {\n\t\tt = b.maxDuration\n\t}\n\n\treturn\n}", "func (o QuotaLimitOutput) Duration() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v QuotaLimit) *string { return v.Duration }).(pulumi.StringPtrOutput)\n}", "func addDelay(d time.Duration) time.Duration {\n\trand.Seed(time.Now().UnixNano())\n\n\tsec := int(math.Max(float64(d/time.Second), 1))\n\tsec = int(math.Min(float64(sec+rand.Intn(9))+1, 60)) // #nosec G404\n\n\treturn time.Duration(sec) * time.Second\n}", "func addDelay(d time.Duration) time.Duration {\n\trand.Seed(time.Now().UnixNano())\n\n\tsec := int(math.Max(float64(d/time.Second), 1))\n\tsec = int(math.Min(float64(sec+rand.Intn(9))+1, 60)) // #nosec G404\n\n\treturn time.Duration(sec) * time.Second\n}", "func 
toDelay(apiTime int64) time.Duration {\n\treturn time.Now().Sub(time.Unix(apiTime, 10))\n}", "func (s *Conn) Delay() time.Duration {\n\tif s.recovery.lastPacketSchedule.IsZero() {\n\t\treturn 0\n\t}\n\tnow := s.timeFn()\n\tdelay := s.recovery.lastPacketSchedule.Sub(now)\n\tdebug(\"packet delay: %v\", delay)\n\treturn delay\n}", "func (d *delayer) SetDelay(seconds int) {\n\td.delaySeconds = seconds\n}", "func (o InstanceMaintenancePolicyWeeklyMaintenanceWindowOutput) Duration() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v InstanceMaintenancePolicyWeeklyMaintenanceWindow) *string { return v.Duration }).(pulumi.StringPtrOutput)\n}", "func (o InstanceMaintenancePolicyWeeklyMaintenanceWindowOutput) Duration() pulumi.StringOutput {\n\treturn o.ApplyT(func(v InstanceMaintenancePolicyWeeklyMaintenanceWindow) string { return v.Duration }).(pulumi.StringOutput)\n}", "func (t *Tracker) sleepDuration(attempts int) time.Duration {\n\tsleepTime := (t.rand.Float64() + 1) + math.Pow(2, float64(attempts-0))\n\tdurationStr := fmt.Sprintf(\"%ss\", strconv.FormatFloat(sleepTime, 'f', 2, 64))\n\tsleepDuration, _ := time.ParseDuration(durationStr)\n\treturn sleepDuration\n}", "func Duration(d time.Duration, err error) time.Duration {\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\n\treturn d\n}", "func (t *task) Duration() time.Duration {\n\treturn t.duration\n}", "func (r RecordTTL) Duration() time.Duration {\n\treturn (time.Second * time.Duration(int(r)))\n}", "func (in *Delay) DeepCopy() *Delay {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Delay)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (d Duration) Dur() time.Duration {\n\treturn time.Duration(d)\n}", "func (i Interval) Duration() time.Duration {\n\treturn time.Duration(i)\n}", "func (pollCfg PollConfig) delay() time.Duration {\n\treturn delay(pollCfg.MeanDelay, pollCfg.StdDevDelay, pollCfg.MinDelay, pollCfg.MaxDelay)\n}", "func (t *limiter) Delay() time.Duration {\n\tif t.limit > 0 {\n\n\t\tdelta := time.Now().Sub(t.start).Seconds()\n\t\trate := int64(float64(t.count) / delta)\n\n\t\t// Determine how far off from the max rate we are\n\t\tdelayAdj := float64((t.limit - rate)) / float64(t.limit)\n\n\t\t// Don't adjust by more than 1 second at a time\n\t\tdelayAdj = t.clamp(delayAdj, -1, 1)\n\n\t\tt.delay -= delayAdj\n\t\tif t.delay < 0 {\n\t\t\tt.delay = 0\n\t\t}\n\n\t\treturn time.Duration(t.delay) * time.Second\n\t}\n\treturn time.Duration(0)\n}", "func CombineDelay(delays ...DelayTypeFunc) DelayTypeFunc {\n\tconst maxInt64 = uint64(math.MaxInt64)\n\n\treturn func(n uint, err error, config *Config) time.Duration {\n\t\tvar total uint64\n\t\tfor _, delay := range delays {\n\t\t\ttotal += uint64(delay(n, err, config))\n\t\t\tif total > maxInt64 {\n\t\t\t\ttotal = maxInt64\n\t\t\t}\n\t\t}\n\n\t\treturn time.Duration(total)\n\t}\n}", "func (s *Script) Duration() time.Duration {\n\treturn s.End - s.Start\n}", "func (o MonitorV1Output) Delay() pulumi.IntOutput {\n\treturn o.ApplyT(func(v *MonitorV1) pulumi.IntOutput { return v.Delay }).(pulumi.IntOutput)\n}", "func (o ResourcePolicyDailyCycleResponseOutput) Duration() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ResourcePolicyDailyCycleResponse) string { return v.Duration }).(pulumi.StringOutput)\n}", "func (t timeFlag) Duration() time.Duration {\n\treturn time.Duration(t)\n}", "func (t *DelayRecent) AddDuration(duration time.Duration) {\n\tdelay := int64(duration / time.Microsecond)\n\tt.Add(delay)\n}", "func Duration(str string) time.Duration {\n\tdur, err := 
time.ParseDuration(str)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn dur\n}", "func Duration(name string, alias rune, value time.Duration, usage string, fn Callback) *time.Duration {\n\treturn CommandLine.Duration(name, alias, value, usage, fn)\n}", "func (c *Client) Duration() (float64, error) {\n\treturn c.GetFloatProperty(\"duration\")\n}", "func Duration(val interface{}) time.Duration {\r\n\tswitch t := val.(type) {\r\n\tcase int:\r\n\t\treturn time.Duration(int64(t))\r\n\tcase int8:\r\n\t\treturn time.Duration(int64(t))\r\n\tcase int16:\r\n\t\treturn time.Duration(int64(t))\r\n\tcase int32:\r\n\t\treturn time.Duration(int64(t))\r\n\tcase int64:\r\n\t\treturn time.Duration(t)\r\n\tcase uint:\r\n\t\treturn time.Duration(int64(t))\r\n\tcase uint8:\r\n\t\treturn time.Duration(int64(t))\r\n\tcase uint16:\r\n\t\treturn time.Duration(int64(t))\r\n\tcase uint32:\r\n\t\treturn time.Duration(int64(t))\r\n\tcase uint64:\r\n\t\treturn time.Duration(int64(t))\r\n\tdefault:\r\n\t\ts := String(val)\r\n\t\treturn strToDuration(s)\r\n\t}\r\n\tpanic(\"Reached\")\r\n}", "func Duration(name string, defaultValue time.Duration) time.Duration {\n\tif strVal, ok := os.LookupEnv(name); ok {\n\t\tif d, err := time.ParseDuration(strVal); err == nil {\n\t\t\treturn d\n\t\t}\n\t}\n\n\treturn defaultValue\n}", "func Duration(key string, def time.Duration) time.Duration {\n\tif s := String(key, \"\"); s != \"\" {\n\t\tif d, err := time.ParseDuration(s); err == nil {\n\t\t\treturn d\n\t\t} else {\n\t\t\tLog(key, err)\n\t\t}\n\t}\n\treturn def\n}", "func (t ntpTimeShort) Duration() time.Duration {\n\tsec := uint64(t>>16) * nanoPerSec\n\tfrac := uint64(t&0xffff) * nanoPerSec\n\tnsec := frac >> 16\n\tif uint16(frac) >= 0x8000 {\n\t\tnsec++\n\t}\n\treturn time.Duration(sec + nsec)\n}", "func (o ResourcePolicyWeeklyCycleDayOfWeekResponseOutput) Duration() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ResourcePolicyWeeklyCycleDayOfWeekResponse) string { return v.Duration }).(pulumi.StringOutput)\n}", "func (o QuotaLimitResponseOutput) Duration() pulumi.StringOutput {\n\treturn o.ApplyT(func(v QuotaLimitResponse) string { return v.Duration }).(pulumi.StringOutput)\n}", "func (s *RoutingProfileQueueConfigSummary) SetDelay(v int64) *RoutingProfileQueueConfigSummary {\n\ts.Delay = &v\n\treturn s\n}", "func (t ntpTime) Duration() time.Duration {\n\tsec := (t >> 32) * nanoPerSec\n\tfrac := (t & 0xffffffff) * nanoPerSec >> 32\n\treturn time.Duration(sec + frac)\n}", "func (d UnixDuration) Duration() time.Duration {\n\treturn time.Duration(d) * time.Second\n}", "func (tr *customTransport) Duration() time.Duration {\n\treturn tr.reqEnd.Sub(tr.reqStart)\n}", "func Duration(name string, value time.Duration, usage string) *time.Duration {\n\treturn ex.FlagSet.Duration(name, value, usage)\n}", "func MakeDuration(target string, def int) time.Duration {\n\tif !elapso.MatchString(target) {\n\t\treturn time.Duration(def)\n\t}\n\n\tmatchs := elapso.FindAllStringSubmatch(target, -1)\n\n\tif len(matchs) <= 0 {\n\t\treturn time.Duration(def)\n\t}\n\n\tmatch := matchs[0]\n\n\tif len(match) < 3 {\n\t\treturn time.Duration(def)\n\t}\n\n\tdur := time.Duration(ConvertToInt(match[1], def))\n\n\tmtype := match[2]\n\n\tswitch mtype {\n\tcase \"s\":\n\t\treturn dur * time.Second\n\tcase \"mcs\":\n\t\treturn dur * time.Microsecond\n\tcase \"ns\":\n\t\treturn dur * time.Nanosecond\n\tcase \"ms\":\n\t\treturn dur * time.Millisecond\n\tcase \"m\":\n\t\treturn dur * time.Minute\n\tcase \"h\":\n\t\treturn dur * time.Hour\n\tdefault:\n\t\treturn 
time.Duration(dur) * time.Second\n\t}\n}", "func Duration(key string, def time.Duration) time.Duration {\n\tif env, ok := os.LookupEnv(key); ok {\n\t\tt, err := time.ParseDuration(env)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"env: parse time.Duration from flag: %s\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\treturn t\n\t}\n\treturn def\n}", "func (o ResourcePolicyHourlyCycleResponseOutput) Duration() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ResourcePolicyHourlyCycleResponse) string { return v.Duration }).(pulumi.StringOutput)\n}", "func GetDuration(key string) time.Duration { return viper.GetDuration(key) }", "func (f *FlagSet) Duration(name string, alias rune, value time.Duration, usage string, fn Callback) *time.Duration {\n\tp := new(time.Duration)\n\tf.DurationVar(p, name, alias, value, usage, fn)\n\treturn p\n}", "func ConstDelay(delay time.Duration) DelayFn {\n\treturn func(attempt Attempt) time.Duration {\n\t\treturn delay\n\t}\n}", "func (pa *PodAutoscaler) ScaleDownDelay() (time.Duration, bool) {\n\t// The value is validated in the webhook.\n\treturn pa.annotationDuration(autoscaling.ScaleDownDelayAnnotation)\n}", "func Duration(d time.Duration) string {\n\treturn d.String()\n}", "func GetPauseDelay() (time.Duration, error) {\n\tvalue, err := strconv.Atoi(getEnv(\"ENM_CONFIG_PAUSE_DELAY\", \"10\"))\n\treturn time.Duration(value) * time.Second, err\n}", "func jitterDuration(d duration, scale float64) duration {\n\treturn duration(jitterFloat64(float64(d), scale))\n}", "func Duration(value int) Command {\n\treturn &durationCommand{\n\t\tvalue: value,\n\t}\n}", "func Delay(duration time.Duration) Strategy {\n\treturn func(breaker Breaker, attempt uint, _ error) bool {\n\t\tkeep := true\n\t\tif attempt == 0 {\n\t\t\ttimer := time.NewTimer(duration)\n\t\t\tselect {\n\t\t\tcase <-timer.C:\n\t\t\tcase <-breaker.Done():\n\t\t\t\tkeep = false\n\t\t\t}\n\t\t\tstop(timer)\n\t\t}\n\t\treturn keep\n\t}\n}", "func (v *Validator) Duration() time.Duration {\n\treturn v.EndTime().Sub(v.StartTime())\n}", "func Delay(delayString string) {\n\tduration, _ := time.ParseDuration(delayString)\n\ttime.Sleep(duration)\n\n}", "func (zap ChZap) Duration(provided *ChZap) time.Duration { //added *!\n\t//TODO write this method (1p)\n//\t\tmy_duration := zap.Time //- provided.Time\n\t//\tfmt.Println(my_duration)\n\tduration := zap.Time.Sub(provided.Time)\n\tif duration < 0 {\n\t\treturn provided.Time.Sub(zap.Time)\n\t}\n\treturn duration\n\n}", "func (o *EventAttributes) GetDuration() int64 {\n\tif o == nil || o.Duration == nil {\n\t\tvar ret int64\n\t\treturn ret\n\t}\n\treturn *o.Duration\n}", "func Duration(name string) (d time.Duration, err error) {\n\tvar v string\n\tif v, err = getenv(name); err != nil {\n\t\treturn\n\t}\n\td, err = time.ParseDuration(v)\n\treturn\n}", "func (c *Client) Duration(stat string, duration time.Duration, rate float64) error {\n\treturn c.send(stat, rate, \"%d|ms\", millisecond(duration))\n}", "func (p *Provider) backoffDuration() time.Duration {\n\t// Use the default backoff\n\tbackoff := DefaultBackoff\n\n\t// Check for a server specified backoff\n\tp.backoffLock.Lock()\n\tif p.backoff != 0 {\n\t\tbackoff = p.backoff\n\t}\n\tif p.noRetry {\n\t\tbackoff = 0\n\t}\n\tp.backoffLock.Unlock()\n\n\treturn backoff\n}", "func (o ResourcePolicyDailyCycleResponsePtrOutput) Duration() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ResourcePolicyDailyCycleResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn 
&v.Duration\n\t}).(pulumi.StringPtrOutput)\n}", "func (tr Row) ForceDuration(nn int) (val time.Duration) {\n\tval, _ = tr.DurationErr(nn)\n\treturn\n}", "func Duration(name string, value time.Duration) *time.Duration {\n\tp := new(time.Duration)\n\tDurationVar(p, name, value)\n\treturn p\n}", "func (s *RoutingProfileQueueConfig) SetDelay(v int64) *RoutingProfileQueueConfig {\n\ts.Delay = &v\n\treturn s\n}", "func Duration(v time.Duration) *time.Duration {\n\treturn &v\n}", "func Duration(v time.Duration) *time.Duration {\n\treturn &v\n}", "func (t ntpTime) Duration() time.Duration {\n\tsec := (t >> 32) * nanoPerSec\n\tfrac := (t & 0xffffffff) * nanoPerSec\n\tnsec := frac >> 32\n\tif uint32(frac) >= 0x80000000 {\n\t\tnsec++\n\t}\n\treturn time.Duration(sec + nsec)\n}", "func (e EnvVars) Duration(ctx context.Context) (*time.Duration, error) {\n\tvars := e.vars(ctx)\n\n\tif vars.Has(e.Key) {\n\t\tvalue, err := vars.Duration(e.Key)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn &value, nil\n\t}\n\treturn nil, nil\n}", "func GetDuration(key string) time.Duration {\n\tswitch key {\n\tcase keyMongoConnTimeout:\n\t\treturn time.Duration(base.m.Mongo.ConnectionTimeout)\n\tcase keyRedisIdelTimeout:\n\t\treturn time.Duration(base.m.Redis.IdelTimeout)\n\t}\n\treturn 0\n}", "func (d Duration) TimeDuration() time.Duration {\n\treturn time.Duration(int64(d) / Millisecond * int64(time.Millisecond))\n}", "func Duration(name string, val time.Duration) Field {\n\treturn Field(zap.Duration(name, val))\n}", "func GetMetricCleanupDelaySeconds() time.Duration {\n\treturn time.Duration(defaultMetricCleanupDelay) * time.Second\n}", "func (o *SLOCorrectionCreateRequestAttributes) GetDuration() int64 {\n\tif o == nil || o.Duration == nil {\n\t\tvar ret int64\n\t\treturn ret\n\t}\n\treturn *o.Duration\n}", "func (p *parser) duration() Node {\n\ttoken := p.expect(TokenDuration)\n\tnum, err := newDur(token.pos, token.val)\n\tif err != nil {\n\t\tp.error(err)\n\t}\n\treturn num\n}" ]
[ "0.79466355", "0.7600061", "0.6968741", "0.68646014", "0.68198526", "0.6818932", "0.6725045", "0.66359437", "0.66057986", "0.6569005", "0.6561068", "0.6546487", "0.6497679", "0.64850616", "0.6454614", "0.64448506", "0.64159924", "0.6401013", "0.6279634", "0.6225847", "0.61952806", "0.61644435", "0.6132663", "0.61266464", "0.6114488", "0.6089928", "0.6088944", "0.6077972", "0.60768485", "0.60743874", "0.60598683", "0.6033521", "0.6033521", "0.60331297", "0.60243815", "0.59849125", "0.59782124", "0.59756684", "0.59729916", "0.596127", "0.59584594", "0.59539545", "0.5938783", "0.5938592", "0.5932341", "0.5929224", "0.5915334", "0.58976513", "0.5883233", "0.588088", "0.5859305", "0.5853237", "0.5850729", "0.58383656", "0.5830281", "0.5828301", "0.5820522", "0.5814335", "0.5808388", "0.5799059", "0.57963663", "0.57947934", "0.5794051", "0.579232", "0.57911146", "0.57886815", "0.57708114", "0.57647175", "0.5764567", "0.57584184", "0.5753702", "0.5751872", "0.574497", "0.57420534", "0.5735047", "0.5728179", "0.572346", "0.5713789", "0.57027084", "0.5697583", "0.5695191", "0.5687038", "0.5683595", "0.56790787", "0.5676446", "0.5670724", "0.56677413", "0.5666194", "0.56661433", "0.5662427", "0.56590337", "0.56590337", "0.56582093", "0.5657145", "0.5656827", "0.56506264", "0.5648577", "0.56437975", "0.5639342", "0.56333476" ]
0.78912795
1
Body packs job payload into binary payload.
func (i *Item) Body() []byte { return utils.AsBytes(i.Payload) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (enc *Enqueuer) BodyBytes() ([]byte, error) {\n\tvar body = map[string]string{\n\t\t\"account\": enc.account,\n\t\t\"repo\": enc.repo,\n\t\t\"ref\": enc.ref,\n\t\t\"bobfile\": enc.bobfile,\n\t}\n\tbodyBytes, err := json.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bodyBytes, nil\n}", "func (j *Job) Encode(payload interface{}) error {\n\tvar err error\n\tj.Raw, err = encode(ContentTypeMsgpack, &payload)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (o DiagnosticBackendRequestOutput) BodyBytes() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v DiagnosticBackendRequest) *int { return v.BodyBytes }).(pulumi.IntPtrOutput)\n}", "func (o ApiDiagnosticBackendRequestOutput) BodyBytes() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v ApiDiagnosticBackendRequest) *int { return v.BodyBytes }).(pulumi.IntPtrOutput)\n}", "func (o DiagnosticFrontendRequestOutput) BodyBytes() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v DiagnosticFrontendRequest) *int { return v.BodyBytes }).(pulumi.IntPtrOutput)\n}", "func (o ApiDiagnosticFrontendRequestOutput) BodyBytes() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v ApiDiagnosticFrontendRequest) *int { return v.BodyBytes }).(pulumi.IntPtrOutput)\n}", "func buildPostBodyFromFlags(name string, connection string, schedule string, query string, email string) []byte {\n\n\tbodyString := fmt.Sprintf(\"name: %s\\nconnection: %s\\nschedule: %s\\nquery: %s\\noutputs:\\n - type: email\\n address: %s\\n\", name, connection, schedule, query, email)\n\n\treturn []byte(bodyString)\n}", "func (b *BaseHandler) Payload() []byte {\n\tcontent, err := ioutil.ReadAll(b.request.Body)\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn content\n}", "func (j *JobInterchange) Raw() []byte { return j.Job }", "func (rs *Restake) Payload() []byte { return rs.payload }", "func (p NullPayload) EncodeBinary(w *io.BinWriter) {}", "func (request ListManagedInstanceUpdatablePackagesRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool) {\n\n\treturn nil, false\n\n}", "func GeneratePromReadBody(t *testing.T) io.Reader {\n\treq := GeneratePromReadRequest()\n\tdata, err := proto.Marshal(req)\n\tif err != nil {\n\t\tt.Fatal(\"couldn't marshal prometheus request\")\n\t}\n\n\tcompressed := snappy.Encode(nil, data)\n\t// Uncomment the line below to write the data into a file useful for integration testing\n\t//ioutil.WriteFile(\"/tmp/dat1\", compressed, 0644)\n\tb := bytes.NewReader(compressed)\n\treturn b\n}", "func (cm *CallManager) EncodePayload(rpc *RPC) []byte {\n\tbuf := bytes.NewBuffer(nil)\n\n\t//cm.logger.Logf(\"encoding payload of call %d\\n\", rpc.Header.Call)\n\n\tbencode.Marshal(buf, rpc.Payload)\n\tb := buf.Bytes()\n\t/*if len(b) == 0 {\n\t\tb = make([]byte, 2)\n\t\tb[0] = 'l'\n\t\tb[1] = 'e'\n\t}*/\n\treturn b\n}", "func (o DiagnosticBackendRequestPtrOutput) BodyBytes() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *DiagnosticBackendRequest) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.BodyBytes\n\t}).(pulumi.IntPtrOutput)\n}", "func encodeBody(obj any) (io.Reader, error) {\n\tbuf := bytes.NewBuffer(nil)\n\tenc := json.NewEncoder(buf)\n\tif err := enc.Encode(obj); err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf, nil\n}", "func (j Job) Bytes() []byte {\n\tif v, ok := j.data.([]byte); ok {\n\t\treturn v\n\t} else if s, ok := j.data.(string); ok {\n\t\treturn []byte(s)\n\t}\n\n\treturn nil\n}", "func ConvertBodyToByteArray(r io.Reader, print bool) []byte {\n\tvar mw io.Writer\n\tvar body bytes.Buffer\n\tif 
print {\n\t\tmw = io.MultiWriter(&body, os.Stdout)\n\t} else {\n\t\tmw = io.MultiWriter(&body)\n\t}\n\tio.Copy(mw, r)\n\tif print {\n\t\tfmt.Printf(\"\\n\")\n\t}\n\n\treturn body.Bytes()\n}", "func ConvertBodyToByteArray(r io.Reader, print bool) []byte {\n\tvar mw io.Writer\n\tvar body bytes.Buffer\n\tif print {\n\t\tmw = io.MultiWriter(&body, os.Stdout)\n\t} else {\n\t\tmw = io.MultiWriter(&body)\n\t}\n\tio.Copy(mw, r)\n\tif print {\n\t\tfmt.Printf(\"\\n\")\n\t}\n\n\treturn body.Bytes()\n}", "func (e *Event) packBytes() ([]byte, error) {\n\tdata := make([]interface{}, 2)\n\tdata[0] = e.Header\n\tdata[1] = e.Name\n\n\tfor _, a := range e.Args {\n\t\tdata = append(data, a)\n\t}\n\n\tvar buf []byte\n\n\tenc := codec.NewEncoderBytes(&buf, &mh)\n\tif err := enc.Encode(data); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf, nil\n}", "func (bj BatchJob) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]interface{})\n\tif bj.LivyInfo != nil {\n\t\tobjectMap[\"livyInfo\"] = bj.LivyInfo\n\t}\n\tif bj.Name != nil {\n\t\tobjectMap[\"name\"] = bj.Name\n\t}\n\tif bj.WorkspaceName != nil {\n\t\tobjectMap[\"workspaceName\"] = bj.WorkspaceName\n\t}\n\tif bj.SparkPoolName != nil {\n\t\tobjectMap[\"sparkPoolName\"] = bj.SparkPoolName\n\t}\n\tif bj.SubmitterName != nil {\n\t\tobjectMap[\"submitterName\"] = bj.SubmitterName\n\t}\n\tif bj.SubmitterID != nil {\n\t\tobjectMap[\"submitterId\"] = bj.SubmitterID\n\t}\n\tif bj.ArtifactID != nil {\n\t\tobjectMap[\"artifactId\"] = bj.ArtifactID\n\t}\n\tif bj.JobType != \"\" {\n\t\tobjectMap[\"jobType\"] = bj.JobType\n\t}\n\tif bj.Result != \"\" {\n\t\tobjectMap[\"result\"] = bj.Result\n\t}\n\tif bj.Scheduler != nil {\n\t\tobjectMap[\"schedulerInfo\"] = bj.Scheduler\n\t}\n\tif bj.Plugin != nil {\n\t\tobjectMap[\"pluginInfo\"] = bj.Plugin\n\t}\n\tif bj.Errors != nil {\n\t\tobjectMap[\"errorInfo\"] = bj.Errors\n\t}\n\tif bj.Tags != nil {\n\t\tobjectMap[\"tags\"] = bj.Tags\n\t}\n\tif bj.ID != nil {\n\t\tobjectMap[\"id\"] = bj.ID\n\t}\n\tif bj.AppID != nil {\n\t\tobjectMap[\"appId\"] = bj.AppID\n\t}\n\tif bj.AppInfo != nil {\n\t\tobjectMap[\"appInfo\"] = bj.AppInfo\n\t}\n\tif bj.State != nil {\n\t\tobjectMap[\"state\"] = bj.State\n\t}\n\tif bj.LogLines != nil {\n\t\tobjectMap[\"log\"] = bj.LogLines\n\t}\n\treturn json.Marshal(objectMap)\n}", "func encodeBody(obj interface{}) (io.Reader, error) {\n\tbuf := bytes.NewBuffer(nil)\n\tenc := json.NewEncoder(buf)\n\tif err := enc.Encode(obj); err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf, nil\n}", "func (input *BeegoInput) CopyBody(MaxMemory int64) []byte {\n\tif input.Context.Request.Body == nil {\n\t\treturn []byte{}\n\t}\n\n\tvar requestbody []byte\n\tsafe := &io.LimitedReader{R: input.Context.Request.Body, N: MaxMemory}\n\tif input.Header(\"Content-Encoding\") == \"gzip\" {\n\t\treader, err := gzip.NewReader(safe)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\trequestbody, _ = ioutil.ReadAll(reader)\n\t} else {\n\t\trequestbody, _ = ioutil.ReadAll(safe)\n\t}\n\n\tinput.Context.Request.Body.Close()\n\tbf := bytes.NewBuffer(requestbody)\n\tinput.Context.Request.Body = http.MaxBytesReader(input.Context.ResponseWriter, ioutil.NopCloser(bf), MaxMemory)\n\tinput.RequestBody = requestbody\n\treturn requestbody\n}", "func (o ApiDiagnosticBackendRequestPtrOutput) BodyBytes() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *ApiDiagnosticBackendRequest) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.BodyBytes\n\t}).(pulumi.IntPtrOutput)\n}", "func (j *Job) Decode(payload interface{}) 
error {\n\treturn decode(ContentTypeMsgpack, j.Raw, &payload)\n}", "func encodeBody(obj interface{}) (io.Reader, error) {\n\tif obj == nil {\n\t\treturn nil, nil\n\t}\n\tbuf := bytes.NewBuffer(nil)\n\tenc := json.NewEncoder(buf)\n\tif err := enc.Encode(obj); err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf, nil\n}", "func encodeBody (obj interface{}) (io.Reader, error) {\n\t\tif reader, ok := obj.(io.Reader); ok {\n\t\t\treturn reader, nil\n\t\t}\n\t\n\t\tbuf := bytes.NewBuffer(nil)\n\t\tenc := json.NewEncoder(buf)\n\t\tif err := enc.Encode(obj); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn buf, nil\n\t}", "func (req MinRequest) Body(res tarantool.SchemaResolver, enc *msgpack.Encoder) error {\n\targs := minArgs{Space: req.space, Index: req.index, Opts: req.opts}\n\treq.impl = req.impl.Args(args)\n\treturn req.impl.Body(res, enc)\n}", "func (rw *DataRW) payload(msg Msg) []byte {\n\tbuffer := bytes.NewBuffer(make([]byte, 0, 65536))\n\t_, err := io.Copy(buffer, msg.Payload)\n\tif err != nil {\n\t\treturn nil\n\t}\n\ttemp := buffer.Bytes()\n\tlength := len(temp)\n\tvar body []byte\n\t//are we wasting more than 5% space?\n\tif cap(temp) > (length + length/5) {\n\t\tbody = make([]byte, length)\n\t\tcopy(body, temp)\n\t} else {\n\t\tbody = temp\n\t}\n\treturn body\n}", "func (ctx *Context) BodyBytes() []byte {\r\n\tif ctx.limitedRequestBody != nil {\r\n\t\treturn ctx.limitedRequestBody\r\n\t}\r\n\tif ctx.R.Body == nil {\r\n\t\tctx.limitedRequestBody = []byte{}\r\n\t\treturn ctx.limitedRequestBody\r\n\t}\r\n\tsafe := &io.LimitedReader{R: ctx.R.Body, N: ctx.frame.config.multipartMaxMemory}\r\n\tlimitedRequestBody, _ := ioutil.ReadAll(safe)\r\n\tctx.R.Body.Close()\r\n\tbf := bytes.NewBuffer(limitedRequestBody)\r\n\tctx.R.Body = ioutil.NopCloser(bf)\r\n\tctx.limitedRequestBody = limitedRequestBody\r\n\treturn limitedRequestBody\r\n}", "func (o ApiDiagnosticBackendResponseOutput) BodyBytes() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v ApiDiagnosticBackendResponse) *int { return v.BodyBytes }).(pulumi.IntPtrOutput)\n}", "func (o DiagnosticBackendResponseOutput) BodyBytes() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v DiagnosticBackendResponse) *int { return v.BodyBytes }).(pulumi.IntPtrOutput)\n}", "func (b *BitcoinClient) createBody(rpcBody *RPCBody) (*bytes.Buffer, error) {\n\tbodyJSON, err := json.Marshal(rpcBody)\n\tif err != nil {\n\t\tlog.Println(ErrCreatingBody)\n\t\treturn nil, ErrCreatingBody\n\t}\n\n\treturn bytes.NewBuffer(bodyJSON), nil\n}", "func (s CreateJobInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.HopDestinations != nil {\n\t\tv := s.HopDestinations\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"hopDestinations\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\t}\n\tif s.AccelerationSettings != nil {\n\t\tv := s.AccelerationSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"accelerationSettings\", v, metadata)\n\t}\n\tif len(s.BillingTagsSource) > 0 {\n\t\tv := s.BillingTagsSource\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"billingTagsSource\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tvar ClientRequestToken string\n\tif s.ClientRequestToken != nil {\n\t\tClientRequestToken = *s.ClientRequestToken\n\t} else {\n\t\tClientRequestToken = 
protocol.GetIdempotencyToken()\n\t}\n\t{\n\t\tv := ClientRequestToken\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"clientRequestToken\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.JobTemplate != nil {\n\t\tv := *s.JobTemplate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"jobTemplate\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Priority != nil {\n\t\tv := *s.Priority\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"priority\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Queue != nil {\n\t\tv := *s.Queue\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"queue\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Role != nil {\n\t\tv := *s.Role\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"role\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Settings != nil {\n\t\tv := s.Settings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"settings\", v, metadata)\n\t}\n\tif len(s.SimulateReservedQueue) > 0 {\n\t\tv := s.SimulateReservedQueue\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"simulateReservedQueue\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.StatusUpdateInterval) > 0 {\n\t\tv := s.StatusUpdateInterval\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"statusUpdateInterval\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif s.UserMetadata != nil {\n\t\tv := s.UserMetadata\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"userMetadata\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\treturn nil\n}", "func prepareBigPayload() ([]byte, error) {\n\tpayload := make([]string, 0)\n\tfor len(payload) < 128 {\n\t\tpayload = append(payload, \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\")\n\t}\n\treturn json.Marshal(payload)\n}", "func encodeBody(obj interface{}) (io.Reader, error) {\n\tif reader, ok := obj.(io.Reader); ok {\n\t\treturn reader, nil\n\t}\n\n\tbuf := bytes.NewBuffer(nil)\n\tenc := json.NewEncoder(buf)\n\tif err := enc.Encode(obj); err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf, nil\n}", "func (r ApiApiProjectsIdDeploymentsPostRequest) Body(body RequestsCreateProjectDeploymentRequest) ApiApiProjectsIdDeploymentsPostRequest {\n\tr.body = &body\n\treturn r\n}", "func (e EncodedFile) BodyAfterScanning(bodyByte []byte) string {\n\tencodedFile := base64.StdEncoding.EncodeToString(bodyByte)\n\te.data[\"Base64\"] = encodedFile\n\tj, _ := json.Marshal(e.data)\n\treturn string(j)\n}", "func (tx *Genesis) MarshalBinary(scheme Scheme) ([]byte, error) {\n\tb, err := tx.BodyMarshalBinary(scheme)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to marshal Genesis transaction to bytes\")\n\t}\n\treturn b, nil\n}", "func (i *BodyInterceptor) Body() []byte {\n\treturn i.BodyBytes\n}", "func 
Body(body, contentType, transferEncoding string, opt Options) (string, error) {\n\t// attempt to do some base64-decoding anyway\n\tif decoded, err := base64.URLEncoding.DecodeString(body); err == nil {\n\t\tbody = string(decoded)\n\t}\n\tif decoded, err := base64.StdEncoding.DecodeString(body); err == nil {\n\t\tbody = string(decoded)\n\t}\n\n\tif strings.ToLower(transferEncoding) == \"quoted-printable\" {\n\t\tb, err := ioutil.ReadAll(quotedprintable.NewReader(strings.NewReader(body)))\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tbody = string(b)\n\t}\n\n\tct := strings.ToLower(contentType)\n\tif strings.Contains(ct, \"multipart/\") {\n\t\treturn parseMultipart(body, contentType, opt)\n\t}\n\n\tif !opt.SkipHTML && strings.Contains(ct, \"text/html\") {\n\t\tbody = stripHTML(body, opt)\n\t}\n\n\tbody = stripEmbedded(body, opt)\n\tif opt.LineLimit > 0 || opt.ColLimit > 0 {\n\t\tlines := strings.Split(body, \"\\n\")\n\t\tif len(lines) > opt.LineLimit {\n\t\t\tlines = lines[:opt.LineLimit]\n\t\t}\n\t\tfor kk, l := range lines {\n\t\t\tif len(l) > opt.ColLimit {\n\t\t\t\tlines[kk] = l[:opt.ColLimit]\n\t\t\t}\n\t\t}\n\t\tbody = strings.Join(lines, \"\\n\")\n\t}\n\treturn body, nil\n}", "func (bp *BasePayload) MarshalBinary() ([]byte, error) {\n\treturn json.Marshal(bp)\n}", "func (o DiagnosticFrontendRequestPtrOutput) BodyBytes() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *DiagnosticFrontendRequest) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.BodyBytes\n\t}).(pulumi.IntPtrOutput)\n}", "func (f *framer) payload() {\n\tf.flags |= flagCustomPayload\n}", "func ScriptingHandler(jobc chan<- *blobPutJob, router *backend.Router) func(http.ResponseWriter, *http.Request) {\n return func (w http.ResponseWriter, r *http.Request) {\n switch {\n case r.Method == \"POST\":\n decoder := json.NewDecoder(r.Body)\n data := map[string]interface{}{}\n if err := decoder.Decode(&data); err != nil {\n http.Error(w, err.Error(), http.StatusInternalServerError)\n }\n req := &backend.Request{\n Namespace: r.Header.Get(\"BlobStash-Namespace\"),\n }\n db := router.DB(req)\n fmt.Printf(\"Received script: %v\\n\", data)\n code := data[\"_script\"].(string)\n sargs := data[\"_args\"].(string)\n args := map[string]interface{}{}\n if err := json.Unmarshal([]byte(sargs), &args); err != nil {\n \thttp.Error(w, err.Error(), http.StatusInternalServerError)\n }\n out, tx := scripting.ExecScript(db, code, args)\n err, isErr := out[\"error\"]\n if isErr {\n\t\t\t\thttp.Error(w, err.(string), http.StatusInternalServerError)\n }\n if tx.Len() > 0 {\n \thash, js := tx.Dump()\n \tjobc<- newBlobPutJob(req.Meta(), hash, js, nil)\n }\n fmt.Printf(\"Script out: %+v\\n\", out)\n WriteJSON(w, &out)\n return\n default:\n w.WriteHeader(http.StatusMethodNotAllowed)\n return\n }\n }\n}", "func processPayloadBuildResponse(msg amqp.Delivery) {\n\tlogging.LogInfo(\"got message\", \"routingKey\", msg.RoutingKey)\n\tpayloadBuildResponse := PayloadBuildResponse{}\n\tif err := json.Unmarshal(msg.Body, &payloadBuildResponse); err != nil {\n\t\tlogging.LogError(err, \"Failed to process payload build response message\")\n\t} else {\n\t\t//logging.LogInfo(\"got build response\", \"buildMsg\", payloadBuildResponse)\n\t\tdatabasePayload := databaseStructs.Payload{}\n\t\tif err := database.DB.Get(&databasePayload, `SELECT \n\t\t\tpayload.build_message, payload.build_stderr, payload.build_stdout, payload.id, payload.build_phase,\n\t\t\tfilemeta.filename \"filemeta.filename\",\n\t\t\tfilemeta.id \"filemeta.id\"\n\t\t\tFROM 
payload \n\t\t\tJOIN filemeta ON payload.file_id = filemeta.id\n\t\t\tWHERE uuid=$1 \n\t\t\tLIMIT 1`, payloadBuildResponse.PayloadUUID); err != nil {\n\t\t\tlogging.LogError(err, \"Failed to get payload from the database\")\n\t\t} else {\n\t\t\tdatabasePayload.BuildMessage += payloadBuildResponse.BuildMessage\n\t\t\tdatabasePayload.BuildStderr += payloadBuildResponse.BuildStdErr\n\t\t\tdatabasePayload.BuildStdout += payloadBuildResponse.BuildStdOut\n\t\t\tif payloadBuildResponse.Success {\n\t\t\t\tdatabasePayload.BuildPhase = PAYLOAD_BUILD_STATUS_SUCCESS\n\t\t\t} else {\n\t\t\t\tdatabasePayload.BuildPhase = PAYLOAD_BUILD_STATUS_ERROR\n\t\t\t}\n\t\t\tif payloadBuildResponse.UpdatedFilename != nil {\n\t\t\t\tdatabasePayload.Filemeta.Filename = []byte(*payloadBuildResponse.UpdatedFilename)\n\t\t\t\tif _, err := database.DB.NamedExec(`UPDATE filemeta SET \n filename=:filename\n WHERE id=:id`, databasePayload.Filemeta); err != nil {\n\t\t\t\t\tlogging.LogError(err, \"Failed to update filename for payload\")\n\t\t\t\t}\n\t\t\t}\n\t\t\t/* Payload should be uploaded separately\n\t\t\tif payloadBuildResponse.Payload != nil && len(*payloadBuildResponse.Payload) > 0 {\n\t\t\t\tif err := os.WriteFile(databasePayload.Filemeta.Path, *payloadBuildResponse.Payload, 0600); err != nil {\n\t\t\t\t\tdatabasePayload.BuildStderr += \"\\nFailed to write file to disk\"\n\t\t\t\t\tlogging.LogError(err, \"Failed to write payload to disk\")\n\t\t\t\t} else {\n\t\t\t\t\tsha1Sum := sha1.Sum(*payloadBuildResponse.Payload)\n\t\t\t\t\tdatabasePayload.Filemeta.Sha1 = fmt.Sprintf(\"%x\", sha1Sum)\n\t\t\t\t\tmd5Sum := md5.Sum(*payloadBuildResponse.Payload)\n\t\t\t\t\tdatabasePayload.Filemeta.Md5 = fmt.Sprintf(\"%x\", md5Sum)\n\t\t\t\t\tdatabasePayload.Filemeta.ChunkSize = len(*payloadBuildResponse.Payload)\n\t\t\t\t\tdatabasePayload.Filemeta.TotalChunks = 1\n\t\t\t\t\tdatabasePayload.Filemeta.ChunksReceived = 1\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t*/\n\t\t\t// update the payload in the database\n\t\t\tif _, updateError := database.DB.NamedExec(`UPDATE payload SET \n\t\t\t\tbuild_phase=:build_phase, build_stderr=:build_stderr, build_message=:build_message, build_stdout=:build_stdout\n\t\t\t\tWHERE id=:id`, databasePayload,\n\t\t\t); updateError != nil {\n\t\t\t\tlogging.LogError(updateError, \"Failed to update payload's build status\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdatabase.UpdateRemainingBuildSteps(databasePayload)\n\t\t\t/* Payload should be uploaded separately\n\t\t\tif databasePayload.BuildPhase == PAYLOAD_BUILD_STATUS_SUCCESS {\n\t\t\t\tif _, updateError := database.DB.NamedExec(`UPDATE filemeta SET\n\t\t\t\t\tsha1=:sha1, md5=:md5, chunk_size=:chunk_size, total_chunks=:total_chunks, chunks_received=:chunks_received\n\t\t\t\t\tWHERE id=:id`, databasePayload.Filemeta,\n\t\t\t\t); updateError != nil {\n\t\t\t\t\tlogging.LogError(updateError, \"Failed to update payload's file hashes\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t*/\n\t\t\tif databasePayload.BuildPhase == PAYLOAD_BUILD_STATUS_SUCCESS {\n\t\t\t\t// process the additional UpdatedCommands\n\t\t\t\tif err := updateLoadedCommandsFromPayloadBuild(databasePayload, payloadBuildResponse.UpdatedCommandList); err != nil {\n\t\t\t\t\tdatabase.UpdatePayloadWithError(databasePayload, err)\n\t\t\t\t\tdatabase.UpdateRemainingBuildSteps(databasePayload)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tlogging.LogDebug(\"Finished processing payload build response message\")\n\t}\n}", "func (o ApiDiagnosticFrontendRequestPtrOutput) BodyBytes() pulumi.IntPtrOutput {\n\treturn 
o.ApplyT(func(v *ApiDiagnosticFrontendRequest) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.BodyBytes\n\t}).(pulumi.IntPtrOutput)\n}", "func (o ApiDiagnosticFrontendResponseOutput) BodyBytes() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v ApiDiagnosticFrontendResponse) *int { return v.BodyBytes }).(pulumi.IntPtrOutput)\n}", "func NewByteBody(b []byte) io.ReadCloser {\n\treturn ioutil.NopCloser(bytes.NewReader(b))\n}", "func (h *host) Encode(ctx context.Context, ids ttnpb.EndDeviceIdentifiers, version *ttnpb.EndDeviceVersionIdentifiers, msg *ttnpb.ApplicationDownlink, script string) error {\n\tdefer trace.StartRegion(ctx, \"encode message\").End()\n\n\tdecoded := msg.DecodedPayload\n\tif decoded == nil {\n\t\treturn nil\n\t}\n\tm, err := gogoproto.Map(decoded)\n\tif err != nil {\n\t\treturn errInput.WithCause(err)\n\t}\n\tenv := h.createEnvironment(ids, version)\n\tenv[\"payload\"] = m\n\tenv[\"f_port\"] = msg.FPort\n\tscript = fmt.Sprintf(`\n\t\t%s\n\t\tEncoder(env.payload, env.f_port)\n\t`, script)\n\tvalue, err := h.engine.Run(ctx, script, env)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif value == nil || reflect.TypeOf(value).Kind() != reflect.Slice {\n\t\treturn errOutputType.New()\n\t}\n\tslice := reflect.ValueOf(value)\n\tfrmPayload := make([]byte, slice.Len())\n\tfor i := 0; i < slice.Len(); i++ {\n\t\tval := slice.Index(i).Interface()\n\t\tvar b int64\n\t\tswitch i := val.(type) {\n\t\tcase int:\n\t\t\tb = int64(i)\n\t\tcase int8:\n\t\t\tb = int64(i)\n\t\tcase int16:\n\t\t\tb = int64(i)\n\t\tcase int32:\n\t\t\tb = int64(i)\n\t\tcase int64:\n\t\t\tb = i\n\t\tcase uint8:\n\t\t\tb = int64(i)\n\t\tcase uint16:\n\t\t\tb = int64(i)\n\t\tcase uint32:\n\t\t\tb = int64(i)\n\t\tcase uint64:\n\t\t\tb = int64(i)\n\t\tdefault:\n\t\t\treturn errOutputType.WithAttributes(\"type\", fmt.Sprintf(\"%T\", i))\n\t\t}\n\t\tif b < 0x00 || b > 0xFF {\n\t\t\treturn errOutputRange.WithAttributes(\n\t\t\t\t\"value\", b,\n\t\t\t\t\"low\", 0x00,\n\t\t\t\t\"high\", 0xFF,\n\t\t\t)\n\t\t}\n\t\tfrmPayload[i] = byte(b)\n\t}\n\tmsg.FRMPayload = frmPayload\n\treturn nil\n}", "func (s *SlcLogger) buildPayload(slackChannel, color, message string, titleParam []string) ([]byte, error) {\n\n\tvar title string\n\tif len(titleParam) == 0 {\n\t\ttitle = s.DefaultTitle\n\t} else {\n\t\ttitle = titleParam[0]\n\t}\n\n\ta := &attachment{Text: message, Title: title, Color: color}\n\tattachments := []attachment{*a}\n\n\treturn json.Marshal(payload{\n\t\tChannel: slackChannel,\n\t\tUserName: s.UserName,\n\t\tIconURL: s.IconURL,\n\t\tAttachments: attachments,\n\t})\n}", "func (s StartJobInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.CommitId != nil {\n\t\tv := *s.CommitId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"commitId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CommitMessage != nil {\n\t\tv := *s.CommitMessage\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"commitMessage\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CommitTime != nil {\n\t\tv := *s.CommitTime\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"commitTime\", protocol.TimeValue{V: v, Format: protocol.UnixTimeFormat}, metadata)\n\t}\n\tif s.JobId != nil {\n\t\tv := *s.JobId\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"jobId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.JobReason != nil {\n\t\tv := *s.JobReason\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"jobReason\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.JobType) > 0 {\n\t\tv := s.JobType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"jobType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AppId != nil {\n\t\tv := *s.AppId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"appId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BranchName != nil {\n\t\tv := *s.BranchName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"branchName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (p *BBcAssetRaw) AddBody(assetID *[]byte, assetBody interface{}) {\n\tif assetID != nil {\n\t\tp.AssetID = make([]byte, p.IdLengthConf.AssetIdLength)\n\t\tcopy(p.AssetID, *assetID)\n\t}\n\tswitch assetBody.(type) {\n\tcase string:\n\t\tp.AssetBody = []byte(assetBody.(string))\n\t\tp.AssetBodySize = uint16(len(p.AssetBody))\n\t\tbreak\n\tcase []byte:\n\t\tp.AssetBody = assetBody.([]byte)\n\t\tp.AssetBodySize = uint16(len(p.AssetBody))\n\t\tbreak\n\t}\n}", "func buildEmailBody(requestBuffer *[]*http.Request) {\n\tvar finalBody string\n\tfor _, req := range *requestBuffer {\n\t\tjsonSerializedBody, _ := json.MarshalIndent(getVisitor(req), \"\", \"\t\")\n\t\tfinalBody = finalBody + string(jsonSerializedBody) + \"\\r\\n\"\n\t}\n\tsendEmail(finalBody)\n}", "func encode(bat batch, tags []string) (contentType string, body io.Reader, err error) {\n\tvar buf bytes.Buffer\n\n\tmw := multipart.NewWriter(&buf)\n\n\tif bat.host != \"\" {\n\t\ttags = append(tags, fmt.Sprintf(\"host:%s\", bat.host))\n\t}\n\ttags = append(tags, \"runtime:go\")\n\n\tevent := &uploadEvent{\n\t\tVersion: \"4\",\n\t\tFamily: \"go\",\n\t\tStart: bat.start.Format(time.RFC3339Nano),\n\t\tEnd: bat.end.Format(time.RFC3339Nano),\n\t\tTags: strings.Join(tags, \",\"),\n\t\tEndpointCounts: bat.endpointCounts,\n\t}\n\n\tfor _, p := range bat.profiles {\n\t\tevent.Attachments = append(event.Attachments, p.name)\n\t\tf, err := mw.CreateFormFile(p.name, p.name)\n\t\tif err != nil {\n\t\t\treturn \"\", nil, err\n\t\t}\n\t\tif _, err := f.Write(p.data); err != nil {\n\t\t\treturn \"\", nil, err\n\t\t}\n\t}\n\n\tf, err := mw.CreatePart(textproto.MIMEHeader{\n\t\t\"Content-Disposition\": []string{`form-data; name=\"event\"; filename=\"event.json\"`},\n\t\t\"Content-Type\": []string{\"application/json\"},\n\t})\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\tif err := json.NewEncoder(f).Encode(event); err != nil {\n\t\treturn \"\", nil, err\n\t}\n\n\tif err := mw.Close(); err != nil {\n\t\treturn \"\", nil, err\n\t}\n\treturn mw.FormDataContentType(), &buf, nil\n}", "func (r *MachinePoolsAddServerResponse) Body(value *MachinePool) *MachinePoolsAddServerResponse {\n\tr.body = value\n\treturn r\n}", "func Body(data ...interface{}) AdditionalAttribute {\n return func(rb *Builder) error {\n rb.SetBody(data...)\n return nil\n }\n}", "func postJobDefinition(postBody []byte) ([]byte, error) {\n\n\turl := \"http://\" + config.Host + \"/jobs/\" + \"?apikey=\" + config.Apikey\n\n\treq, err := http.NewRequest(\"POST\", url, 
bytes.NewBuffer(postBody))\n\n\tclient := http.Client{}\n\tresp, err := client.Do(req)\n\n\tdefer resp.Body.Close()\n\n\tif err != nil {\n\t\tlogger.Fatalf(fmt.Sprintf(\"autoqctl: %v\\n\", err))\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\n\tif err != nil {\n\t\tlogger.Fatalf(fmt.Sprintf(\"autoqctl: %v\\n\", err))\n\t}\n\n\treturn body, err\n\n}", "func PayloadFromBytes(b []byte) Payload {\n\treturn jsonPayload(b)\n}", "func BodyAll(req *http.Request) ([]byte, error) {\n\tdata, err := ioutil.ReadAll(req.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer req.Body.Close()\n\treturn data, nil\n}", "func (o DiagnosticFrontendResponseOutput) BodyBytes() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v DiagnosticFrontendResponse) *int { return v.BodyBytes }).(pulumi.IntPtrOutput)\n}", "func (ds *DepositToStake) Payload() []byte { return ds.payload }", "func (request ListEntitlementsRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool) {\n\n\treturn nil, false\n\n}", "func (job *ScanCloseJob) buildPayload(pjson string) (err error) {\n\tif len(pjson) > 0 {\n\t\tjob.Payload = &ScanClosePayload{}\n\t\terr = json.Unmarshal([]byte(pjson), job.Payload)\n\t} else {\n\t\terr = fmt.Errorf(\"empty json string passed to ScanCloseJob\")\n\t}\n\treturn err\n}", "func buildBody(level, title string) map[string]interface{} {\n\ttimestamp := time.Now().Unix()\n\thostname, _ := os.Hostname()\n\n\treturn map[string]interface{}{\n\t\t\"access_token\": Token,\n\t\t\"data\": map[string]interface{}{\n\t\t\t\"environment\": Environment,\n\t\t\t\"title\": title,\n\t\t\t\"level\": level,\n\t\t\t\"timestamp\": timestamp,\n\t\t\t\"platform\": runtime.GOOS,\n\t\t\t\"language\": \"go\",\n\t\t\t\"server\": map[string]interface{}{\n\t\t\t\t\"host\": hostname,\n\t\t\t},\n\t\t\t\"notifier\": map[string]interface{}{\n\t\t\t\t\"name\": NAME,\n\t\t\t\t\"version\": VERSION,\n\t\t\t},\n\t\t},\n\t}\n}", "func writeBody(body []byte, buffer *bytes.Buffer, contentType string, mw *multipart.Writer) error {\n\tif mw != nil {\n\t\tencoding := \"quoted-printable\"\n\t\tpartHeader := textproto.MIMEHeader{\n\t\t\t\"Content-Type\": {contentType + \"; charset=UTF-8\"},\n\t\t\t\"Content-Transfer-Encoding\": {encoding},\n\t\t}\n\t\tif _, err := mw.CreatePart(partHeader); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tdata := append(body, \"\\r\\n\"...)\n\tqp := quotedprintable.NewWriter(buffer)\n\tif _, err := qp.Write(data); err != nil {\n\t\treturn err\n\t}\n\treturn qp.Close()\n}", "func (r *Request) Payload() (p *roadrunner.Payload, err error) {\n\tp = &roadrunner.Payload{}\n\n\tj := json.ConfigCompatibleWithStandardLibrary\n\tif p.Context, err = j.Marshal(r); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif r.Parsed {\n\t\tif p.Body, err = j.Marshal(r.body); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else if r.body != nil {\n\t\tp.Body = r.body.([]byte)\n\t}\n\n\treturn p, nil\n}", "func prepareJSONPayload(rawReq *GenericRequest) (*bytes.Buffer, error) {\n\t// When payload ready, convert it to Json format\n\tbReqData, err := json.Marshal(&rawReq)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// @TODO Debug print marshal body\n\tfmt.Println(\"RAW Marshal BODY \" + string(bReqData))\n\n\t// Write json object to buffer\n\tbuffer := bytes.NewBuffer(bReqData)\n\n\treturn buffer, nil\n}", "func (o PostEntitiesEntityFidAttachmentsUploadURLOKBody) MarshalJSON() ([]byte, error) {\n\t_parts := make([][]byte, 0, 2)\n\n\tpostEntitiesEntityFidAttachmentsUploadURLOKBodyAO0, err := swag.WriteJSON(o.Envelope)\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\t_parts = append(_parts, postEntitiesEntityFidAttachmentsUploadURLOKBodyAO0)\n\tvar dataPostEntitiesEntityFidAttachmentsUploadURLOKBodyAO1 struct {\n\t\tData *models.AttachmentURL `json:\"data,omitempty\"`\n\t}\n\n\tdataPostEntitiesEntityFidAttachmentsUploadURLOKBodyAO1.Data = o.Data\n\n\tjsonDataPostEntitiesEntityFidAttachmentsUploadURLOKBodyAO1, errPostEntitiesEntityFidAttachmentsUploadURLOKBodyAO1 := swag.WriteJSON(dataPostEntitiesEntityFidAttachmentsUploadURLOKBodyAO1)\n\tif errPostEntitiesEntityFidAttachmentsUploadURLOKBodyAO1 != nil {\n\t\treturn nil, errPostEntitiesEntityFidAttachmentsUploadURLOKBodyAO1\n\t}\n\t_parts = append(_parts, jsonDataPostEntitiesEntityFidAttachmentsUploadURLOKBodyAO1)\n\treturn swag.ConcatJSON(_parts...), nil\n}", "func (s Job) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.AbortConfig != nil {\n\t\tv := s.AbortConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"abortConfig\", v, metadata)\n\t}\n\tif s.Comment != nil {\n\t\tv := *s.Comment\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"comment\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CompletedAt != nil {\n\t\tv := *s.CompletedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"completedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.CreatedAt != nil {\n\t\tv := *s.CreatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ForceCanceled != nil {\n\t\tv := *s.ForceCanceled\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"forceCanceled\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.JobArn != nil {\n\t\tv := *s.JobArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"jobArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.JobExecutionsRolloutConfig != nil {\n\t\tv := s.JobExecutionsRolloutConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"jobExecutionsRolloutConfig\", v, metadata)\n\t}\n\tif s.JobId != nil {\n\t\tv := *s.JobId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"jobId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.JobProcessDetails != nil {\n\t\tv := s.JobProcessDetails\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"jobProcessDetails\", v, metadata)\n\t}\n\tif s.LastUpdatedAt != nil {\n\t\tv := *s.LastUpdatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastUpdatedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.PresignedUrlConfig != nil {\n\t\tv := s.PresignedUrlConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"presignedUrlConfig\", v, metadata)\n\t}\n\tif s.ReasonCode != nil {\n\t\tv := *s.ReasonCode\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"reasonCode\", 
protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Status) > 0 {\n\t\tv := s.Status\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"status\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.TargetSelection) > 0 {\n\t\tv := s.TargetSelection\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"targetSelection\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Targets != nil {\n\t\tv := s.Targets\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"targets\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.TimeoutConfig != nil {\n\t\tv := s.TimeoutConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"timeoutConfig\", v, metadata)\n\t}\n\treturn nil\n}", "func Body(str string, args ...interface{}) io.ReadCloser {\n\treturn io.NopCloser(strings.NewReader(fmt.Sprintf(str, args...)))\n}", "func (m Message) Payload() ([]byte, error) {\n\tbody, err := ioutil.ReadAll(m.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tm.Body = ioutil.NopCloser(bytes.NewReader(body))\n\n\treturn body, err\n}", "func (tx *Payment) MarshalBinary(Scheme) ([]byte, error) {\n\tb := tx.bodyMarshalBinaryBuffer()\n\terr := tx.bodyMarshalBinary(b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbuf := make([]byte, paymentBodyLen+crypto.SignatureSize)\n\tcopy(buf, b)\n\tif tx.Signature == nil {\n\t\treturn nil, errors.New(\"marshaling unsigned transaction\")\n\t}\n\tcopy(buf[paymentBodyLen:], tx.Signature[:])\n\treturn buf, nil\n}", "func (r *Request) Body() []byte {\n\treturn r.body\n}", "func (r *Request) Body() []byte {\n\treturn r.body\n}", "func (c *Control) Body(data interface{}) {\n\tvar content []byte\n\n\tif str, ok := data.(string); ok {\n\t\tcontent = []byte(str)\n\t\tif c.ContentType != \"\" {\n\t\t\tc.Writer.Header().Add(\"Content-type\", c.ContentType)\n\t\t} else {\n\t\t\tc.Writer.Header().Add(\"Content-type\", MIMETEXT)\n\t\t}\n\t} else {\n\t\tif c.useMetaData {\n\t\t\tc.header.Data = data\n\t\t\tif !c.timer.IsZero() {\n\t\t\t\ttook := time.Now()\n\t\t\t\tc.header.Duration = took.Sub(c.timer)\n\t\t\t\tc.header.Took = took.Sub(c.timer).String()\n\t\t\t}\n\t\t\tif c.header.Params == nil && len(c.params) > 0 {\n\t\t\t\tc.header.Params = c.params\n\t\t\t}\n\t\t\tif c.errorHeader.Code != 0 || c.errorHeader.Message != \"\" || len(c.errorHeader.Errors) > 0 {\n\t\t\t\tc.header.Error = c.errorHeader\n\t\t\t}\n\t\t\tdata = c.header\n\t\t}\n\t\tvar err error\n\t\tif c.compactJSON {\n\t\t\tcontent, err = json.Marshal(data)\n\t\t} else {\n\t\t\tcontent, err = json.MarshalIndent(data, \"\", \" \")\n\t\t}\n\t\tif err != nil {\n\t\t\tc.Writer.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tc.Writer.Header().Add(\"Content-type\", MIMEJSON)\n\t}\n\tif strings.Contains(c.Request.Header.Get(\"Accept-Encoding\"), \"gzip\") {\n\t\tc.Writer.Header().Add(\"Content-Encoding\", \"gzip\")\n\t\tif c.code > 0 {\n\t\t\tc.Writer.WriteHeader(c.code)\n\t\t}\n\t\tgz := gzip.NewWriter(c.Writer)\n\t\tgz.Write(content)\n\t\tgz.Close()\n\t} else {\n\t\tif c.code > 0 {\n\t\t\tc.Writer.WriteHeader(c.code)\n\t\t}\n\t\tc.Writer.Write(content)\n\t}\n}", "func (b *Block) signablePayload() []byte {\n\n\telements := 
[][]byte{\n\t\t[]byte(b.BlockId),\n\t\t[]byte(b.Author),\n\t\t[]byte(b.Hash),\n\t\t[]byte(b.PrevBlockHash),\n\t\tb.Data.Bytes(),\n\t}\n\treturn bytes.Join(elements, []byte{})\n\n}", "func (r *MachinePoolsAddServerRequest) Body() *MachinePool {\n\tif r == nil {\n\t\treturn nil\n\t}\n\treturn r.body\n}", "func (p *ubPayload) Encode() []byte {\n\tretString := strconv.Itoa(p.flags)\n\tfl := strconv.Itoa(len(retString))\n\tretString = fl + retString + p.suffix\n\treturn []byte(retString)\n}", "func getPayload(thingToEncode interface{}, contentType ContentType) (payload []byte, err error) {\n\tswitch contentType {\n\tcase Xml:\n\t\tpayload, err = xml.Marshal(thingToEncode)\n\tcase Json:\n\t\tpayload, err = json.Marshal(thingToEncode)\n\t}\n\treturn\n}", "func (request ListAssignedSubscriptionLineItemsRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool) {\n\n\treturn nil, false\n\n}", "func (request ListFsuDiscoveriesRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool) {\n\n\treturn nil, false\n\n}", "func Pack(Interface interface{}) ([]byte, error) {\n\t// Create a scratchpad which will be used for creating this.\n\tpad := newScratchpad(INITIAL_ALLOC)\n\tpad.endAppend(131)\n\n\t// Add a switch for the type.\n\tvar handler func(i interface{}) error\n\thandler = func(i interface{}) error {\n\t\tswitch b := i.(type) {\n\t\tcase json.RawMessage:\n\t\t\t// Just add the raw data (compatibility for libs using both erlpack and json).\n\t\t\tpad.endAppend(b...)\n\t\t\treturn nil\n\t\tcase RawData:\n\t\t\t// Just add the raw data.\n\t\t\tpad.endAppend(b...)\n\t\t\treturn nil\n\t\tcase nil:\n\t\t\t// Pack the nil bytes and return nil.\n\t\t\tpackNil(pad)\n\t\t\treturn nil\n\t\tcase string:\n\t\t\t// Pack the string and return nil.\n\t\t\tpackString(i.(string), pad)\n\t\t\treturn nil\n\t\tcase bool:\n\t\t\t// Pack the boolean and return nil.\n\t\t\tpackBool(i.(bool), pad)\n\t\t\treturn nil\n\t\tcase int:\n\t\t\t// Pack the integer and return nil.\n\t\t\tpackInt(i.(int), pad)\n\t\t\treturn nil\n\t\tcase int64:\n\t\t\t// Pack the int64 and return nil.\n\t\t\tpackInt64(i.(int64), pad)\n\t\t\treturn nil\n\t\tcase float32:\n\t\t\t// Pack the float32 as a float64 and return nil.\n\t\t\tpackFloat64(float64(i.(float32)), pad)\n\t\t\treturn nil\n\t\tcase Atom:\n\t\t\t// Pack a atom and return nil.\n\t\t\tpad.endAppend('s', byte(len(i.(Atom))))\n\t\t\tpad.endAppend([]byte(i.(Atom))...)\n\t\t\treturn nil\n\t\tcase UncastedResult:\n\t\t\t// Pack a uncasted result.\n\t\t\treturn handler(i.(UncastedResult).item)\n\t\tcase float64:\n\t\t\t// Pack the float64 and return nil.\n\t\t\tpackFloat64(i.(float64), pad)\n\t\t\treturn nil\n\t\tdefault:\n\t\t\trt := reflect.ValueOf(i)\n\t\t\tswitch rt.Kind() {\n\t\t\tcase reflect.Ptr:\n\t\t\t\t// Check if it's a null pointer.\n\t\t\t\tif rt.IsNil() {\n\t\t\t\t\t// It is. Add a null.\n\t\t\t\t\tpackNil(pad)\n\t\t\t\t} else {\n\t\t\t\t\t// No it isn't. 
Pack the value.\n\t\t\t\t\terr := handler(rt.Elem().Interface())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Return nil (there's been no errors).\n\t\t\t\treturn nil\n\t\t\tcase reflect.Slice, reflect.Array:\n\t\t\t\t// Get the length.\n\t\t\t\tl := rt.Len()\n\n\t\t\t\t// Process the length.\n\t\t\t\tif l == 0 {\n\t\t\t\t\t// Apply the null length bytes.\n\t\t\t\t\tpad.endAppend('j')\n\t\t\t\t} else {\n\t\t\t\t\t// Iterate through the array.\n\t\t\t\t\tappendListHeader(pad, uint32(l))\n\t\t\t\t\tfor i := 0; i < l; i++ {\n\t\t\t\t\t\titem := rt.Index(i).Interface()\n\t\t\t\t\t\terr := handler(item)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tpad.endAppend('j')\n\t\t\t\t}\n\n\t\t\t\t// Return nil (there were no errors).\n\t\t\t\treturn nil\n\t\t\tcase reflect.Map:\n\t\t\t\t// Create the map header.\n\t\t\t\tkeys := rt.MapKeys()\n\t\t\t\tappendMapHeader(pad, uint32(len(keys)))\n\n\t\t\t\t// Iterate the map.\n\t\t\t\tfor _, e := range keys {\n\t\t\t\t\tv := rt.MapIndex(e)\n\t\t\t\t\tKey := e.Interface()\n\t\t\t\t\terr := handler(Key)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tValue := v.Interface()\n\t\t\t\t\terr = handler(Value)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Return nil (there were no errors).\n\t\t\t\treturn nil\n\t\t\tcase reflect.Struct:\n\t\t\t\t// Create a struct parser.\n\t\t\t\ts := structs.New(i)\n\t\t\t\ts.TagName = \"erlpack\"\n\n\t\t\t\t// Call this back with the generated map.\n\t\t\t\treturn handler(s.Map())\n\t\t\tdefault:\n\t\t\t\t// Send a unknown type error.\n\t\t\t\treturn errors.New(fmt.Sprintf(\"unknown type: %T\", i))\n\t\t\t}\n\t\t}\n\t}\n\n\t// Runs the handler.\n\terr := handler(Interface)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn pad.bytes(), nil\n}", "func (o GetExecutionOKBody) MarshalJSON() ([]byte, error) {\n\t_parts := make([][]byte, 0, 2)\n\n\tgetExecutionOKBodyAO0, err := swag.WriteJSON(o.GetExecutionOKBodyAllOf0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_parts = append(_parts, getExecutionOKBodyAO0)\n\n\tvar dataGetExecutionOKBodyAO1 struct {\n\t\tEndedAt string `json:\"ended_at,omitempty\"`\n\n\t\tError string `json:\"error,omitempty\"`\n\n\t\tLogs []string `json:\"logs\"`\n\n\t\tParams map[string]interface{} `json:\"params,omitempty\"`\n\n\t\tStartedAt string `json:\"started_at,omitempty\"`\n\t}\n\n\tdataGetExecutionOKBodyAO1.EndedAt = o.EndedAt\n\n\tdataGetExecutionOKBodyAO1.Error = o.Error\n\n\tdataGetExecutionOKBodyAO1.Logs = o.Logs\n\n\tdataGetExecutionOKBodyAO1.Params = o.Params\n\n\tdataGetExecutionOKBodyAO1.StartedAt = o.StartedAt\n\n\tjsonDataGetExecutionOKBodyAO1, errGetExecutionOKBodyAO1 := swag.WriteJSON(dataGetExecutionOKBodyAO1)\n\tif errGetExecutionOKBodyAO1 != nil {\n\t\treturn nil, errGetExecutionOKBodyAO1\n\t}\n\t_parts = append(_parts, jsonDataGetExecutionOKBodyAO1)\n\n\treturn swag.ConcatJSON(_parts...), nil\n}", "func ToBytes(inter interface{}) []byte {\n\treqBodyBytes := new(bytes.Buffer)\n\tjson.NewEncoder(reqBodyBytes).Encode(inter)\n\tfmt.Println(reqBodyBytes.Bytes()) // this is the []byte\n\tfmt.Println(string(reqBodyBytes.Bytes())) // converted back to show it's your original object\n\treturn reqBodyBytes.Bytes()\n}", "func (o *SendJobCommandParams) SetBody(body SendJobCommandBody) {\n\to.Body = body\n}", "func (promise AttachmentPromise) Body() []byte {\n\treturn promise.body\n}", "func WithBodyBytes(body []byte) OptsReq 
{\n\treturn func(req *request) {\n\t\treq.contentLen = int64(len(body))\n\t\treq.getBody = func() (io.ReadCloser, error) {\n\t\t\treturn ioutil.NopCloser(bytes.NewReader(body)), nil\n\t\t}\n\t}\n}", "func (req *Request) Body() ([]byte, error) {\n\treturn ioutil.ReadAll(req.r.Body)\n}", "func (ctx *HijackRequest) Body() string {\n\treturn ctx.event.Request.PostData\n}", "func (c *coinbaseManager) serializeCoinbasePayload(blueScore uint64, coinbaseData *externalapi.DomainCoinbaseData) ([]byte, error) {\n\tscriptLengthOfScriptPubKey := len(coinbaseData.ScriptPublicKey.Script)\n\tif scriptLengthOfScriptPubKey > int(c.coinbasePayloadScriptPublicKeyMaxLength) {\n\t\treturn nil, errors.Wrapf(ruleerrors.ErrBadCoinbasePayloadLen, \"coinbase's payload script public key is \"+\n\t\t\t\"longer than the max allowed length of %d\", c.coinbasePayloadScriptPublicKeyMaxLength)\n\t}\n\n\tpayload := make([]byte, uint64Len+lengthOfVersionScriptPubKey+lengthOfscriptPubKeyLength+scriptLengthOfScriptPubKey+len(coinbaseData.ExtraData))\n\tbinary.LittleEndian.PutUint64(payload[:uint64Len], blueScore)\n\n\tpayload[uint64Len] = uint8(coinbaseData.ScriptPublicKey.Version)\n\tpayload[uint64Len+lengthOfVersionScriptPubKey] = uint8(len(coinbaseData.ScriptPublicKey.Script))\n\tcopy(payload[uint64Len+lengthOfVersionScriptPubKey+lengthOfscriptPubKeyLength:], coinbaseData.ScriptPublicKey.Script)\n\tcopy(payload[uint64Len+lengthOfVersionScriptPubKey+lengthOfscriptPubKeyLength+scriptLengthOfScriptPubKey:], coinbaseData.ExtraData)\n\treturn payload, nil\n}", "func encodeLeaseRequestBinary(leaseReq *LeaseRequest, jreq *jsonRequest) (hdrBytes []byte, jsonBytes []byte) {\n\tvar err error\n\n\thdrBuf := &bytes.Buffer{}\n\tjsonBuf := &bytes.Buffer{}\n\n\t// marshal jsonRequest fields\n\terr = binary.Write(jsonBuf, binary.LittleEndian, jreq.MyUniqueID)\n\tif err != nil {\n\t\tpanic(\"jreq.MyUniqueID\")\n\t}\n\terr = binary.Write(jsonBuf, binary.LittleEndian, jreq.RequestID)\n\tif err != nil {\n\t\tpanic(\"jreq.RequestID\")\n\t}\n\terr = binary.Write(jsonBuf, binary.LittleEndian, jreq.HighestReplySeen)\n\tif err != nil {\n\t\tpanic(\"jreq.HighestReplySeen\")\n\t}\n\t// use a Nul terminated string\n\tmethodWithNul := jreq.Method + string([]byte{0})\n\t_, err = jsonBuf.WriteString(methodWithNul)\n\tif err != nil {\n\t\tpanic(\"jreq.Method\")\n\t}\n\n\t// marshal LeaseRequest fields\n\tmountIDWithNul := leaseReq.MountID + MountIDAsString([]byte{0})\n\t_, err = jsonBuf.WriteString(string(mountIDWithNul))\n\tif err != nil {\n\t\tpanic(\"leaseReq.MountID\")\n\t}\n\terr = binary.Write(jsonBuf, binary.LittleEndian, leaseReq.InodeNumber)\n\tif err != nil {\n\t\tpanic(\"leaseReq.LeaseInodeNumber\")\n\t}\n\terr = binary.Write(jsonBuf, binary.LittleEndian, leaseReq.LeaseRequestType)\n\tif err != nil {\n\t\tpanic(\"leaseReq.LeaseRequestType\")\n\t}\n\n\t// now create the IoRequest header and Marshal it\n\tioReq := ioRequestRetryRpc{\n\t\tHdr: ioHeader{\n\t\t\tLen: uint32(jsonBuf.Len()),\n\t\t\tProtocol: uint16(1),\n\t\t\tVersion: 1,\n\t\t\tType: 1,\n\t\t\tMagic: 0xCAFEFEED,\n\t\t},\n\t}\n\n\terr = binary.Write(hdrBuf, binary.LittleEndian, ioReq.Hdr.Len)\n\tif err != nil {\n\t\tpanic(\"ioReq.Hdr.Len\")\n\t}\n\terr = binary.Write(hdrBuf, binary.LittleEndian, ioReq.Hdr.Protocol)\n\tif err != nil {\n\t\tpanic(\"ioReq.Hdr.Protocol\")\n\t}\n\terr = binary.Write(hdrBuf, binary.LittleEndian, ioReq.Hdr.Version)\n\tif err != nil {\n\t\tpanic(\"ioReq.Hdr.Version\")\n\t}\n\terr = binary.Write(hdrBuf, binary.LittleEndian, ioReq.Hdr.Type)\n\tif err != nil 
{\n\t\tpanic(\"ioReq.Hdr.Type\")\n\t}\n\terr = binary.Write(hdrBuf, binary.LittleEndian, ioReq.Hdr.Magic)\n\tif err != nil {\n\t\tpanic(\"ioReq.Hdr.Magic\")\n\t}\n\n\thdrBytes = hdrBuf.Bytes()\n\tjsonBytes = jsonBuf.Bytes()\n\treturn\n}", "func BuildBinary(name string, mode os.FileMode, body io.Reader) (*Binary, error) {\n\tsum, err := checksum(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Binary{\n\t\tName: name,\n\t\tChecksum: sum,\n\t\tMode: mode,\n\t\tBody: body,\n\t}, nil\n}", "func (m *realMessage) Body() []byte {\n\treturn m.message.Data\n}", "func (req *UpsertRequest) Body(res tarantool.SchemaResolver, enc *encoder) error {\n\targs := upsertArgs{Space: req.space, Tuple: req.tuple,\n\t\tOperations: req.operations, Opts: req.opts}\n\treq.impl = req.impl.Args(args)\n\treturn req.impl.Body(res, enc)\n}", "func unpackRequestBody(decoder *hessian.Decoder, reqObj interface{}) error {\n\tif decoder == nil {\n\t\treturn perrors.Errorf(\"@decoder is nil\")\n\t}\n\n\treq, ok := reqObj.([]interface{})\n\tif !ok {\n\t\treturn perrors.Errorf(\"@reqObj is not of type: []interface{}\")\n\t}\n\tif len(req) < 7 {\n\t\treturn perrors.New(\"length of @reqObj should be 7\")\n\t}\n\n\tvar (\n\t\terr error\n\t\tdubboVersion, target, serviceVersion, method, argsTypes interface{}\n\t\targs []interface{}\n\t)\n\n\tdubboVersion, err = decoder.Decode()\n\tif err != nil {\n\t\treturn perrors.WithStack(err)\n\t}\n\treq[0] = dubboVersion\n\n\ttarget, err = decoder.Decode()\n\tif err != nil {\n\t\treturn perrors.WithStack(err)\n\t}\n\treq[1] = target\n\n\tserviceVersion, err = decoder.Decode()\n\tif err != nil {\n\t\treturn perrors.WithStack(err)\n\t}\n\treq[2] = serviceVersion\n\n\tmethod, err = decoder.Decode()\n\tif err != nil {\n\t\treturn perrors.WithStack(err)\n\t}\n\treq[3] = method\n\n\targsTypes, err = decoder.Decode()\n\tif err != nil {\n\t\treturn perrors.WithStack(err)\n\t}\n\treq[4] = argsTypes\n\n\tats := DescRegex.FindAllString(argsTypes.(string), -1)\n\tvar arg interface{}\n\tfor i := 0; i < len(ats); i++ {\n\t\targ, err = decoder.Decode()\n\t\tif err != nil {\n\t\t\treturn perrors.WithStack(err)\n\t\t}\n\t\targs = append(args, arg)\n\t}\n\treq[5] = args\n\n\tattachments, err := decoder.Decode()\n\tif err != nil {\n\t\treturn perrors.WithStack(err)\n\t}\n\tif v, ok := attachments.(map[interface{}]interface{}); ok {\n\t\tv[DUBBO_VERSION_KEY] = dubboVersion\n\t\treq[6] = ToMapStringInterface(v)\n\t\treturn nil\n\t}\n\n\treturn perrors.Errorf(\"get wrong attachments: %+v\", attachments)\n}", "func (c *serverCodec) ReadBody(x interface{}) error {\n\t// If x!=nil and return error e:\n\t// - Write() will be called with e.Error() in r.Error\n\tif x == nil {\n\t\treturn nil\n\t}\n\tif c.req.Params == nil {\n\t\treturn nil\n\t}\n\n\tvar (\n\t\tparams []byte\n\t\terr error\n\t)\n\n\t// 在这里把请求参数json 字符串转换成了相应的struct\n\tparams = []byte(*c.req.Params)\n\t// if c.req.Method == BatchMethod {\n\t// \treturn fmt.Errorf(\"batch request is not allowed\")\n\t// \t/*\n\t// \t\targ := x.(*BatchArg)\n\t// \t\targ.srv = c.srv\n\t// \t\tif err := json.Unmarshal(*c.req.Params, &arg.reqs); err != nil {\n\t// \t\t\treturn NewError(errParams.Code, err.Error())\n\t// \t\t}\n\t// \t\tif len(arg.reqs) == 0 {\n\t// \t\t\treturn errRequest\n\t// \t\t}\n\t// \t*/\n\t// } else\n\tif err = json.Unmarshal(*c.req.Params, x); err != nil {\n\t\t// Note: if c.request.Params is nil it's not an error, it's an optional member.\n\t\t// JSON params structured object. 
Unmarshal to the args object.\n\n\t\tif 2 < len(params) && params[0] == '[' && params[len(params)-1] == ']' {\n\t\t\t// Clearly JSON params is not a structured object,\n\t\t\t// fallback and attempt an unmarshal with JSON params as\n\t\t\t// array value and RPC params is struct. Unmarshal into\n\t\t\t// array containing the request struct.\n\t\t\tparams := [1]interface{}{x}\n\t\t\tif err = json.Unmarshal(*c.req.Params, &params); err != nil {\n\t\t\t\treturn NewError(errParams.Code, err.Error())\n\t\t\t}\n\t\t} else {\n\t\t\treturn NewError(errParams.Code, err.Error())\n\t\t}\n\t}\n\n\treturn nil\n}", "func (t *TOMLParser) Body() []byte {\n\treturn t.body.Bytes()\n}", "func (request ListPublicIpsRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool) {\n\n\treturn nil, false\n\n}", "func PayloadBytesWithHeader(payload []byte, header http.Header) []byte {\n\tif len(header) == 0 {\n\t\treturn payload\n\t}\n\n\thd, _ := json.Marshal(header) // String map never fails marshaling.\n\n\tres := make([]byte, 0, len(jsonHeaderSignature)+len(hd)+1+len(payload))\n\tres = append(res, []byte(jsonHeaderSignature)...)\n\tres = append(res, hd...)\n\tres = append(res, '\\n')\n\tres = append(res, payload...)\n\n\treturn res\n}" ]
[ "0.61125934", "0.6009258", "0.56471306", "0.55544174", "0.54946643", "0.54576397", "0.5440506", "0.5395753", "0.53928536", "0.5352864", "0.5337374", "0.5183572", "0.51288664", "0.51206577", "0.5100693", "0.5085167", "0.50795686", "0.50787616", "0.50787616", "0.5071958", "0.5068982", "0.50647473", "0.5059107", "0.5046009", "0.5017765", "0.5017077", "0.4999179", "0.49979198", "0.49925625", "0.4983308", "0.4977069", "0.4968635", "0.49682474", "0.4955494", "0.49500588", "0.49466717", "0.49284217", "0.49183375", "0.49013528", "0.48959482", "0.488827", "0.48802996", "0.48787758", "0.48753428", "0.48604286", "0.48561168", "0.48532185", "0.48460183", "0.4845584", "0.48421362", "0.48391327", "0.4834888", "0.48262042", "0.48183393", "0.4816547", "0.48147574", "0.48147306", "0.48051995", "0.48044786", "0.48011836", "0.47985753", "0.47940692", "0.47885102", "0.47884023", "0.47760522", "0.47692382", "0.47666904", "0.47596902", "0.4757316", "0.4747054", "0.473675", "0.4728864", "0.4721475", "0.4713375", "0.4713375", "0.47123823", "0.47111005", "0.47096956", "0.47055277", "0.47015446", "0.4700681", "0.46986485", "0.46911842", "0.46865252", "0.46863177", "0.46722114", "0.46696746", "0.46684638", "0.46615595", "0.4657197", "0.465322", "0.46530703", "0.46479997", "0.46467403", "0.46433175", "0.46422255", "0.46420333", "0.46373007", "0.4634093", "0.4633362" ]
0.50859964
15
Context packs job context (job, id) into binary payload. Not used in the sqs, MessageAttributes used instead
func (i *Item) Context() ([]byte, error) { ctx, err := json.Marshal( struct { ID string `json:"id"` Job string `json:"job"` Headers map[string][]string `json:"headers"` Pipeline string `json:"pipeline"` }{ID: i.Ident, Job: i.Job, Headers: i.Headers, Pipeline: i.Options.Pipeline}, ) if err != nil { return nil, err } return ctx, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func serialiseContextRaw(ctx context.Context) ([]byte, error) {\n\tif ctx == nil {\n\t\treturn nil, fmt.Errorf(\"Cannot serialise 'nil' context\")\n\t}\n\tmd := tryGetMetadata(ctx)\n\n\tif md == nil {\n\t\treturn nil, fmt.Errorf(\"No metadata in callstate\")\n\t}\n\tdata, err := proto.Marshal(md)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn data, nil\n}", "func (j *JobInterchange) Raw() []byte { return j.Job }", "func (c *encdecContext) marshal() ([]byte, error) {\n\tvar b cryptobyte.Builder\n\tb.AddUint16(uint16(c.suite.kemID))\n\tb.AddUint16(uint16(c.suite.kdfID))\n\tb.AddUint16(uint16(c.suite.aeadID))\n\tb.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {\n\t\tb.AddBytes(c.exporterSecret)\n\t})\n\tb.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {\n\t\tb.AddBytes(c.key)\n\t})\n\tb.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {\n\t\tb.AddBytes(c.baseNonce)\n\t})\n\tb.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {\n\t\tb.AddBytes(c.sequenceNumber)\n\t})\n\treturn b.Bytes()\n}", "func (c *JobHandler) JobCtx(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tjobID := chi.URLParam(r, \"jobID\")\n\t\tmid, _ := strconv.ParseUint(jobID, 10, 64)\n\t\tjob, err := c.JobService.GetJobByID(mid)\n\t\tif err != nil {\n\t\t\thttp.Error(w, http.StatusText(404), 404)\n\t\t\treturn\n\t\t}\n\t\tctx := context.WithValue(r.Context(), keyJobID, job)\n\t\tnext.ServeHTTP(w, r.WithContext(ctx))\n\t})\n}", "func (j *Job) Context() context.Context {\n\treturn j.tomb.Context(nil)\n}", "func (bj BatchJob) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]interface{})\n\tif bj.LivyInfo != nil {\n\t\tobjectMap[\"livyInfo\"] = bj.LivyInfo\n\t}\n\tif bj.Name != nil {\n\t\tobjectMap[\"name\"] = bj.Name\n\t}\n\tif bj.WorkspaceName != nil {\n\t\tobjectMap[\"workspaceName\"] = bj.WorkspaceName\n\t}\n\tif bj.SparkPoolName != nil {\n\t\tobjectMap[\"sparkPoolName\"] = bj.SparkPoolName\n\t}\n\tif bj.SubmitterName != nil {\n\t\tobjectMap[\"submitterName\"] = bj.SubmitterName\n\t}\n\tif bj.SubmitterID != nil {\n\t\tobjectMap[\"submitterId\"] = bj.SubmitterID\n\t}\n\tif bj.ArtifactID != nil {\n\t\tobjectMap[\"artifactId\"] = bj.ArtifactID\n\t}\n\tif bj.JobType != \"\" {\n\t\tobjectMap[\"jobType\"] = bj.JobType\n\t}\n\tif bj.Result != \"\" {\n\t\tobjectMap[\"result\"] = bj.Result\n\t}\n\tif bj.Scheduler != nil {\n\t\tobjectMap[\"schedulerInfo\"] = bj.Scheduler\n\t}\n\tif bj.Plugin != nil {\n\t\tobjectMap[\"pluginInfo\"] = bj.Plugin\n\t}\n\tif bj.Errors != nil {\n\t\tobjectMap[\"errorInfo\"] = bj.Errors\n\t}\n\tif bj.Tags != nil {\n\t\tobjectMap[\"tags\"] = bj.Tags\n\t}\n\tif bj.ID != nil {\n\t\tobjectMap[\"id\"] = bj.ID\n\t}\n\tif bj.AppID != nil {\n\t\tobjectMap[\"appId\"] = bj.AppID\n\t}\n\tif bj.AppInfo != nil {\n\t\tobjectMap[\"appInfo\"] = bj.AppInfo\n\t}\n\tif bj.State != nil {\n\t\tobjectMap[\"state\"] = bj.State\n\t}\n\tif bj.LogLines != nil {\n\t\tobjectMap[\"log\"] = bj.LogLines\n\t}\n\treturn json.Marshal(objectMap)\n}", "func (c Context) JobID() string {\n\treturn c.Current().JobID\n}", "func (el *ZapEventLogger) SerializeContext(ctx context.Context) ([]byte, error) {\n\tgTracer := opentrace.GlobalTracer()\n\tb := make([]byte, 0)\n\tcarrier := bytes.NewBuffer(b)\n\tspan := opentrace.SpanFromContext(ctx)\n\tif err := gTracer.Inject(span.Context(), opentrace.Binary, carrier); err != nil {\n\t\treturn nil, err\n\t}\n\treturn carrier.Bytes(), nil\n}", "func (r *JobResult) Context() context.Context {\n\treturn 
r.ctx\n}", "func (j *Job) Encode(payload interface{}) error {\n\tvar err error\n\tj.Raw, err = encode(ContentTypeMsgpack, &payload)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func encodeGetJobPostByIDResponse(_ context.Context, response interface{}) (interface{}, error) {\n\tres := response.(endpoints.GetJobPostByIDResponse)\n\terr := getError(res.Err)\n\tif err == nil {\n\t\treturn res.JobPost.ToProto(), nil\n\t}\n\treturn nil, err\n}", "func (c *Contexter) GetContext(format string) ([]byte, error) {\n\tmutableMutex.Lock()\n defer mutableMutex.Unlock()\n switch format {\n case \"toml\":\n var buffer bytes.Buffer\n encoder := toml.NewEncoder(&buffer)\n err := encoder.Encode(c.Context)\n if err != nil {\n return nil, errors.Wrap(err, \"can not encode with toml\")\n }\n return buffer.Bytes(), nil\n case \"yaml\":\n y, err := yaml.Marshal(c.Context)\n if err != nil {\n return nil, errors.Wrap(err, \"can not encode with yaml\")\n }\n\t\treturn y, nil\n case \"json\":\n j, err := json.Marshal(c.Context)\n if err != nil {\n return nil, errors.Wrap(err, \"can not encode with json\")\n }\n\t\treturn j, nil\n default:\n return nil, errors.Errorf(\"unexpected format (%v)\", format)\n }\n}", "func SerializeCtx(ctx context.Context, opts ...SerializeOpts) ([]byte, error) {\n\tbuf := new(bytes.Buffer)\n\te := gob.NewEncoder(buf)\n\n\ts := contextData{\n\t\tValues: make(map[interface{}]interface{}),\n\t\tHasCancel: false,\n\t\tDeadline: time.Time{},\n\t}\n\n\tserialized := buildMap(ctx, s)\n\n\t// if options were passed\n\tif len(opts) > 0 {\n\t\t// override cancel/deadline\n\t\tif !opts[0].RetainCancel {\n\t\t\tserialized.HasCancel = false\n\t\t}\n\t\tif !opts[0].RetainDeadline {\n\t\t\tserialized.HasDeadline = false\n\t\t}\n\t\t// ignore functions to allow serialization to pass\n\t\tif opts[0].IgnoreFunctions {\n\t\t\tfor key, val := range serialized.Values {\n\t\t\t\tif reflect.TypeOf(key).Kind() == reflect.Func || reflect.TypeOf(val).Kind() == reflect.Func {\n\t\t\t\t\tdelete(serialized.Values, key)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Encoding the map\n\terr := e.Encode(serialized)\n\treturn buf.Bytes(), err\n}", "func (self *SQL_Client) QueryContext(connectionId interface{}, query interface{}, parameters interface{}) (string, error) {\n\n\tparameters_ := strings.Split(parameters.(string), \",\")\n\tparametersStr, _ := Utility.ToJson(parameters_)\n\n\t// The query and all it parameters.\n\trqst := &sqlpb.QueryContextRqst{\n\t\tQuery: &sqlpb.Query{\n\t\t\tConnectionId: Utility.ToString(connectionId),\n\t\t\tQuery: Utility.ToString(query),\n\t\t\tParameters: parametersStr,\n\t\t},\n\t}\n\n\t// Because number of values can be high I will use a stream.\n\tstream, err := self.c.QueryContext(context.Background(), rqst)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Here I will create the final array\n\tdata := make([]interface{}, 0)\n\theader := make([]map[string]interface{}, 0)\n\n\tfor {\n\t\tmsg, err := stream.Recv()\n\t\tif err == io.EOF {\n\t\t\t// end of stream...\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\t// Get the result...\n\t\tswitch v := msg.Result.(type) {\n\t\tcase *sqlpb.QueryContextRsp_Header:\n\t\t\t// Here I receive the header information.\n\t\t\tjson.Unmarshal([]byte(v.Header), &header)\n\t\tcase *sqlpb.QueryContextRsp_Rows:\n\t\t\trows := make([]interface{}, 0)\n\t\t\tjson.Unmarshal([]byte(v.Rows), &rows)\n\t\t\tdata = append(data, rows...)\n\t\t}\n\t}\n\n\t// Create object result and put header and data in it.\n\tresult 
:= make(map[string]interface{}, 0)\n\tresult[\"header\"] = header\n\tresult[\"data\"] = data\n\tresultStr, _ := json.Marshal(result)\n\treturn string(resultStr), nil\n}", "func encodeCreateJobPostResponse(_ context.Context, response interface{}) (interface{}, error) {\n\tres := response.(endpoints.CreateJobPostResponse)\n\terr := getError(res.Err)\n\tif err == nil {\n\t\treturn res.JobPost.ToProto(), nil\n\t}\n\treturn nil, err\n}", "func (_obj *DataService) InsertMessageWithContext(tarsCtx context.Context, msg *Message, _opt ...map[string]string) (ret int32, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = msg.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"insertMessage\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = _is.Read_int32(&ret, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func RecreateContextWithTimeout(t time.Duration, bs []byte) (context.Context, error) {\n\tuid := string(bs)\n\tvar userdata []byte\n\tvar err error\n\tif strings.HasPrefix(uid, SERSTRPREFIX) {\n\t\ts := string(bs[len(SERSTRPREFIX):])\n\t\tuserdata, err = base64.StdEncoding.DecodeString(s)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else if strings.HasPrefix(uid, SERBINPREFIX) {\n\t\tuserdata = bs[len(SERBINPREFIX):]\n\t} else {\n\t\treturn nil, fmt.Errorf(\"invalid serialised context prefix (%s)\", uid)\n\t}\n\n\tmd := &rc.InMetadata{}\n\t//\tau := &auth.User{}\n\terr = proto.Unmarshal(userdata, md)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif md.User != nil && !common.VerifySignature(md.User) {\n\t\treturn nil, fmt.Errorf(\"[go-easyops] no context (User signature invalid)\")\n\t}\n\tif md.Service != nil && !common.VerifySignature(md.Service) {\n\t\treturn nil, fmt.Errorf(\"[go-easyops] no context (Service signature invalid)\")\n\t}\n\tctx, err := contextForMetaWithTimeout(t, md)\n\treturn ctx, err\n}", "func encodeUpdateJobPostResponse(_ context.Context, response interface{}) (interface{}, error) {\n\tres := response.(endpoints.UpdateJobPostResponse)\n\terr := getError(res.Err)\n\tif err == nil {\n\t\treturn res.JobPost.ToProto(), nil\n\t}\n\treturn nil, err\n}", "func (c *QueryContext) Marshal() []byte {\n\tbuf := bufPool.Get().(*bytes.Buffer)\n\tdefer bufPool.Put(buf)\n\tbuf.Reset()\n\tl := make([]byte, 4)\n\tbinary.BigEndian.PutUint32(l, uint32(len(c.Query)))\n\tbuf.Write(l)\n\tbuf.Write(c.Query)\n\n\tbinary.BigEndian.PutUint32(l, uint32(len(c.User)))\n\tbuf.Write(l)\n\tbuf.Write(c.User)\n\n\tbinary.BigEndian.PutUint32(l, 
uint32(len(c.Client)))\n\tbuf.Write(l)\n\tbuf.Write(c.Client)\n\n\tbinary.BigEndian.PutUint32(l, uint32(len(c.Database)))\n\tbuf.Write(l)\n\tbuf.Write(c.Database)\n\n\tt, _ := c.Time.MarshalBinary()\n\tbuf.Write(t)\n\treturn buf.Bytes()\n}", "func (s Job) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.AbortConfig != nil {\n\t\tv := s.AbortConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"abortConfig\", v, metadata)\n\t}\n\tif s.Comment != nil {\n\t\tv := *s.Comment\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"comment\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CompletedAt != nil {\n\t\tv := *s.CompletedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"completedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.CreatedAt != nil {\n\t\tv := *s.CreatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"createdAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.Description != nil {\n\t\tv := *s.Description\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"description\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.ForceCanceled != nil {\n\t\tv := *s.ForceCanceled\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"forceCanceled\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.JobArn != nil {\n\t\tv := *s.JobArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"jobArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.JobExecutionsRolloutConfig != nil {\n\t\tv := s.JobExecutionsRolloutConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"jobExecutionsRolloutConfig\", v, metadata)\n\t}\n\tif s.JobId != nil {\n\t\tv := *s.JobId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"jobId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.JobProcessDetails != nil {\n\t\tv := s.JobProcessDetails\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"jobProcessDetails\", v, metadata)\n\t}\n\tif s.LastUpdatedAt != nil {\n\t\tv := *s.LastUpdatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastUpdatedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.PresignedUrlConfig != nil {\n\t\tv := s.PresignedUrlConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"presignedUrlConfig\", v, metadata)\n\t}\n\tif s.ReasonCode != nil {\n\t\tv := *s.ReasonCode\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"reasonCode\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.Status) > 0 {\n\t\tv := s.Status\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"status\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.TargetSelection) > 0 {\n\t\tv := s.TargetSelection\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"targetSelection\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Targets != nil {\n\t\tv := s.Targets\n\n\t\tmetadata := 
protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"targets\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tls0.End()\n\n\t}\n\tif s.TimeoutConfig != nil {\n\t\tv := s.TimeoutConfig\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"timeoutConfig\", v, metadata)\n\t}\n\treturn nil\n}", "func unmarshalContext(raw []byte) (*encdecContext, error) {\n\tvar (\n\t\terr error\n\t\tt cryptobyte.String\n\t)\n\n\tc := new(encdecContext)\n\ts := cryptobyte.String(raw)\n\tif !s.ReadUint16((*uint16)(&c.suite.kemID)) ||\n\t\t!s.ReadUint16((*uint16)(&c.suite.kdfID)) ||\n\t\t!s.ReadUint16((*uint16)(&c.suite.aeadID)) ||\n\t\t!s.ReadUint8LengthPrefixed(&t) ||\n\t\t!t.ReadBytes(&c.exporterSecret, len(t)) ||\n\t\t!s.ReadUint8LengthPrefixed(&t) ||\n\t\t!t.ReadBytes(&c.key, len(t)) ||\n\t\t!s.ReadUint8LengthPrefixed(&t) ||\n\t\t!t.ReadBytes(&c.baseNonce, len(t)) ||\n\t\t!s.ReadUint8LengthPrefixed(&t) ||\n\t\t!t.ReadBytes(&c.sequenceNumber, len(t)) {\n\t\treturn nil, errors.New(\"failed to parse context\")\n\t}\n\n\tif !c.suite.isValid() {\n\t\treturn nil, ErrInvalidHPKESuite\n\t}\n\n\tNh := c.suite.kdfID.ExtractSize()\n\tif len(c.exporterSecret) != Nh {\n\t\treturn nil, errors.New(\"invalid exporter secret length\")\n\t}\n\n\tNk := int(c.suite.aeadID.KeySize())\n\tif len(c.key) != Nk {\n\t\treturn nil, errors.New(\"invalid key length\")\n\t}\n\n\tc.AEAD, err = c.suite.aeadID.New(c.key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tNn := int(c.suite.aeadID.NonceSize())\n\tif len(c.baseNonce) != Nn {\n\t\treturn nil, errors.New(\"invalid base nonce length\")\n\t}\n\tif len(c.sequenceNumber) != Nn {\n\t\treturn nil, errors.New(\"invalid sequence number length\")\n\t}\n\tc.nonce = make([]byte, Nn)\n\n\treturn c, nil\n}", "func (m *IncomingContext) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {\n {\n err := writer.WriteStringValue(\"observedParticipantId\", m.GetObservedParticipantId())\n if err != nil {\n return err\n }\n }\n {\n err := writer.WriteStringValue(\"@odata.type\", m.GetOdataType())\n if err != nil {\n return err\n }\n }\n {\n err := writer.WriteObjectValue(\"onBehalfOf\", m.GetOnBehalfOf())\n if err != nil {\n return err\n }\n }\n {\n err := writer.WriteStringValue(\"sourceParticipantId\", m.GetSourceParticipantId())\n if err != nil {\n return err\n }\n }\n {\n err := writer.WriteObjectValue(\"transferor\", m.GetTransferor())\n if err != nil {\n return err\n }\n }\n {\n err := writer.WriteAdditionalData(m.GetAdditionalData())\n if err != nil {\n return err\n }\n }\n return nil\n}", "func (eventMessage EventMessage) Context() Context {\n\treturn Context{\n\t\tCorrelationID: eventMessage.Header.CorrelationID,\n\t\tTimestamp: eventMessage.Header.Timestamp,\n\t\tApplication: eventMessage.Header.Application,\n\t\tPlatform: eventMessage.Header.Platform,\n\t}\n}", "func copyJobToHookQueueJob(src common.Job) (dst HookQueueOrderJobData) {\n\tdst.ID = src.UUID\n\tdst.Name = src.Name\n\tdst.Status = src.Status\n\tdst.Owner = src.Owner\n\tdst.StartTime = src.StartTime\n\tdst.CrackedHashes = src.CrackedHashes\n\tdst.TotalHashes = src.TotalHashes\n\tdst.Progress = src.Progress\n\n\treturn dst\n}", "func (t *JobTemplate) Marshal() (interface{}, error) {\n\tvar jobFingerprint string\n\tif t.Job != nil {\n\t\tjobFingerprint = t.Job.Fingerprint\n\t}\n\n\treturn map[string]interface{}{\n\t\t\"sourcePath\": 
t.SourcePath,\n\t\t\"destinationPath\": t.DestinationPath,\n\t\t\"job\": jobFingerprint,\n\t\t\"content\": t.Content,\n\t}, nil\n}", "func (a *AzureBlobStorage) WriteWithContext(_ context.Context, msg *message.Batch) error {\n\treturn IterateBatchedSend(msg, func(i int, p *message.Part) error {\n\t\tc := a.client.GetContainerReference(a.container.String(i, msg))\n\t\tb := c.GetBlobReference(a.path.String(i, msg))\n\t\tif err := a.uploadBlob(b, a.blobType.String(i, msg), p.Get()); err != nil {\n\t\t\tif containerNotFound(err) {\n\t\t\t\tif cerr := a.createContainer(c, a.accessLevel.String(i, msg)); cerr != nil {\n\t\t\t\t\ta.log.Debugf(\"error creating container: %v.\", cerr)\n\t\t\t\t\treturn cerr\n\t\t\t\t}\n\t\t\t\terr = a.uploadBlob(b, a.blobType.String(i, msg), p.Get())\n\t\t\t\tif err != nil {\n\t\t\t\t\ta.log.Debugf(\"error retrying to upload blob: %v.\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}", "func (m *messageBatch) marshal() []byte {\n\tbufLen := unsafe.Sizeof(uint64(0)) /* NextID */ +\n\t\tunsafe.Sizeof(uint64(0)) /* len(Messages) */\n\tfor _, msg := range m.Messages {\n\t\tbufLen += 2*unsafe.Sizeof(uint64(0)) /* RobustId */ +\n\t\t\tunsafe.Sizeof(uint64(0)) /* len(Data) */ +\n\t\t\tunsafe.Sizeof(byte(0))*uintptr(len(msg.Data)) /* Data */ +\n\t\t\tunsafe.Sizeof(uint64(0)) /* len(InterestingFor) */ +\n\t\t\tunsafe.Sizeof(uint64(0))*uintptr(len(msg.InterestingFor)) /* InterestingFor */\n\t}\n\n\tbuffer := make([]byte, bufLen)\n\tn := 0\n\tbinary.LittleEndian.PutUint64(buffer[n:], m.NextID)\n\tn += 8\n\tbinary.LittleEndian.PutUint64(buffer[n:], uint64(len(m.Messages)))\n\tn += 8\n\tfor _, msg := range m.Messages {\n\t\tbinary.LittleEndian.PutUint64(buffer[n:], uint64(msg.Id.Id))\n\t\tn += 8\n\t\tbinary.LittleEndian.PutUint64(buffer[n:], uint64(msg.Id.Reply))\n\t\tn += 8\n\t\tbinary.LittleEndian.PutUint64(buffer[n:], uint64(len(msg.Data)))\n\t\tn += 8\n\t\tcopy(buffer[n:], msg.Data)\n\t\tn += len(msg.Data)\n\t\tbinary.LittleEndian.PutUint64(buffer[n:], uint64(len(msg.InterestingFor)))\n\t\tn += 8\n\t\tfor session := range msg.InterestingFor {\n\t\t\tbinary.LittleEndian.PutUint64(buffer[n:], uint64(session))\n\t\t\tn += 8\n\t\t}\n\t}\n\treturn buffer\n}", "func (*Context) Descriptor() ([]byte, []int) {\n\treturn file_ml_metadata_proto_metadata_store_proto_rawDescGZIP(), []int{8}\n}", "func UnmarshalJob(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(Job)\n\terr = core.UnmarshalPrimitive(m, \"command_object\", &obj.CommandObject)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"command_object_id\", &obj.CommandObjectID)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"command_name\", &obj.CommandName)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"command_parameter\", &obj.CommandParameter)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"command_options\", &obj.CommandOptions)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"inputs\", &obj.Inputs, UnmarshalVariableData)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"settings\", &obj.Settings, UnmarshalVariableData)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"tags\", &obj.Tags)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"id\", &obj.ID)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"name\", &obj.Name)\n\tif err != nil 
{\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"description\", &obj.Description)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"location\", &obj.Location)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"resource_group\", &obj.ResourceGroup)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"submitted_at\", &obj.SubmittedAt)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"submitted_by\", &obj.SubmittedBy)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"start_at\", &obj.StartAt)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"end_at\", &obj.EndAt)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"duration\", &obj.Duration)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"status\", &obj.Status, UnmarshalJobStatus)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"data\", &obj.Data, UnmarshalJobData)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"targets_ini\", &obj.TargetsIni)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"bastion\", &obj.Bastion, UnmarshalTargetResourceset)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"log_summary\", &obj.LogSummary, UnmarshalJobLogSummary)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"log_store_url\", &obj.LogStoreURL)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"state_store_url\", &obj.StateStoreURL)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"results_url\", &obj.ResultsURL)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"updated_at\", &obj.UpdatedAt)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func (*JobMessage) Descriptor() ([]byte, []int) {\n\treturn file_pkg_manager_http_grpc_job_proto_rawDescGZIP(), []int{0}\n}", "func (*JobId) Descriptor() ([]byte, []int) {\n\treturn file_proto_jobproc_worker_proto_rawDescGZIP(), []int{0}\n}", "func (this *L0JobContext) Copy(request string) JobContext {\n\treturn &L0JobContext{\n\t\trequest: request,\n\t\tjobID: this.jobID,\n\t\tlogic: this.logic,\n\t\tloadBalancerLogic: this.loadBalancerLogic,\n\t\tserviceLogic: this.serviceLogic,\n\t\tenvironmentLogic: this.environmentLogic,\n\t\tmutex: this.mutex,\n\t}\n}", "func encodeCreateJobFunctionResponse(_ context.Context, response interface{}) (interface{}, error) {\n\tres := response.(endpoints.CreateJobFunctionResponse)\n\terr := getError(res.Err)\n\tif err == nil {\n\t\treturn res.JobFunction.ToProto(), nil\n\t}\n\treturn nil, err\n}", "func Context(sc jaeger.SpanContext) zapcore.Field {\n\treturn zap.Object(\"context\", spanContext(sc))\n}", "func ConnectionStateExportKeyingMaterial(cs *tls.ConnectionState, label string, context []byte, length int) ([]byte, error)", "func (state RequestContextBatchState) Marshal() ([]byte, error) {\n\treturn []byte{byte(state)}, nil\n}", "func (_obj *Apichannels) Channels_exportMessageLinkWithContext(tarsCtx context.Context, params *TLchannels_exportMessageLink, _opt ...map[string]string) (ret ExportedMessageLink, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = params.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif 
len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"channels_exportMessageLink\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = ret.ReadBlock(_is, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func (t *Template) AddB64Ctx(key keys.ContextB64, value interface{}) error {\n\tvalueBytes := []byte(fmt.Sprint(value))\n\tvalueB64 := base64.StdEncoding.EncodeToString(valueBytes)\n\treturn t.AddPairToVec(keys.ContextVec, string(key), valueB64)\n}", "func (decoder *EbpfDecoder) DecodeContext(ctx *Context) error {\n\toffset := decoder.cursor\n\tif len(decoder.buffer[offset:]) < 120 {\n\t\treturn fmt.Errorf(\"can't read context from buffer: buffer too short\")\n\t}\n\tctx.Ts = binary.LittleEndian.Uint64(decoder.buffer[offset : offset+8])\n\tctx.StartTime = binary.LittleEndian.Uint64(decoder.buffer[offset+8 : offset+16])\n\tctx.CgroupID = binary.LittleEndian.Uint64(decoder.buffer[offset+16 : offset+24])\n\tctx.Pid = binary.LittleEndian.Uint32(decoder.buffer[offset+24 : offset+28])\n\tctx.Tid = binary.LittleEndian.Uint32(decoder.buffer[offset+28 : offset+32])\n\tctx.Ppid = binary.LittleEndian.Uint32(decoder.buffer[offset+32 : offset+36])\n\tctx.HostPid = binary.LittleEndian.Uint32(decoder.buffer[offset+36 : offset+40])\n\tctx.HostTid = binary.LittleEndian.Uint32(decoder.buffer[offset+40 : offset+44])\n\tctx.HostPpid = binary.LittleEndian.Uint32(decoder.buffer[offset+44 : offset+48])\n\tctx.Uid = binary.LittleEndian.Uint32(decoder.buffer[offset+48 : offset+52])\n\tctx.MntID = binary.LittleEndian.Uint32(decoder.buffer[offset+52 : offset+56])\n\tctx.PidID = binary.LittleEndian.Uint32(decoder.buffer[offset+56 : offset+60])\n\t_ = copy(ctx.Comm[:], decoder.buffer[offset+60:offset+76])\n\t_ = copy(ctx.UtsName[:], decoder.buffer[offset+76:offset+92])\n\tctx.Flags = binary.LittleEndian.Uint32(decoder.buffer[offset+92 : offset+96])\n\tctx.EventID = events.ID(int32(binary.LittleEndian.Uint32(decoder.buffer[offset+96 : offset+100])))\n\t// offset 100:103 is used for padding\n\tctx.Retval = int64(binary.LittleEndian.Uint64(decoder.buffer[offset+104 : offset+112]))\n\tctx.StackID = binary.LittleEndian.Uint32(decoder.buffer[offset+112 : offset+116])\n\tctx.ProcessorId = binary.LittleEndian.Uint16(decoder.buffer[offset+116 : offset+118])\n\tctx.Argnum = uint8(binary.LittleEndian.Uint16(decoder.buffer[offset+118 : offset+120]))\n\tdecoder.cursor += int(ctx.GetSizeBytes())\n\treturn nil\n}", "func GetClientContextPayload(ctx context.Context) map[string]interface{} {\n\tif ctx == nil {\n\t\treturn make(map[string]interface{})\n\t}\n\n\tif v := ctx.Value(clientPayloadKey); v != nil {\n\t\treturn v.(clientPayload).m\n\t}\n\n\treturn make(map[string]interface{})\n}", "func messageIDFromContext(c echo.Context) (messageID tangle.MessageID, err 
error) {\n\tswitch messageIDString := c.Param(\"messageID\"); messageIDString {\n\tcase \"EmptyMessageID\":\n\t\tmessageID = tangle.EmptyMessageID\n\tdefault:\n\t\tmessageID, err = tangle.NewMessageID(messageIDString)\n\t}\n\n\treturn\n}", "func (p *protocolACKN) Context() context.Context {\n\treturn p.ctx\n}", "func handler(ctx context.Context, request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\tfmt.Printf(\"My request: %+v\", request)\n\tfmt.Printf(\"My context: %+v\", ctx)\n\n\tlc, ok := lambdacontext.FromContext(ctx)\n\tif !ok {\n\t\treturn events.APIGatewayProxyResponse{\n\t\t\tStatusCode: http.StatusBadGateway,\n\t\t\tBody: \"Something went wrong :(\",\n\t\t}, nil\n\t}\n\n\tout := &struct {\n\t\tHeaders map[string]string `json:\"headers,omitempty\"`\n\t\tClientContext struct {\n\t\t\tClientInfo struct {\n\t\t\t\tAppPackageName string `json:\"app_package_name,omitempty\"`\n\t\t\t\tAppTitle string `json:\"app_title,omitempty\"`\n\t\t\t\tAppVersionCode string `json:\"app_version_code,omitempty\"`\n\t\t\t\tInstallationID string `json:\"installation_id,omitempty\"`\n\t\t\t} `json:\"client_info,omitemtpy,omitempty\"`\n\t\t\tCustom map[string]string `json:\"custom,omitempty\"`\n\t\t\tEnv map[string]string `json:\"env,omitempty\"`\n\t\t} `json:\"client_context,omitempty\"`\n\t\tAWSInfo struct {\n\t\t\tAwsRequestID string `json:\"aws_request_id,omitempty\"`\n\t\t\tInvokedFunctionArn string `json:\"invoked_function_arn,omitempty\"`\n\t\t} `json:\"aws_info,omitempty\"`\n\t\tNetlify map[string]interface{} `json:\"netlify,omitempty\"`\n\t}{}\n\n\tout.Headers = request.Headers\n\tout.ClientContext.Env = lc.ClientContext.Env\n\tout.ClientContext.Custom = lc.ClientContext.Custom\n\tout.ClientContext.ClientInfo.AppPackageName = lc.ClientContext.Client.AppPackageName\n\tout.ClientContext.ClientInfo.AppTitle = lc.ClientContext.Client.AppTitle\n\tout.ClientContext.ClientInfo.AppVersionCode = lc.ClientContext.Client.AppVersionCode\n\tout.ClientContext.ClientInfo.InstallationID = lc.ClientContext.Client.InstallationID\n\tout.AWSInfo.AwsRequestID = lc.AwsRequestID\n\tout.AWSInfo.InvokedFunctionArn = lc.InvokedFunctionArn\n\n\t// the netlify struct is encoded\n\tnfstr, ok := lc.ClientContext.Custom[\"netlify\"]\n\tif ok {\n\t\tfmt.Println(\"Found netlify context\")\n\t\tdecoded, err := base64.StdEncoding.DecodeString(nfstr)\n\t\tif err != nil {\n\t\t\treturn events.APIGatewayProxyResponse{\n\t\t\t\tStatusCode: http.StatusBadRequest,\n\t\t\t\tBody: err.Error(),\n\t\t\t}, nil\n\t\t}\n\t\tparsed := make(map[string]interface{})\n\t\tif err := json.Unmarshal(decoded, &parsed); err != nil {\n\t\t\treturn events.APIGatewayProxyResponse{\n\t\t\t\tStatusCode: http.StatusBadRequest,\n\t\t\t\tBody: err.Error(),\n\t\t\t}, nil\n\t\t}\n\t\tout.Netlify = parsed\n\t}\n\n\tfmt.Printf(\"Marshaling the output: %+v\\n\", out)\n\tbs, err := json.Marshal(out)\n\tif err != nil {\n\t\treturn events.APIGatewayProxyResponse{\n\t\t\tStatusCode: http.StatusInternalServerError,\n\t\t\tBody: err.Error(),\n\t\t}, nil\n\t}\n\tfmt.Println(\"It all seems good\")\n\treturn events.APIGatewayProxyResponse{\n\t\tStatusCode: http.StatusOK,\n\t\tBody: string(bs),\n\t}, nil\n}", "func toProtoJob(e *ent.Job) (*Job, error) {\n\tv := &Job{}\n\tbenefits := e.Benefits\n\tv.Benefits = benefits\n\tcreatedat := timestamppb.New(e.CreatedAt)\n\tv.CreatedAt = createdat\n\temploymenttype := toProtoJob_EmploymentType(e.EmploymentType)\n\tv.EmploymentType = employmenttype\n\tid := int32(e.ID)\n\tv.Id = id\n\tmaxsalary := 
e.MaxSalary\n\tv.MaxSalary = maxsalary\n\tminsalary := e.MinSalary\n\tv.MinSalary = minsalary\n\trequirements := e.Requirements\n\tv.Requirements = requirements\n\tresponsibilities := e.Responsibilities\n\tv.Responsibilities = responsibilities\n\tsalaryunit := toProtoJob_SalaryUnit(e.SalaryUnit)\n\tv.SalaryUnit = salaryunit\n\ttitle := e.Title\n\tv.Title = title\n\tupdatedat := timestamppb.New(e.UpdatedAt)\n\tv.UpdatedAt = updatedat\n\tif edg := e.Edges.Owner; edg != nil {\n\t\tid := int32(edg.ID)\n\t\tv.Owner = &User{\n\t\t\tId: id,\n\t\t}\n\t}\n\tfor _, edg := range e.Edges.Skills {\n\t\tid := int32(edg.ID)\n\t\tv.Skills = append(v.Skills, &Skill{\n\t\t\tId: id,\n\t\t})\n\t}\n\treturn v, nil\n}", "func (clientHandler) Write(ctx context.Context, bytes int64) context.Context { return ctx }", "func (*Job) Descriptor() ([]byte, []int) {\n\treturn file_proto_jobproc_worker_proto_rawDescGZIP(), []int{2}\n}", "func (rec *RawEventCreate) SetContext(m map[string]interface{}) *RawEventCreate {\n\trec.mutation.SetContext(m)\n\treturn rec\n}", "func (*Context) Descriptor() ([]byte, []int) {\n\treturn file_ric_action_ricaction_proto_rawDescGZIP(), []int{1}\n}", "func encodeJob(encoder *gob.Encoder, job CronJob) error {\n\treturn encoder.Encode(&job)\n}", "func (o *SendJobCommandParams) WithContext(ctx context.Context) *SendJobCommandParams {\n\to.SetContext(ctx)\n\treturn o\n}", "func createEncryptionContext(msgMeta *pb.MessageMetadata) *EncryptionContext {\n\tencCtx := EncryptionContext{\n\t\tAlgorithm: msgMeta.GetEncryptionAlgo(),\n\t\tParam: msgMeta.GetEncryptionParam(),\n\t\tUncompressedSize: int(msgMeta.GetUncompressedSize()),\n\t\tBatchSize: int(msgMeta.GetNumMessagesInBatch()),\n\t}\n\n\tif msgMeta.Compression != nil {\n\t\tencCtx.CompressionType = CompressionType(*msgMeta.Compression)\n\t}\n\n\tkeyMap := map[string]EncryptionKey{}\n\tfor _, k := range msgMeta.GetEncryptionKeys() {\n\t\tmetaMap := map[string]string{}\n\t\tfor _, m := range k.GetMetadata() {\n\t\t\tmetaMap[*m.Key] = *m.Value\n\t\t}\n\n\t\tkeyMap[*k.Key] = EncryptionKey{\n\t\t\tKeyValue: k.GetValue(),\n\t\t\tMetadata: metaMap,\n\t\t}\n\t}\n\n\tencCtx.Keys = keyMap\n\treturn &encCtx\n}", "func (*ParentContext) Descriptor() ([]byte, []int) {\n\treturn file_ml_metadata_proto_metadata_store_proto_rawDescGZIP(), []int{11}\n}", "func copyJobToHookJob(src common.Job) (dst HookJob) {\n\tdst.ID = src.UUID\n\tdst.Name = src.Name\n\tdst.Status = src.Status\n\tdst.Owner = src.Owner\n\tdst.StartTime = src.StartTime\n\tdst.CrackedHashes = src.CrackedHashes\n\tdst.TotalHashes = src.TotalHashes\n\tdst.Progress = src.Progress\n\tdst.Params = src.Parameters\n\tdst.ToolID = src.ToolUUID\n\tdst.PerformanceTitle = src.PerformanceTitle\n\tdst.PerformanceData = src.PerformanceData\n\tdst.OutputTitles = src.OutputTitles\n\tdst.OutputData = src.OutputData\n\n\treturn dst\n}", "func NewContext() {\n\tif messageQueue != nil {\n\t\treturn\n\t}\n\n\tmessageQueue = make(chan *Message, messageQueueLength)\n\tgo processMessageQueue()\n}", "func AddToClientContextPayload(ctx context.Context, key string, value interface{}) {\n\tif value != nil {\n\t\tGetClientContextPayload(ctx)[key] = value\n\t}\n}", "func (state RequestContextState) Marshal() ([]byte, error) {\n\treturn []byte{byte(state)}, nil\n}", "func BodyFrom(ctx context.Context) []byte {\n\tv, _ := ctx.Value(keyBody).([]byte)\n\treturn v\n}", "func GetServerContextPayload(ctx context.Context) map[string]interface{} {\n\tif ctx == nil {\n\t\treturn make(map[string]interface{})\n\t}\n\n\tif v := 
ctx.Value(serverPayloadKey); v != nil {\n\t\treturn v.(serverPayload).m\n\t}\n\n\treturn make(map[string]interface{})\n}", "func (*JobResources) Descriptor() ([]byte, []int) {\n\treturn file_toit_model_job_proto_rawDescGZIP(), []int{3}\n}", "func encodeCreateJobPlatformResponse(_ context.Context, response interface{}) (interface{}, error) {\n\tres := response.(endpoints.CreateJobPlatformResponse)\n\terr := getError(res.Err)\n\tif err == nil {\n\t\treturn res.JobPlatform.ToProto(), nil\n\t}\n\treturn nil, err\n}", "func (s CreateJobInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/json\"), protocol.Metadata{})\n\n\tif s.HopDestinations != nil {\n\t\tv := s.HopDestinations\n\n\t\tmetadata := protocol.Metadata{}\n\t\tls0 := e.List(protocol.BodyTarget, \"hopDestinations\", metadata)\n\t\tls0.Start()\n\t\tfor _, v1 := range v {\n\t\t\tls0.ListAddFields(v1)\n\t\t}\n\t\tls0.End()\n\t}\n\tif s.AccelerationSettings != nil {\n\t\tv := s.AccelerationSettings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"accelerationSettings\", v, metadata)\n\t}\n\tif len(s.BillingTagsSource) > 0 {\n\t\tv := s.BillingTagsSource\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"billingTagsSource\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tvar ClientRequestToken string\n\tif s.ClientRequestToken != nil {\n\t\tClientRequestToken = *s.ClientRequestToken\n\t} else {\n\t\tClientRequestToken = protocol.GetIdempotencyToken()\n\t}\n\t{\n\t\tv := ClientRequestToken\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"clientRequestToken\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.JobTemplate != nil {\n\t\tv := *s.JobTemplate\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"jobTemplate\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Priority != nil {\n\t\tv := *s.Priority\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"priority\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.Queue != nil {\n\t\tv := *s.Queue\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"queue\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Role != nil {\n\t\tv := *s.Role\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"role\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Settings != nil {\n\t\tv := s.Settings\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"settings\", v, metadata)\n\t}\n\tif len(s.SimulateReservedQueue) > 0 {\n\t\tv := s.SimulateReservedQueue\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"simulateReservedQueue\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif len(s.StatusUpdateInterval) > 0 {\n\t\tv := s.StatusUpdateInterval\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"statusUpdateInterval\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.Tags != nil {\n\t\tv := s.Tags\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"tags\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\tif 
s.UserMetadata != nil {\n\t\tv := s.UserMetadata\n\n\t\tmetadata := protocol.Metadata{}\n\t\tms0 := e.Map(protocol.BodyTarget, \"userMetadata\", metadata)\n\t\tms0.Start()\n\t\tfor k1, v1 := range v {\n\t\t\tms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)})\n\t\t}\n\t\tms0.End()\n\n\t}\n\treturn nil\n}", "func WithBody(ctx context.Context, v []byte) context.Context {\n\treturn context.WithValue(ctx, keyBody, v)\n}", "func execmRawMessageMarshalJSON(_ int, p *gop.Context) {\n\targs := p.GetArgs(1)\n\tret, ret1 := args[0].(json.RawMessage).MarshalJSON()\n\tp.Ret(1, ret, ret1)\n}", "func SerializeJob(job *work.Job) ([]byte, error) {\n\treturn json.Marshal(job)\n}", "func (s MqttContext) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ClientId != nil {\n\t\tv := *s.ClientId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"clientId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.Password != nil {\n\t\tv := s.Password\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"password\", protocol.QuotedValue{ValueMarshaler: protocol.BytesValue(v)}, metadata)\n\t}\n\tif s.Username != nil {\n\t\tv := *s.Username\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"username\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func createJobWithCustomAttributes(w io.Writer, projectID, companyID, jobTitle string) (*talentpb.Job, error) {\n\tctx := context.Background()\n\n\t// Initialize a job service client.\n\tc, err := talent.NewJobClient(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"talent.NewJobClient: %w\", err)\n\t}\n\tdefer c.Close()\n\n\t// requisitionID shoud be the unique ID in your system\n\trequisitionID := fmt.Sprintf(\"job-with-custom-attribute-%s\", uuid.Must(uuid.NewV4()).String())\n\tjobToCreate := &talentpb.Job{\n\t\tCompany: fmt.Sprintf(\"projects/%s/companies/%s\", projectID, companyID),\n\t\tRequisitionId: requisitionID,\n\t\tTitle: jobTitle,\n\t\tApplicationInfo: &talentpb.Job_ApplicationInfo{\n\t\t\tUris: []string{\"https://googlesample.com/career\"},\n\t\t},\n\t\tDescription: \"Design, devolop, test, deploy, maintain and improve software.\",\n\t\tLanguageCode: \"en-US\",\n\t\tPromotionValue: 2,\n\t\tEmploymentTypes: []talentpb.EmploymentType{talentpb.EmploymentType_FULL_TIME},\n\t\tAddresses: []string{\"Mountain View, CA\"},\n\t\tCustomAttributes: map[string]*talentpb.CustomAttribute{\n\t\t\t\"someFieldString\": {\n\t\t\t\tFilterable: true,\n\t\t\t\tStringValues: []string{\"someStrVal\"},\n\t\t\t},\n\t\t\t\"someFieldLong\": {\n\t\t\t\tFilterable: true,\n\t\t\t\tLongValues: []int64{900},\n\t\t\t},\n\t\t},\n\t\tCompensationInfo: &talentpb.CompensationInfo{\n\t\t\tEntries: []*talentpb.CompensationInfo_CompensationEntry{\n\t\t\t\t{\n\t\t\t\t\tType: talentpb.CompensationInfo_BASE,\n\t\t\t\t\tUnit: talentpb.CompensationInfo_HOURLY,\n\t\t\t\t\tCompensationAmount: &talentpb.CompensationInfo_CompensationEntry_Amount{\n\t\t\t\t\t\tAmount: &money.Money{\n\t\t\t\t\t\t\tCurrencyCode: \"USD\",\n\t\t\t\t\t\t\tUnits: 1,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t// Construct a createJob request.\n\treq := &talentpb.CreateJobRequest{\n\t\tParent: fmt.Sprintf(\"projects/%s\", projectID),\n\t\tJob: jobToCreate,\n\t}\n\n\tresp, err := c.CreateJob(ctx, req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"CreateJob: %w\", err)\n\t}\n\n\tfmt.Fprintf(w, \"Created 
job with custom attributres: %q\\n\", resp.GetName())\n\tfmt.Fprintf(w, \"Custom long field has value: %v\\n\", resp.GetCustomAttributes()[\"someFieldLong\"].GetLongValues())\n\n\treturn resp, nil\n}", "func jobKey(j *pps.Job) string {\n\treturn fmt.Sprintf(\"%s@%s\", j.Pipeline, j.Id)\n}", "func (c *Context) Body() []byte {\n\treturn c.Request.Body()\n}", "func (*JobDetail) Descriptor() ([]byte, []int) {\n\treturn file_proto_jobproc_worker_proto_rawDescGZIP(), []int{3}\n}", "func (_obj *DataService) InsertMessageOneWayWithContext(tarsCtx context.Context, msg *Message, _opt ...map[string]string) (ret int32, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = msg.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 1, \"insertMessage\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func (m *JobsJob) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateActions(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateContextMetaFilter(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateDataSourceFilter(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateHooks(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateIdmFilter(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateMergeAction(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateNodeEventFilter(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateParameters(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateResourcesDependencies(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateSchedule(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateTasks(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateUserEventFilter(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func ExtendPlushContext(ctx *plush.Context) {\n\tctx.Set(\"castArgs\", CastArgs)\n\tctx.Set(\"genValidArg\", GenerateValidArg)\n\tctx.Set(\"castToBytes\", CastToBytes)\n}", "func ObjectMultipartFeatureContext(s *godog.Suite) {\n\ts.Step(`^initiate multipart upload with key \"(.{1,})\"$`, initiateMultipartUploadWithKey)\n\ts.Step(`^initiate multipart upload status code is (\\d+)$`, 
initiateMultipartUploadStatusCodeIs)\n\n\ts.Step(`^upload the first part with key \"(.{1,})\"$`, uploadTheFirstPartWithKey)\n\ts.Step(`^upload the first part status code is (\\d+)$`, uploadTheFirstPartStatusCodeIs)\n\ts.Step(`^upload the second part with key \"(.{1,})\"$`, uploadTheSecondPartWithKey)\n\ts.Step(`^upload the second part status code is (\\d+)$`, uploadTheSecondPartStatusCodeIs)\n\ts.Step(`^upload the third part with key \"(.{1,})\"$`, uploadTheThirdPartWithKey)\n\ts.Step(`^upload the third part status code is (\\d+)$`, uploadTheThirdPartStatusCodeIs)\n\n\ts.Step(`^list multipart with key \"(.{1,})\"$`, listMultipartWithKey)\n\ts.Step(`^list multipart status code is (\\d+)$`, listMultipartStatusCodeIs)\n\ts.Step(`^list multipart object parts count is (\\d+)$`, listMultipartObjectPartsCountIs)\n\n\ts.Step(`^complete multipart upload with key \"(.{1,})\"$`, completeMultipartUploadWithKey)\n\ts.Step(`^complete multipart upload status code is (\\d+)$`, completeMultipartUploadStatusCodeIs)\n\n\ts.Step(`^abort multipart upload with key \"(.{1,})\"$`, abortMultipartUploadWithKey)\n\ts.Step(`^abort multipart upload status code is (\\d+)$`, abortMultipartUploadStatusCodeIs)\n\n\ts.Step(`^delete the multipart object with key \"(.{1,})\"$`, deleteTheMultipartObjectWithKey)\n\ts.Step(`^delete the multipart object status code is (\\d+)$`, deleteTheMultipartObjectStatusCodeIs)\n}", "func (tc ConcreteTemplatesCompiler) compileJob(job bpdep.Job, instance bpdep.Instance) (string, string, error) {\n\tjobReaders, err := tc.buildJobReaders(job)\n\tif err != nil {\n\t\treturn \"\", \"\", bosherr.WrapError(err, \"Building job readers\")\n\t}\n\n\tvar relJobs []bpreljob.Job\n\n\tfor _, jobReader := range jobReaders {\n\t\trelJob, err := jobReader.tarReader.Read()\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", bosherr.WrapError(err, \"Reading job\")\n\t\t}\n\n\t\tdefer jobReader.tarReader.Close()\n\n\t\terr = tc.associatePackages(jobReader.rec, relJob)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", bosherr.WrapError(err, \"Preparing runtime dep packages\")\n\t\t}\n\t\trelJob.DeploymentJobTemplates = job.Templates\n\n\t\trelJobs = append(relJobs, relJob)\n\t}\n\n\trenderedArchivePath, err := tc.renderedArchivesCompiler.Compile(relJobs, instance)\n\tif err != nil {\n\t\treturn \"\", \"\", bosherr.WrapError(err, \"Compiling templates\")\n\t}\n\n\tdefer tc.renderedArchivesCompiler.CleanUp(renderedArchivePath)\n\n\tblobID, fingerprint, err := tc.blobstore.Create(renderedArchivePath)\n\tif err != nil {\n\t\treturn \"\", \"\", bosherr.WrapError(err, \"Creating compiled templates\")\n\t}\n\n\treturn blobID, fingerprint, nil\n}", "func (o CiphertextOutput) Context() pulumi.StringMapOutput {\n\treturn o.ApplyT(func(v *Ciphertext) pulumi.StringMapOutput { return v.Context }).(pulumi.StringMapOutput)\n}", "func (*JobID) Descriptor() ([]byte, []int) {\n\treturn file_api_ops_proto_rawDescGZIP(), []int{6}\n}", "func bindContext(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(ContextABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func bindContext(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(ContextABI))\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func bindContext(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(ContextABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func bindContext(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(ContextABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func AddToServerContextPayload(ctx context.Context, key string, value interface{}) {\n\tif value != nil {\n\t\tGetServerContextPayload(ctx)[key] = value\n\t}\n}", "func (a Put) Marshal(tk *destination.Toolkit) (*destination.Job, error) {\n\n\t// Try to marshal the data passed directly to the receiver.\n\tdata, err := json.Marshal(&a)\n\tif err != nil {\n\t\treturn nil, &errors.Error{\n\t\t\tStatusCode: 400,\n\t\t\tMessage: \"Bad Request\",\n\t\t}\n\t}\n\n\t// Create a job with the data. Since the 'Context' key is not\n\t// set, the one from the event will automatically be applied.\n\tj := &destination.Job{\n\t\tData: data,\n\t}\n\n\t// Return the job including the marshaled data.\n\treturn j, nil\n}", "func (_obj *DataService) CreateActivityRecordWithContext(tarsCtx context.Context, wx_id string, activity_id string, _opt ...map[string]string) (ret int32, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = _os.Write_string(wx_id, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _os.Write_string(activity_id, 2)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"createActivityRecord\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = _is.Read_int32(&ret, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func testContext(t *testing.T) *context.Context {\n\tbucketName := []byte(\"jobs_test\")\n\tdb := testDB(t)\n\tdb.Update(func(tx *bolt.Tx) error {\n\t\t_, err := tx.CreateBucketIfNotExists(bucketName)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\treturn nil\n\t})\n\treturn context.New(bucketName, db)\n}", "func (*JobInfo) Descriptor() ([]byte, []int) {\n\treturn file_toit_model_job_proto_rawDescGZIP(), []int{22}\n}", "func (*CombinePayload) Descriptor() ([]byte, []int) {\n\treturn 
file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{21}\n}", "func JobSubmitHandler(c *Context, w http.ResponseWriter, r *http.Request) {\n\ttype Request struct {\n\t\tJobs []Job `json:\"jobs\"`\n\t}\n\n\ttype Response struct {\n\t\tJIDs []uint64 `json:\"jids\"`\n\t}\n\n\taccount, err := Authenticate(c, w, r)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err,\n\t\t}).Error(\"Authentication failure.\")\n\t\treturn\n\t}\n\n\tvar req Request\n\terr = json.NewDecoder(r.Body).Decode(&req)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err,\n\t\t\t\"account\": account.Name,\n\t\t}).Error(\"Unable to parse JSON.\")\n\n\t\tRhoError{\n\t\t\tCode: CodeInvalidJobJSON,\n\t\t\tMessage: fmt.Sprintf(\"Unable to parse job payload as JSON: %v\", err),\n\t\t\tHint: \"Please supply valid JSON in your request.\",\n\t\t\tRetry: false,\n\t\t}.Report(http.StatusBadRequest, w)\n\t\treturn\n\t}\n\n\tjids := make([]uint64, len(req.Jobs))\n\tfor index, job := range req.Jobs {\n\t\t// Validate the job.\n\t\tif err := job.Validate(); err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"account\": account.Name,\n\t\t\t\t\"job\": job,\n\t\t\t\t\"error\": err,\n\t\t\t}).Error(\"Invalid job submitted.\")\n\n\t\t\terr.Report(http.StatusBadRequest, w)\n\t\t\treturn\n\t\t}\n\n\t\t// Pack the job into a SubmittedJob and store it.\n\t\tsubmitted := SubmittedJob{\n\t\t\tJob: job,\n\t\t\tCreatedAt: StoreTime(time.Now()),\n\t\t\tStatus: StatusQueued,\n\t\t\tAccount: account.Name,\n\t\t}\n\t\tjid, err := c.InsertJob(submitted)\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"account\": account.Name,\n\t\t\t\t\"error\": err,\n\t\t\t}).Error(\"Unable to enqueue a submitted job.\")\n\n\t\t\tRhoError{\n\t\t\t\tCode: CodeEnqueueFailure,\n\t\t\t\tMessage: \"Unable to enqueue your job.\",\n\t\t\t\tRetry: true,\n\t\t\t}.Report(http.StatusServiceUnavailable, w)\n\t\t\treturn\n\t\t}\n\n\t\tjids[index] = jid\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"jid\": jid,\n\t\t\t\"job\": job,\n\t\t\t\"account\": account.Name,\n\t\t}).Info(\"Successfully submitted a job.\")\n\t}\n\n\tresponse := Response{JIDs: jids}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tjson.NewEncoder(w).Encode(response)\n}", "func prepareJSONPayload(rawReq *GenericRequest) (*bytes.Buffer, error) {\n\t// When payload ready, convert it to Json format\n\tbReqData, err := json.Marshal(&rawReq)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// @TODO Debug print marshal body\n\tfmt.Println(\"RAW Marshal BODY \" + string(bReqData))\n\n\t// Write json object to buffer\n\tbuffer := bytes.NewBuffer(bReqData)\n\n\treturn buffer, nil\n}", "func (c *JobsCreateCall) Context(ctx context.Context) *JobsCreateCall {\n\tc.ctx_ = ctx\n\treturn c\n}", "func (m *JobJob) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateFilament(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateFile(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func ReadContext(path string) (*fintpb.Context, error) {\n\tbytes, err := os.ReadFile(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar message fintpb.Context\n\tif err := prototext.Unmarshal(bytes, &message); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &message, nil\n}", "func SendingContext(ctx context.Context, tctx 
cloudevents.HTTPTransportContext, targetURI *url.URL) context.Context {\n\tsendingCTX := cloudevents.ContextWithTarget(ctx, targetURI.String())\n\n\th := ExtractPassThroughHeaders(tctx)\n\tfor n, v := range h {\n\t\tfor _, iv := range v {\n\t\t\tsendingCTX = cloudevents.ContextWithHeader(sendingCTX, n, iv)\n\t\t}\n\t}\n\n\treturn sendingCTX\n}", "func (*JobRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_jobproc_worker_proto_rawDescGZIP(), []int{1}\n}", "func (w *Worker) Context() context.Context {\n\treturn w.ctx\n}", "func (*JobFeatures) Descriptor() ([]byte, []int) {\n\treturn file_toit_model_job_proto_rawDescGZIP(), []int{4}\n}", "func (s GetJobInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.AppId != nil {\n\t\tv := *s.AppId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"appId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BranchName != nil {\n\t\tv := *s.BranchName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"branchName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.JobId != nil {\n\t\tv := *s.JobId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"jobId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}", "func (c *client) exportContext(parent context.Context) (context.Context, context.CancelFunc) {\n\tvar (\n\t\tctx context.Context\n\t\tcancel context.CancelFunc\n\t)\n\n\tif c.exportTimeout > 0 {\n\t\tctx, cancel = context.WithTimeout(parent, c.exportTimeout)\n\t} else {\n\t\tctx, cancel = context.WithCancel(parent)\n\t}\n\n\tif c.metadata.Len() > 0 {\n\t\tctx = metadata.NewOutgoingContext(ctx, c.metadata)\n\t}\n\n\treturn ctx, cancel\n}", "func (s JobExecution) MarshalFields(e protocol.FieldEncoder) error {\n\tif s.ApproximateSecondsBeforeTimedOut != nil {\n\t\tv := *s.ApproximateSecondsBeforeTimedOut\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"approximateSecondsBeforeTimedOut\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.ExecutionNumber != nil {\n\t\tv := *s.ExecutionNumber\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"executionNumber\", protocol.Int64Value(v), metadata)\n\t}\n\tif s.ForceCanceled != nil {\n\t\tv := *s.ForceCanceled\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"forceCanceled\", protocol.BoolValue(v), metadata)\n\t}\n\tif s.JobId != nil {\n\t\tv := *s.JobId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"jobId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.LastUpdatedAt != nil {\n\t\tv := *s.LastUpdatedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"lastUpdatedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.QueuedAt != nil {\n\t\tv := *s.QueuedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"queuedAt\",\n\t\t\tprotocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif s.StartedAt != nil {\n\t\tv := *s.StartedAt\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"startedAt\",\n\t\t\tprotocol.TimeValue{V: v, 
Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata)\n\t}\n\tif len(s.Status) > 0 {\n\t\tv := s.Status\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"status\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.StatusDetails != nil {\n\t\tv := s.StatusDetails\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetFields(protocol.BodyTarget, \"statusDetails\", v, metadata)\n\t}\n\tif s.ThingArn != nil {\n\t\tv := *s.ThingArn\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"thingArn\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.VersionNumber != nil {\n\t\tv := *s.VersionNumber\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"versionNumber\", protocol.Int64Value(v), metadata)\n\t}\n\treturn nil\n}", "func (j *Job) Save(ctx context.Context) (err error) {\n\tt := utils.FromTaskContext(ctx)\n\n\tcontent, err := msgpack.Marshal(j)\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = contexts.DB.Put(constants.FormatJobKey(t, j.Path), content, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}", "func (s StartJobInput) MarshalFields(e protocol.FieldEncoder) error {\n\te.SetValue(protocol.HeaderTarget, \"Content-Type\", protocol.StringValue(\"application/x-amz-json-1.1\"), protocol.Metadata{})\n\n\tif s.CommitId != nil {\n\t\tv := *s.CommitId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"commitId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CommitMessage != nil {\n\t\tv := *s.CommitMessage\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"commitMessage\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.CommitTime != nil {\n\t\tv := *s.CommitTime\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"commitTime\", protocol.TimeValue{V: v, Format: protocol.UnixTimeFormat}, metadata)\n\t}\n\tif s.JobId != nil {\n\t\tv := *s.JobId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"jobId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.JobReason != nil {\n\t\tv := *s.JobReason\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"jobReason\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif len(s.JobType) > 0 {\n\t\tv := s.JobType\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.BodyTarget, \"jobType\", protocol.QuotedValue{ValueMarshaler: v}, metadata)\n\t}\n\tif s.AppId != nil {\n\t\tv := *s.AppId\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"appId\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\tif s.BranchName != nil {\n\t\tv := *s.BranchName\n\n\t\tmetadata := protocol.Metadata{}\n\t\te.SetValue(protocol.PathTarget, \"branchName\", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)\n\t}\n\treturn nil\n}" ]
[ "0.5882201", "0.5235766", "0.52184343", "0.5215826", "0.50911605", "0.5015357", "0.5003859", "0.4904247", "0.48560473", "0.4854567", "0.48491663", "0.48325953", "0.48104867", "0.48014134", "0.47813973", "0.4768167", "0.47045356", "0.46879327", "0.46729985", "0.46666056", "0.4653357", "0.46519908", "0.4647546", "0.46357688", "0.46340892", "0.4593928", "0.45735604", "0.4567454", "0.4565604", "0.45531383", "0.4549965", "0.45494837", "0.45345137", "0.45237538", "0.4521958", "0.44801182", "0.44757372", "0.44747648", "0.447149", "0.44613922", "0.4451195", "0.44418257", "0.44374037", "0.44356844", "0.44344488", "0.44270292", "0.4418756", "0.4415515", "0.4408488", "0.44077316", "0.4401351", "0.4397972", "0.43961853", "0.43936032", "0.43920752", "0.4388114", "0.4385354", "0.43838167", "0.43715715", "0.4370819", "0.4364661", "0.43493265", "0.43473613", "0.43472043", "0.4340572", "0.43368694", "0.4328788", "0.4306772", "0.42998663", "0.42980763", "0.42954794", "0.42880502", "0.42872524", "0.42799625", "0.42787108", "0.42718267", "0.42708862", "0.42708862", "0.42708862", "0.42708862", "0.42682913", "0.42682177", "0.42658433", "0.4264377", "0.42600775", "0.42559338", "0.4255914", "0.42493248", "0.4247552", "0.424608", "0.42434382", "0.42369735", "0.42299116", "0.4228852", "0.42112994", "0.42083964", "0.4202732", "0.42021015", "0.42011118", "0.41992122" ]
0.63743937
0
Convert is file convert command
func Convert(c *cli.Context) {
	var wg sync.WaitGroup
	for _, path := range c.Args() {
		wg.Add(1)
		go func(p string) {
			defer wg.Done()
			writeFile(p)
		}(path)
	}
	wg.Wait()
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func convertFile(path string, convBootstrap convArray, convAddresses convAddrs) error {\n\tin, err := os.Open(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Create a temp file to write the output to on success\n\tout, err := atomicfile.New(path, 0600)\n\tif err != nil {\n\t\tin.Close()\n\t\treturn err\n\t}\n\n\terr = convert(in, out, convBootstrap, convAddresses)\n\n\tin.Close()\n\n\tif err != nil {\n\t\t// There was an error so abort writing the output and clean up temp file\n\t\tout.Abort()\n\t} else {\n\t\t// Write the output and clean up temp file\n\t\tout.Close()\n\t}\n\n\treturn err\n}", "func Convert(filename, destExt string) error {\n\tsrcFile, err := os.Open(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer srcFile.Close()\n\n\tswitch destExt {\n\tcase \"png\":\n\t\terr := convertToPNG(filename, srcFile)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase \"jpeg\", \"jpg\":\n\t\terr := convertToJPEG(filename, srcFile)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase \"gif\":\n\t\terr := convertToGIF(filename, srcFile)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func convertFile(path string, overwrite bool, importPath string, protocPath string) error {\n\tif filepath.Ext(path) != \".proto\" {\n\t\treturn fmt.Errorf(\"convert requires a .proto file\")\n\t}\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to read file %q: %v\", path, err)\n\t}\n\tdefer file.Close()\n\tfilename := filepath.Base(path)\n\tfileToWrite := strings.Replace(filename, \".proto\", \".gunk\", 1)\n\tfullpath := filepath.Join(filepath.Dir(path), fileToWrite)\n\tif _, err := os.Stat(fullpath); !os.IsNotExist(err) && !overwrite {\n\t\treturn fmt.Errorf(\"path already exists %q, use --overwrite\", fullpath)\n\t}\n\tvar b bytes.Buffer\n\tif err := loader.ConvertFromProto(&b, file, filename, importPath, protocPath); err != nil {\n\t\treturn err\n\t}\n\tresult, err := format.Source(b.Bytes())\n\tif err != nil {\n\t\t// Also print the source being formatted, since the go/format\n\t\t// error often points at a specific error in one of its lines.\n\t\tfmt.Fprintln(os.Stderr, b.String())\n\t\treturn err\n\t}\n\tif err := ioutil.WriteFile(fullpath, result, 0o644); err != nil {\n\t\treturn fmt.Errorf(\"unable to write to file %q: %v\", fullpath, err)\n\t}\n\treturn nil\n}", "func Convert(source, target string) error {\n\tss, err := sources(source)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"looking for sources in %s\", source)\n\t}\n\tfor _, s := range ss {\n\t\tt := filepath.Join(target, chext(s, \".json\"))\n\t\tlog.Printf(\"Transforming %s to %s\", s, t)\n\t\terr := transform(s, t)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"transforming %s\", s)\n\t\t}\n\t}\n\treturn nil\n}", "func convertRunCmd(cmd *cobra.Command, args []string) error {\n\tnoindent := viper.GetBool(\"noindent\")\n\n\tif len(args) == 0 {\n\t\traw, err := ioutil.ReadAll(os.Stdin)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"error reading file\")\n\t\t}\n\t\toutput, err := jsonify.Converter(raw, noindent)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"error converting input\")\n\t\t}\n\t\tfmt.Printf(\"%s\", output)\n\t\treturn nil\n\t}\n\n\tfor _, filepath := range args {\n\t\traw, err := ioutil.ReadFile(filepath)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"error reading file\")\n\t\t}\n\t\toutput, err := jsonify.Converter(raw, noindent)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"error converting 
input\")\n\t\t}\n\t\tfmt.Printf(\"%s\", output)\n\t}\n\n\treturn nil\n}", "func Convert(ctx context.Context, filePath string) error {\n\tfile, err := os.Open(filePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\n\timgRef, err := vips.LoadImage(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := vips.Webpsave(imgRef.Image(), OutFilePath); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func InputFileConverter(input string, output string) (string, error) {\n\n\tif (input == \"\") {\n\t\treturn \"\", errors.New(\"no input file name\")\n\t}\n\tif (output == \"\") {\n\t\treturn \"\", errors.New(\"no output file name\")\n\t}\n\tdat, err := os.Open(input)\n\tif (err != nil) {\n\t\treturn \"\", errors.New(fmt.Sprintf(\"unable to open file: %v\", input))\n\t}\n\tdefer dat.Close()\n\treader := csv.NewReader(dat)\n\treader.Comma = '\\t'\n\treader.FieldsPerRecord = -1\n\tcsvData, err := reader.ReadAll()\n\tif err != nil {\n\t\tfmt.Println(\"unable to read file\")\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\tvar oneRecord types.Record\n\tvar allRecords []types.Record\n\tfor _, each := range csvData {\n\t\toneRecord.Title = each[titleColumn]\n\t\toneRecord.IarchiveID = each[iarchiveColumn]\n\t\toneRecord.Oclc = each[oclcColumn]\n\t\tallRecords = append(allRecords, oneRecord)\n\t}\n\tstart := fmt.Sprintf(\"Processing %v records.\", len(allRecords))\n\tfmt.Println(start)\n\tjsondata, err := json.Marshal(allRecords) // convert to JSON\n\tif err != nil {\n\t\tfmt.Println(\"error marshalling records\")\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\tjsonFile, err := os.Create(output)\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tdefer jsonFile.Close()\n\tjsonFile.Write(jsondata)\n\tjsonFile.Close()\n\tmessage := fmt.Sprintf(\"Written to json file: %v\", string(output))\n\treturn message, nil\n}", "func (spirv *SPIRVCross) Convert(path, variant string, shader []byte, target, version string) (string, error) {\n\tbase := spirv.WorkDir.Path(filepath.Base(path), variant)\n\n\tif err := spirv.WorkDir.WriteFile(base, shader); err != nil {\n\t\treturn \"\", fmt.Errorf(\"unable to write shader to disk: %w\", err)\n\t}\n\n\tvar cmd *exec.Cmd\n\tswitch target {\n\tcase \"glsl\":\n\t\tcmd = exec.Command(spirv.Bin,\n\t\t\t\"--no-es\",\n\t\t\t\"--version\", version,\n\t\t)\n\tcase \"es\":\n\t\tcmd = exec.Command(spirv.Bin,\n\t\t\t\"--es\",\n\t\t\t\"--version\", version,\n\t\t)\n\tcase \"hlsl\":\n\t\tcmd = exec.Command(spirv.Bin,\n\t\t\t\"--hlsl\",\n\t\t\t\"--shader-model\", version,\n\t\t)\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"unknown target %q\", target)\n\t}\n\tcmd.Args = append(cmd.Args, \"--no-420pack-extension\", base)\n\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"%s\\nfailed to run %v: %w\", out, cmd.Args, err)\n\t}\n\ts := string(out)\n\tif target != \"hlsl\" {\n\t\t// Strip Windows \\r in line endings.\n\t\ts = unixLineEnding(s)\n\t}\n\n\treturn s, nil\n}", "func convertFile(filename string) (media map[string]struct{}, err error) {\n\tsrc, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\tlog.Fatal(\"Cannot read file \" + filename + \"\\n\" + err.Error())\n\t}\n\tname := filepath.Base(filename)\n\text := \".md\"\n\tbasename := base(name) // strip \".go\"\n\toutname := filepath.Join(*outDir, basename) + ext\n\tmd, media, err := convert(string(src))\n\tif err != nil {\n\t\treturn nil, errors.New(\"Error converting \" + filename + \"\\n\" + err.Error())\n\t}\n\terr = createPath(*outDir)\n\tif err != nil {\n\t\treturn nil, 
err // The error message from createPath is chatty enough.\n\t}\n\terr = ioutil.WriteFile(outname, []byte(md), 0644) // -rw-r--r--\n\tif err != nil {\n\t\treturn nil, errors.New(\"Cannot write file \" + outname + \" \\n\" + err.Error())\n\t}\n\treturn media, nil\n}", "func (c *ConvImg) Convert() error {\n\tfiles, err := findImages(c.Path, c.From, log.New(os.Stdout, \"convimg\",1))\n\tif err != nil {\n\t\treturn err\n\t}\n\tswitch c.To {\n\tcase \"png\":\n\t\tfmt.Printf(\"%v\", files)\n\t\terr = convertToPngs(files)\n\tcase \"jpg\":\n\t\terr = convertToJpgs(files)\n\tcase \"gif\":\n\t\terr = convertToGifs(files)\n\tdefault:\n\t\treturn errors.New(\"invalid extension\")\n\t}\n\tif err != nil {\n\t\tfmt.Printf(\"%v\", err)\n\t}\n\treturn nil\n}", "func FileConvertCodepage(fileName string, fromCP, toCP IDCodePage) error {\n\tswitch {\n\tcase fromCP == toCP:\n\t\treturn nil\n\tcase (fromCP != CP1251) && (fromCP != CP866):\n\t\treturn nil\n\tcase (toCP != CP1251) && (toCP != CP866):\n\t\treturn nil\n\t}\n\tiFile, err := os.Open(fileName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer iFile.Close()\n\n\t//TODO need using system tmp folder\n\ttmpFileName := fileName + \"~\"\n\toFile, err := os.Create(tmpFileName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer oFile.Close()\n\n\ts := \"\"\n\tiScanner := bufio.NewScanner(iFile)\n\tfor i := 0; iScanner.Scan(); i++ {\n\t\ts = iScanner.Text()\n\t\ts, err = StrConvertCodepage(s, fromCP, toCP)\n\t\tif err != nil {\n\t\t\toFile.Close()\n\t\t\tos.Remove(tmpFileName)\n\t\t\treturn fmt.Errorf(\"code page convert error on file '%s': %v\", fileName, err)\n\t\t}\n\t\tfmt.Fprintf(oFile, \"%s\\n\", s)\n\t}\n\toFile.Close()\n\tiFile.Close()\n\treturn os.Rename(tmpFileName, fileName)\n}", "func (c *Client) ConvertFile(filename string, width uint) (string, error) {\n\timage, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn c.ConvertBytes(image, width)\n}", "func (sfs *SoundFileInfoService) PerformSoundFileConversions(sourcePath string) (error, string, string) {\n\tfileBaseName := filepath.Base(sourcePath)\n\tfileName := strings.TrimSuffix(fileBaseName, filepath.Ext(fileBaseName))\n\n\t// create new destination path\n\tdestinationPath := \"/tmp/\"\n\n\twavDestinationPath := destinationPath + fileName + \".wav\"\n\tpcmaDestinationPath := destinationPath + fileName + \".PCMU\"\n\n\tlogrus.Infoln(\".wav destination path : \" + wavDestinationPath + \"\\n\")\n\n\t// generate .wav file\n\tcommand := fmt.Sprintf(\"ffmpeg -i %s -acodec pcm_s16le -ac 1 -ar 8000 %s >/dev/null 2>&1\", sourcePath, wavDestinationPath)\n\n\tlogrus.Infoln(\".wav conversion command : \" + command)\n\n\tout, error1 := exec.Command(\"sh\", \"-c\", command).Output()\n\tif error1 != nil {\n\t\tlogrus.Errorln(\"Error converting file to .wav : \")\n\t\tlogrus.Errorln(error1.Error())\n\t\tlogrus.Infoln(\"\\n\")\n\t\treturn errors.New(\"Error converting file to .wav\"), \"\", \"\"\n\t}\n\n\tfmt.Printf(\"%s\\n\\n\", out)\n\n\t// generate .pcma file\n\tfsPCMA := fmt.Sprintf(\"sox %s -t raw -r 8k -b 8 -c 1 -e u-law %s\", wavDestinationPath, pcmaDestinationPath)\n\tlogrus.Infoln(\"pcma command : \" + fsPCMA + \"\\n\")\n\tfsPCMAO, error2 := exec.Command(\"sh\", \"-c\", fsPCMA).Output()\n\tif error2 != nil {\n\t\tcmlutils.DeleteFile(wavDestinationPath)\n\t\tlogrus.Errorln(\"Error converting file to .pcma\")\n\t\tlogrus.Errorln(error2.Error())\n\t\treturn errors.New(\"Error converting file to .PCMA\"), \"\", \"\"\n\t}\n\tfmt.Printf(\"%s\\n\\n\", fsPCMAO)\n\n\treturn 
nil, wavDestinationPath, pcmaDestinationPath\n}", "func convert() {\n\tf, err := os.Open(\"10-million-combos.txt\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer f.Close()\n\n\tpws, err := os.Create(\"10-mil-pws.txt\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer pws.Close()\n\n\tscanner := bufio.NewScanner(f)\n\tfor scanner.Scan() {\n\t\tparts := strings.Split(scanner.Text(), \"\\t\")\n\t\tpw := parts[len(parts)-1]\n\t\tif len(parts) != 2 {\n\t\t\tswitch pw {\n\t\t\tcase \"markcgilberteternity2969\":\n\t\t\t\tpw = \"eternity2969\"\n\t\t\tcase \"sailer1216soccer1216\":\n\t\t\t\tpw = \"soccer1216\"\n\t\t\tdefault:\n\t\t\t\tfmt.Println(\"Couldn't read line: \", parts)\n\t\t\t}\n\t\t}\n\t\tfmt.Fprintln(pws, pw)\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\tpanic(err)\n\t}\n}", "func Convert(inJa, inEn string, out string) error {\n\tc := NewConverter()\n\n\tlog.Printf(\"loading jawiki: %s\", inJa)\n\terr := c.Load(inJa, jaFilter)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"loading enwiki: %s\", inEn)\n\terr = c.Load(inEn, enFilter)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"writing\")\n\terr = c.Save(out)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"done\")\n\treturn nil\n}", "func (f *File) ConvertType(which TypeFlag) Object {\n\treturn NewError(\"Argument to %s not supported, got %s\", which, f.Type())\n}", "func convert(flacFile string, c chan string) {\n\tflac, err := os.Open(flacFile)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Step 1. Create a wav from flac\n\twav, err := flac2wav(*flac, wavFile)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t// Everything vanishes once its time has come\n\tdefer cleanup(wav)\n\tlog.Printf(\"wav: %q\\n\", wav)\n\n\t// Step 2. Extract meta tags\n\ttags := metadata(*flac)\n\n\t// Step 2. 
Create mp3 from wav\n\tmp3, err := wav2mp3(wav, mp3File, tags)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Printf(\"Created file %s\\n\", mp3.Name())\n\tc <- mp3.Name()\n}", "func migrateFile(outFile, inFile string) error {\n\tw, err := os.OpenFile(outFile, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0666)\n\tif err != nil {\n\t\tif os.IsExist(err) {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tr, err := os.Open(inFile)\n\tif os.IsNotExist(err) {\n\t\treturn err\n\t}\n\t_, err = io.Copy(w, r)\n\treturn err\n}", "func (c *Client) Convert(r io.Reader, filename string) (*Response, error) {\n\tbuf := &bytes.Buffer{}\n\tw := multipart.NewWriter(buf)\n\tpart, err := w.CreateFormFile(\"input\", filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif n, err := io.Copy(part, r); err != nil {\n\t\treturn nil, fmt.Errorf(\"could not copy file data into request (failed after %d bytes): %w\", n, err)\n\t}\n\tif err := w.Close(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"POST\", fmt.Sprintf(\"%v%v/convert\", c.protocol, c.endpoint), buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Set(\"Content-Type\", w.FormDataContentType())\n\n\tresp, err := c.httpClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tres := &Response{}\n\tif resp.StatusCode != http.StatusOK {\n\t\terr := json.NewDecoder(resp.Body).Decode(&res)\n\t\tif err != nil {\n\t\t\t// Invalid JSON can come from proxies etc, so try\n\t\t\t// to give something meaningful.\n\t\t\treturn nil, fmt.Errorf(\"non-OK status from convert server: %d (%v)\", resp.StatusCode, http.StatusText(resp.StatusCode))\n\t\t}\n\t\treturn nil, fmt.Errorf(\"non-OK status from convert server: %d (%v) with error: %v\", resp.StatusCode, http.StatusText(resp.StatusCode), res.Error)\n\t}\n\n\tif err := json.NewDecoder(resp.Body).Decode(&res); err != nil {\n\t\treturn nil, err\n\t}\n\treturn res, nil\n}", "func convert(in io.Reader, out io.Writer, convBootstrap convArray, convAddresses convAddrs) error {\n\tdata, err := ioutil.ReadAll(in)\n\tif err != nil {\n\t\treturn err\n\t}\n\tconfMap := make(map[string]interface{})\n\tif err = json.Unmarshal(data, &confMap); err != nil {\n\t\treturn err\n\t}\n\n\t// Convert bootstrap config\n\tconvertBootstrap(confMap, convBootstrap)\n\n\t// Convert addresses config\n\tconvertAddresses(confMap, convAddresses)\n\n\tfixed, err := json.MarshalIndent(confMap, \"\", \" \")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := out.Write(fixed); err != nil {\n\t\treturn err\n\t}\n\t_, err = out.Write([]byte(\"\\n\"))\n\treturn err\n}", "func (w *Workspace) convert(pt packType) {\n\t// itterate over all Projects\n\tfor pi, p := range w.Projects {\n\t\t// Itterate over all Systems\n\t\tfor si, s := range p.Systems {\n\t\t\t// Create new File Stack for this system\n\t\t\tvar files []*File\n\t\t\t// Itterate over all Files\n\t\t\tfor _, f := range s.Files {\n\t\t\t\t// Select the extention\n\t\t\t\tswitch filepath.Ext(f.FilePathName) {\n\t\t\t\t// For .axs files (Main Source Files)\n\t\t\t\tcase \".axs\":\n\t\t\t\t\tswitch f.Type {\n\t\t\t\t\tcase \"Source\", \"MasterSrc\":\n\t\t\t\t\t\tswitch pt {\n\t\t\t\t\t\tcase ptRelease:\n\t\t\t\t\t\t\t// Swap the extension to set file to compiled source code\n\t\t\t\t\t\t\tf.ChangeExtension(\"tkn\")\n\t\t\t\t\t\t\tif f.DeviceMaps == nil {\n\t\t\t\t\t\t\t\tf.AddDeviceMap(NewDeviceMap(\"Custom [0:1:0]\", \"Custom [0:1:0]\"))\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\tcase \"Module\":\n\t\t\t\t\t\t// Assign file 
in correct places\n\t\t\t\t\t\tswitch pt {\n\t\t\t\t\t\tcase ptRelease:\n\t\t\t\t\t\t\tf = nil\n\t\t\t\t\t\t}\n\t\t\t\t\t\tswitch pt {\n\t\t\t\t\t\tcase ptHandover:\n\t\t\t\t\t\t\t// Swap the extension to set file to compiled module\n\t\t\t\t\t\t\tf.ChangeExtension(\"tko\")\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tcase \".axi\":\n\t\t\t\t\tswitch f.Type {\n\t\t\t\t\tcase \"Include\":\n\t\t\t\t\t\tswitch pt {\n\t\t\t\t\t\tcase ptRelease:\n\t\t\t\t\t\t\tf = nil\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif f != nil {\n\t\t\t\t\tfiles = append(files, f)\n\t\t\t\t}\n\t\t\t}\n\t\t\tw.Projects[pi].Systems[si].Files = files\n\t\t}\n\t}\n}", "func FileConvertToCAR(ctx context.Context, inPath, outPath string) (cid.Cid, uint64, error) {\n\tinF, err := os.Open(inPath)\n\tif err != nil {\n\t\treturn cid.Undef, 0, err\n\t}\n\tdefer inF.Close() //nolint:errcheck\n\n\tinStat, err := inF.Stat()\n\tif err != nil {\n\t\treturn cid.Undef, 0, err\n\t}\n\n\tinFile, err := files.NewReaderPathFile(inPath, inF, inStat)\n\tif err != nil {\n\t\treturn cid.Undef, 0, err\n\t}\n\n\toutF, err := os.Create(outPath)\n\tif err != nil {\n\t\treturn cid.Undef, 0, err\n\t}\n\n\tcid, carsz, err := convertToCAR(ctx, inFile, outF, true)\n\tif err != nil {\n\t\treturn cid, carsz, err\n\t}\n\n\terr = outF.Close()\n\tif err != nil {\n\t\treturn cid, carsz, err\n\t}\n\n\treturn cid, carsz, nil\n}", "func Convert(c *cli.Context) {\n\tinputFile := c.GlobalString(\"file\")\n\tdabFile := c.GlobalString(\"bundle\")\n\toutFile := c.String(\"out\")\n\tgenerateYaml := c.BoolT(\"yaml\")\n\ttoStdout := c.BoolT(\"stdout\")\n\tcreateD := c.BoolT(\"deployment\")\n\tcreateDS := c.BoolT(\"daemonset\")\n\tcreateRC := c.BoolT(\"replicationcontroller\")\n\tcreateChart := c.BoolT(\"chart\")\n\treplicas := c.Int(\"replicas\")\n\tsingleOutput := len(outFile) != 0 || outFile == \"-\" || toStdout\n\tcreateDeploymentConfig := c.BoolT(\"deploymentconfig\")\n\n\tif outFile == \"-\" {\n\t\ttoStdout = true\n\t\toutFile = \"\"\n\t}\n\n\t// Create Deployment by default if no controller has be set\n\tif !createD && !createDS && !createRC && !createDeploymentConfig {\n\t\tcreateD = true\n\t}\n\n\tkomposeObject := kobject.KomposeObject{\n\t\tServiceConfigs: make(map[string]kobject.ServiceConfig),\n\t}\n\n\tfile := inputFile\n\tif len(dabFile) > 0 {\n\t\tinputFormat = \"bundle\"\n\t\tfile = dabFile\n\t}\n\n\topt := kobject.ConvertOptions{\n\t\tToStdout: toStdout,\n\t\tCreateD: createD,\n\t\tCreateRC: createRC,\n\t\tCreateDS: createDS,\n\t\tCreateDeploymentConfig: createDeploymentConfig,\n\t\tCreateChart: createChart,\n\t\tGenerateYaml: generateYaml,\n\t\tReplicas: replicas,\n\t\tInputFile: file,\n\t\tOutFile: outFile,\n\t}\n\n\tvalidateFlags(opt, singleOutput, dabFile, inputFile)\n\n\t// loader parses input from file into komposeObject.\n\tl, err := loader.GetLoader(inputFormat)\n\tif err != nil {\n\t\tlogrus.Fatal(err)\n\t}\n\n\tkomposeObject = l.LoadFile(file)\n\n\t// transformer maps komposeObject to provider's primitives\n\tvar t transformer.Transformer\n\tif !createDeploymentConfig {\n\t\tt = new(kubernetes.Kubernetes)\n\t} else {\n\t\tt = new(openshift.OpenShift)\n\t}\n\n\tobjects := t.Transform(komposeObject, opt)\n\n\t// Print output\n\tkubernetes.PrintList(objects, opt)\n}", "func (glsl *GLSLValidator) ConvertCompute(path string, input []byte) ([]byte, error) {\n\tbase := glsl.WorkDir.Path(filepath.Base(path))\n\tpathout := base + \".out\"\n\n\tcmd := exec.Command(glsl.Bin,\n\t\t\"-G100\", // OpenGL ES 3.1.\n\t\t\"-w\", // Suppress warnings.\n\t\t\"-S\", 
\"comp\",\n\t\t\"-o\", pathout,\n\t\tpath,\n\t)\n\tcmd.Stdin = bytes.NewBuffer(input)\n\n\tout, err := cmd.Output()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%s\\nfailed to run %v: %w\", out, cmd.Args, err)\n\t}\n\n\tcompiled, err := ioutil.ReadFile(pathout)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to read output %q: %w\", pathout, err)\n\t}\n\n\treturn compiled, nil\n}", "func (postprocessor *Postprocessor) Convert(\n\tpathProvider paths.Pather,\n\titemRoute route.Route,\n\tfiles []*model.File,\n\thtml string) (convertedContent string, converterError error) {\n\n\t// Thumbnails\n\timagePostProcessor := newImagePostprocessor(pathProvider, itemRoute, files, postprocessor.imageProvider)\n\thtml, imageConversionError := imagePostProcessor.Convert(html)\n\tif imageConversionError != nil {\n\t\tpostprocessor.logger.Warn(\"Error while converting images/thumbnails. Error: %s\", imageConversionError)\n\t}\n\n\t// Rewrite Links\n\thtml = rewireLinks(pathProvider, itemRoute, files, html)\n\n\t// Add Emojis\n\thtml = addEmojis(html)\n\n\treturn html, nil\n}", "func ConvertDoc(r io.Reader) (string, map[string]string, error) {\n\tf, err := NewLocalFile(r, \"/tmp\", \"sajari-convert-\")\n\tif err != nil {\n\t\treturn \"\", nil, fmt.Errorf(\"error creating local file: %v\", err)\n\t}\n\tdefer f.Done()\n\n\t// Meta data\n\tmc := make(chan map[string]string, 1)\n\tgo func() {\n\t\tmeta := make(map[string]string)\n\t\tmetaStr, err := exec.Command(\"wvSummary\", f.Name()).Output()\n\t\tif err != nil {\n\t\t\t// TODO: Remove this.\n\t\t\tlog.Println(\"wvSummary:\", err)\n\t\t}\n\n\t\t// Parse meta output\n\t\tinfo := make(map[string]string)\n\t\tfor _, line := range strings.Split(string(metaStr), \"\\n\") {\n\t\t\tif parts := strings.SplitN(line, \"=\", 2); len(parts) > 1 {\n\t\t\t\tinfo[strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])\n\t\t\t}\n\t\t}\n\n\t\t// Convert parsed meta\n\t\tif tmp, ok := info[\"Last Modified\"]; ok {\n\t\t\tif t, err := time.Parse(time.RFC3339, tmp); err == nil {\n\t\t\t\tmeta[\"ModifiedDate\"] = fmt.Sprintf(\"%d\", t.Unix())\n\t\t\t}\n\t\t}\n\t\tif tmp, ok := info[\"Created\"]; ok {\n\t\t\tif t, err := time.Parse(time.RFC3339, tmp); err == nil {\n\t\t\t\tmeta[\"CreatedDate\"] = fmt.Sprintf(\"%d\", t.Unix())\n\t\t\t}\n\t\t}\n\n\t\tmc <- meta\n\t}()\n\n\t// Document body\n\tbc := make(chan string, 1)\n\tgo func() {\n\n\t\t// Save output to a file\n\t\toutputFile, err := ioutil.TempFile(\"/tmp\", \"sajari-convert-\")\n\t\tif err != nil {\n\t\t\t// TODO: Remove this.\n\t\t\tlog.Println(\"TempFile Out:\", err)\n\t\t\treturn\n\t\t}\n\t\tdefer os.Remove(outputFile.Name())\n\n\t\terr = exec.Command(\"wvText\", f.Name(), outputFile.Name()).Run()\n\t\tif err != nil {\n\t\t\t// TODO: Remove this.\n\t\t\tlog.Println(\"wvText:\", err)\n\t\t}\n\n\t\tvar buf bytes.Buffer\n\t\t_, err = buf.ReadFrom(outputFile)\n\t\tif err != nil {\n\t\t\t// TODO: Remove this.\n\t\t\tlog.Println(\"wvText:\", err)\n\t\t}\n\n\t\tbc <- buf.String()\n\t}()\n\n\t// TODO: Should errors in either of the above Goroutines stop things from progressing?\n\tbody := <-bc\n\tmeta := <-mc\n\n\t// TODO: Check for errors instead of len(body) == 0?\n\tif len(body) == 0 {\n\t\tf.Seek(0, 0)\n\t\treturn ConvertDocx(f)\n\t}\n\treturn body, meta, nil\n}", "func main() {\n\tflag.Parse()\n\tfor _, filename := range flag.Args() {\n\t\tlog.Println(\"Converting\", filename)\n\t\tmedia, err := convertFile(filename)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"[Conversion Error] \" + err.Error())\n\t\t}\n\t\tif 
*dontCopyMedia == false && media != nil && (path.Clean(*outDir) != \".\" || *subDir == true) {\n\t\t\tlog.Println(\"Copying media\")\n\t\t\tout := *outDir\n\t\t\tif *subDir {\n\t\t\t\tout = filepath.Join(*outDir, base(filename))\n\t\t\t\terr := createPath(out)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatal(\"[CopyMedia Error] Cannot create subdir for media files.\\n\" + err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t\terr := copyFiles(out, media)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(\"[CopyMedia Error] cp failed:\\n\" + err.Error())\n\t\t\t}\n\t\t}\n\t}\n\tlog.Println(\"Done.\")\n}", "func TestConvert(t *testing.T) {\n\n\n err := filepath.Walk(\"../img/goods/med/\", func(path string, info os.FileInfo, err error) error {\n\n if err != nil {\n t.Logf(\"%v\", err)\n }\n if !info.IsDir() {\n rdr, err := os.Open(path)\n if err != nil {\n //t.Logf(\"%v\", err)\n return err\n }\n defer rdr.Close()\n sxt := filepath.Ext(rdr.Name())\n if ok := PNG.Find([]byte(sxt)); len(ok) == 0 {\n return err\n }\n ss := strings.Split(info.Name(), \".\")\n wtr, err := os.Create(\"../img/goods/med/\" + ss[0] + \".jpg\")\n if err != nil {\n t.Logf(\"%v\", err)\n }\n defer wtr.Close()\n\n //err = convertJPG2PNG(wtr, rdr)\n err = convertPNG2JPG(wtr, rdr)\n if err != nil {\n t.Logf(\"%v\", err)\n }\n return nil\n } else {\n t.Logf(\"Skipped directory: %s\", info.Name())\n }\n return nil\n })\n if err != nil {\n t.Errorf(\"%v\", err)\n }\n\n}", "func convertCommand(command bool) string {\n\tif command == true {\n\t\treturn \"ATTACK\"\n\t}\n\treturn \"RETREAT\"\n}", "func convertLSIFToSCIP(out *output.Output, inputFile, outputFile string) error {\n\tif out != nil {\n\t\tout.Writef(\"%s Converting %s into %s\", output.EmojiInfo, inputFile, outputFile)\n\t}\n\n\tctx := context.Background()\n\tuploadID := -time.Now().Nanosecond()\n\troot := codeintelUploadFlags.root\n\n\tif !isFlagSet(codeintelUploadFlagSet, \"root\") {\n\t\t// Best-effort infer the root; we have a strange cyclic init order where we're\n\t\t// currently trying to determine the filename that determines the root, but we\n\t\t// need the root when converting from LSIF to SCIP.\n\t\troot, _ = inferIndexRoot()\n\t}\n\n\trc, err := os.Open(inputFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer rc.Close()\n\n\tindex, err := libscip.ConvertLSIF(ctx, uploadID, rc, root)\n\tif err != nil {\n\t\treturn err\n\t}\n\tserialized, err := proto.Marshal(index)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn os.WriteFile(outputFile, serialized, os.ModePerm)\n}", "func aconvertDecode(videoFileNameWithExt string, title string) (string, error) {\n\tmp3FileName := makeFileName(title, \"mp3\")\n\tdeleteFile(mp3FileName) //remove target file if exists\n\tfluMp3File := flu.File(mp3FileName)\n\tif aconvertAPI == nil { //init aconvertAPI once\n\t\taconvertAPI = ac.NewClient(nil, nil, nil)\n\t}\n\n\tfmt.Printf(\"Start AConvert decoding %s\\n\", videoFileNameWithExt)\n\tr, err := aconvertAPI.Convert(context.Background(), flu.File(videoFileNameWithExt), make(ac.Opts).TargetFormat(\"mp3\"))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\terr = aconvertAPI.GET(r.URL()).Execute().DecodeBodyTo(fluMp3File).Error\n\treturn mp3FileName, err\n}", "func ConvertOutput(args cmdlargs) error {\n\n\tinputpath, err := filepath.Abs(args.folderIn)\n\tif err != nil {\n\t\treturn err\n\t}\n\toutpath, err := filepath.Abs(args.folderOut)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tiDir, err := os.Open(inputpath)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer iDir.Close()\n\n\ttype outArr struct 
{\n\t\tname string\n\t\tyearMap map[int]string\n\t\tstartYear int\n\t\tendYear int\n\t}\n\tfilenameMap := make(map[string]*outArr)\n\n\tfileNames, err := iDir.Readdir(-1)\n\tfor _, filename := range fileNames {\n\t\tif !filename.IsDir() && strings.HasSuffix(filename.Name(), \".csv\") {\n\t\t\tgetYearfromFilename := func(f string) (int, string, error) {\n\t\t\t\tnoSuff := strings.TrimSuffix(f, \".csv\")\n\t\t\t\tyearStr := noSuff[len(noSuff)-4:]\n\t\t\t\tname := noSuff[:len(noSuff)-4]\n\t\t\t\tyear64, err := strconv.ParseInt(yearStr, 10, 32)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn -1, \"\", err\n\t\t\t\t}\n\t\t\t\treturn int(year64), name, err\n\t\t\t}\n\t\t\tyear, name, err := getYearfromFilename(filename.Name())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t//fmt.Println(year, name)\n\t\t\tif _, ok := filenameMap[name]; !ok {\n\t\t\t\tentry := outArr{\n\t\t\t\t\tname: name,\n\t\t\t\t\tyearMap: make(map[int]string),\n\t\t\t\t\tstartYear: year,\n\t\t\t\t\tendYear: year,\n\t\t\t\t}\n\t\t\t\tfilenameMap[name] = &entry\n\t\t\t}\n\t\t\tfilenameMap[name].yearMap[year] = filepath.Join(inputpath, filename.Name())\n\t\t\tif filenameMap[name].startYear > year {\n\t\t\t\tfilenameMap[name].startYear = year\n\t\t\t}\n\t\t\tif filenameMap[name].endYear < year {\n\t\t\t\tfilenameMap[name].endYear = year\n\t\t\t}\n\t\t}\n\t}\n\tlastFolder := filepath.Base(inputpath)\n\tvar twoRunes string\n\tidxRunes := 0\n\tfor _, r := range lastFolder {\n\t\tif idxRunes >= 2 {\n\t\t\tbreak\n\t\t}\n\t\ttwoRunes = twoRunes + string(r)\n\t\tidxRunes++\n\t}\n\n\tfor _, fileEntyVal := range filenameMap {\n\n\t\tfmtFilename := args.filename\n\t\tif strings.Contains(args.filename, \"%s\") {\n\t\t\tfmtFilename = fmt.Sprintf(args.filename, twoRunes)\n\t\t}\n\n\t\toutputfile := filepath.Join(outpath, fmtFilename)\n\t\tmakeDir(outputfile)\n\t\tfmt.Println(outputfile)\n\t\toutFile, err := os.OpenFile(outputfile, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0600)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\twriter := bufio.NewWriter(outFile)\n\t\twriter.WriteString(fmt.Sprintf(title, args.title))\n\t\twriter.WriteString(newLine)\n\t\twriter.WriteString(fmt.Sprintf(model, args.model))\n\t\twriter.WriteString(newLine)\n\t\twriter.WriteString(fmt.Sprintf(modelerName, args.modeller))\n\t\twriter.WriteString(newLine)\n\t\twriter.WriteString(fmt.Sprintf(simulation, args.simulation))\n\t\twriter.WriteString(newLine)\n\t\twriter.WriteString(fmt.Sprintf(site, args.site))\n\t\twriter.WriteString(newLine)\n\t\twriter.WriteString(header1)\n\t\twriter.WriteString(newLine)\n\t\twriter.WriteString(header2)\n\t\twriter.WriteString(newLine)\n\n\t\tfor year := fileEntyVal.startYear; year <= fileEntyVal.endYear; year++ {\n\t\t\theader, content, err := readDailyOutputFile(fileEntyVal.yearMap[year])\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tharvestIndex, err := getHarvestIndex(&header, &content)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// Model\n\t\t\twriter.WriteString(\"MO\")\n\t\t\twriter.WriteRune(sepeartor)\n\t\t\t// Planting.date\n\t\t\tsowIndex, date, err := getStageStartDate(\"1\", &header, &content)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\twriter.WriteString(date)\n\t\t\twriter.WriteRune(sepeartor)\n\t\t\t// Treatment\n\t\t\twriter.WriteString(args.treatment)\n\t\t\twriter.WriteRune(sepeartor)\n\t\t\t// Yield\n\t\t\tdate, err = getYield(harvestIndex, &header, &content)\n\t\t\tif err != nil {\n\t\t\t\treturn 
err\n\t\t\t}\n\t\t\twriter.WriteString(date)\n\t\t\twriter.WriteRune(sepeartor)\n\t\t\t// Emergence\n\t\t\t_, date, err = getStageStartDate(\"2\", &header, &content)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\twriter.WriteString(date)\n\t\t\twriter.WriteRune(sepeartor)\n\t\t\t// Ant\n\t\t\tantIndex, date, err := getStageStartDate(\"5\", &header, &content)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\twriter.WriteString(date)\n\t\t\twriter.WriteRune(sepeartor)\n\t\t\t// Mat\n\t\t\t_, date, err = getStageStartDate(\"6\", &header, &content)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\twriter.WriteString(date)\n\t\t\twriter.WriteRune(sepeartor)\n\t\t\t// FLN\n\t\t\twriter.WriteString(\"na\")\n\t\t\twriter.WriteRune(sepeartor)\n\t\t\t// GNumber\n\t\t\twriter.WriteString(\"na\")\n\t\t\twriter.WriteRune(sepeartor)\n\t\t\t// Biom-an\n\t\t\tdate, err = getAboveBiomass(antIndex, &header, &content)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\twriter.WriteString(date)\n\t\t\twriter.WriteRune(sepeartor)\n\t\t\t// Biom-ma\n\t\t\tdate, err = getAboveBiomass(harvestIndex, &header, &content)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\twriter.WriteString(date)\n\t\t\twriter.WriteRune(sepeartor)\n\t\t\t// MaxLAI\n\t\t\tdate, err = getMaxLAI(sowIndex, harvestIndex, &header, &content)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\twriter.WriteString(date)\n\t\t\twriter.WriteRune(sepeartor)\n\t\t\t// Wdrain\n\t\t\tdate, _, err = getCumm(sowIndex, harvestIndex, \"Recharge\", &header, &content)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\twriter.WriteString(date)\n\t\t\twriter.WriteRune(sepeartor)\n\t\t\t// CumET\n\t\t\tdate, _, err = getCumm(sowIndex, harvestIndex, \"Act_ET\", &header, &content)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\twriter.WriteString(date)\n\t\t\twriter.WriteRune(sepeartor)\n\t\t\t// SoilAvW\n\t\t\tdate, err = getPlantAvailWater(harvestIndex, &header, &content)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\twriter.WriteString(date)\n\t\t\twriter.WriteRune(sepeartor)\n\t\t\t// Runoff\n\t\t\tdate, _, err = getCumm(sowIndex, harvestIndex, \"RunOff\", &header, &content)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\twriter.WriteString(date)\n\t\t\twriter.WriteRune(sepeartor)\n\t\t\t// Transp\n\t\t\t_, actET, err := getCumm(sowIndex, harvestIndex, \"Act_ET\", &header, &content)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t_, actEv, err := getCumm(sowIndex, harvestIndex, \"Act_Ev\", &header, &content)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\twriter.WriteString(fmt.Sprintf(\"%.2f\", actET-actEv))\n\t\t\twriter.WriteRune(sepeartor)\n\t\t\t// CroN-an\n\t\t\tdate, err = getValueAt(antIndex, \"AbBiomN\", &header, &content)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\twriter.WriteString(date)\n\t\t\twriter.WriteRune(sepeartor)\n\t\t\t// CroN-ma\n\t\t\tdate, err = getValueAt(harvestIndex, \"AbBiomN\", &header, &content)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\twriter.WriteString(date)\n\t\t\twriter.WriteRune(sepeartor)\n\t\t\t// Nleac\n\t\t\tdate, _, err = getCumm(sowIndex, harvestIndex, \"NLeach\", &header, &content)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\twriter.WriteString(date)\n\t\t\twriter.WriteRune(sepeartor)\n\t\t\t// GrainN\n\t\t\tdate, err = getNGrain(harvestIndex, &header, &content)\n\t\t\tif err != nil {\n\t\t\t\treturn 
err\n\t\t\t}\n\t\t\twriter.WriteString(date)\n\t\t\twriter.WriteRune(sepeartor)\n\t\t\t// Nmin\n\t\t\tdate, _, err = getCumm(sowIndex, harvestIndex, \"NetNmin\", &header, &content)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\twriter.WriteString(date)\n\t\t\twriter.WriteRune(sepeartor)\n\t\t\t// Nvol\n\t\t\tdate, _, err = getCumm(sowIndex, harvestIndex, \"NH3\", &header, &content)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\twriter.WriteString(date)\n\t\t\twriter.WriteRune(sepeartor)\n\t\t\t// Nimmo\n\t\t\twriter.WriteString(\"na\")\n\t\t\twriter.WriteRune(sepeartor)\n\t\t\t// SoilN\n\t\t\tdate, err = getSoilMinN(harvestIndex, &header, &content)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\twriter.WriteString(date)\n\t\t\twriter.WriteRune(sepeartor)\n\t\t\t// Nden\n\t\t\tdate, _, err = getCumm(sowIndex, harvestIndex, \"Denit\", &header, &content)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\twriter.WriteString(date)\n\t\t\twriter.WriteRune(sepeartor)\n\t\t\t// cumPARi\n\t\t\twriter.WriteString(\"na\")\n\t\t\twriter.WriteString(newLine)\n\t\t}\n\n\t\twriter.Flush()\n\t\toutFile.Close()\n\t}\n\treturn nil\n}", "func Convert(w io.Writer, img image.Image, filename string) error {\n\n\tf, err := imaging.FormatFromFilename(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn imaging.Encode(w, img, f)\n}", "func (s *Search) convert2FileLink(file *Ed2kFileStruct) *com.Ed2kFileLink {\n\tif file.Type != \"Video\" {\n\t\treturn nil\n\t}\n\n\t// filtered by matched items\n\tfileInfo := com.ToFileInfo(file.Name, s.myKeywordStruct.Items)\n\tif fileInfo == nil {\n\t\treturn nil\n\t}\n\n\t// check if season matched with user input\n\t// we don't care about episode\n\tif s.myKeywordStruct.MyKeyword.Season != -1 && s.myKeywordStruct.MyKeyword.Season != fileInfo.Season {\n\t\treturn nil\n\t}\n\n\tfileLink := com.Ed2kFileLink{FileInfo: *fileInfo, Name: file.Name, Size: file.Size, Avail: file.Avail, Hash: file.Hash[:]}\n\n\treturn &fileLink\n}", "func TransformCommand(c *cli.Context) error {\n\tif c.NArg() != 1 {\n\t\tcli.ShowCommandHelp(c, \"transform\")\n\n\t\treturn fmt.Errorf(\"Missing required argument FILE\")\n\t}\n\n\tfhirVersion := c.GlobalString(\"fhir\")\n\tfilename := c.Args().Get(0)\n\n\tfileContent, err := ioutil.ReadFile(filename)\n\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Error reading file %s\", filename)\n\t}\n\n\titer := jsoniter.ConfigFastest.BorrowIterator(fileContent)\n\tdefer jsoniter.ConfigFastest.ReturnIterator(iter)\n\n\tres := iter.Read()\n\n\tif res == nil {\n\t\treturn errors.Wrapf(err, \"Error parsing file %s as JSON\", filename)\n\t}\n\n\tout, err := doTransform(res.(map[string]interface{}), fhirVersion)\n\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Error performing transformation\")\n\t}\n\n\toutJson, err := jsoniter.ConfigFastest.MarshalIndent(out, \"\", \" \")\n\n\tos.Stdout.Write(outJson)\n\tos.Stdout.Write([]byte(\"\\n\"))\n\n\treturn nil\n}", "func Convert(src, dst interface{}, options ...convert.Options) error {\n\treturn converter.Convert(src, dst, options...)\n}", "func main() {\n\t// Full speed ahead\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\tsync := make(chan string)\n\tfor _, filename := range flag.Args() {\n\t\tgo convert(filename, sync)\n\t}\n\tfor i, _ := range flag.Args() {\n\t\ts := <- sync\n\t\tfmt.Printf(\"Processed #%d: %s\\n\", i, s)\n\t}\n}", "func ImageConvertHandler(ctx *gin.Context) {\n\tvar (\n\t\tip = ctx.ClientIP()\n\t\tdb = ctx.MustGet(\"db\").(*gorm.DB)\n\t\terr 
error\n\t\tfile *models.File\n\t\ttoken = ctx.MustGet(\"token\").(*models.Token)\n\t\tinput = ctx.MustGet(\"inputParam\").(*ImageConvertInput)\n\t\trequestID = ctx.GetInt64(\"requestId\")\n\t\timageConvertSrv *service.ImageConvert\n\t\tconvertData []byte\n\t)\n\n\tif file, err = models.FindFileByUID(input.FileUID, false, db); err != nil {\n\t\tctx.JSON(400, &Response{\n\t\t\tRequestID: requestID,\n\t\t\tSuccess: false,\n\t\t\tErrors: generateErrors(err, \"fileUid\"),\n\t\t})\n\t\treturn\n\t}\n\n\timageConvertSrv = &service.ImageConvert{\n\t\tBaseService: service.BaseService{DB: db},\n\t\tToken: token,\n\t\tFile: file,\n\t\tIP: &ip,\n\t\tType: input.Type,\n\t\tWidth: input.Width,\n\t\tHeight: input.Height,\n\t\tLeft: input.Left,\n\t\tTop: input.Top,\n\t}\n\n\tif isTesting {\n\t\timageConvertSrv.RootPath = testingChunkRootPath\n\t}\n\n\tif err = imageConvertSrv.Validate(); !reflect.ValueOf(err).IsNil() {\n\t\tctx.JSON(400, &Response{\n\t\t\tRequestID: requestID,\n\t\t\tSuccess: false,\n\t\t\tErrors: generateErrors(err, \"\"),\n\t\t})\n\t\treturn\n\t}\n\n\tif convertData, err = imageConvertSrv.Execute(context.Background()); err != nil {\n\t\tctx.JSON(400, &Response{\n\t\t\tRequestID: requestID,\n\t\t\tSuccess: false,\n\t\t\tErrors: generateErrors(err, \"\"),\n\t\t})\n\t\treturn\n\t}\n\n\tctx.Header(\"Last-Modified\", file.UpdatedAt.Format(time.RFC1123))\n\tctx.Header(\"Content-Disposition\", fmt.Sprintf(`attachment; filename=\"%s\"`, file.Name))\n\n\tif input.OpenInBrowser {\n\t\tctx.Header(\"Content-Disposition\", fmt.Sprintf(`inline; filename=\"%s\"`, file.Name))\n\t}\n\tctx.Set(\"ignoreRespBody\", true)\n\n\tctx.Data(http.StatusOK, JpegContentType, convertData)\n}", "func convert(in string) (out string, media map[string]struct{}, err error) {\n\tconst (\n\t\tneither = iota\n\t\tcomment\n\t\tcode\n\t)\n\tlastLine := neither\n\tmedia = map[string]struct{}{}\n\n\t// Remove carriage returns.\n\tin = strings.Replace(in, \"\\r\", \"\", -1)\n\t// Split at newline and process each line.\n\tfor _, line := range strings.Split(in, \"\\n\") {\n\t\t// Skip the line if it is a Go directive like //go:generate\n\t\tif isDirective(line) {\n\t\t\tcontinue\n\t\t}\n\t\t// Determine if the line belongs to a comment.\n\t\tif isInComment(line) {\n\t\t\t// Close the code block if a new comment begins.\n\t\t\tif lastLine == code {\n\t\t\t\tout += \"```\\n\\n\"\n\t\t\t}\n\t\t\tlastLine = comment\n\t\t\t// Detect `![image](path)` tags and add the path to the\n\t\t\t// media list.\n\t\t\tpath, err := extractMediaPath(line)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", nil, errors.New(\"Unable to extract media path from line \" + line + \"\\n\" + err.Error())\n\t\t\t}\n\t\t\tif path != \"\" {\n\t\t\t\tmedia[path] = struct{}{}\n\t\t\t}\n\n\t\t\trepl, path, err := replaceHypeTag(line)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", nil, errors.New(\"Failed generating Hype tag from line \" + line + \"\\n\" + err.Error())\n\t\t\t}\n\t\t\tif repl != \"\" && path != \"\" {\n\t\t\t\tout += repl\n\t\t\t\tmedia[path] = struct{}{}\n\t\t\t} else {\n\t\t\t\t// Strip out any comment delimiter and add the line to the output.\n\t\t\t\tout += allCommentDelims.ReplaceAllString(line, \"\") + \"\\n\"\n\t\t\t}\n\t\t} else { // not in comment\n\t\t\t// Open a new code block if the last line was a comment,\n\t\t\t// but take care of empty lines between two comment lines.\n\t\t\tif lastLine == comment && len(line) > 0 {\n\t\t\t\tlastLine = code\n\t\t\t\tout += \"\\n```go\\n\"\n\t\t\t}\n\t\t\t// Add code lines verbatim to the output.\n\t\t\tout 
+= line + \"\\n\"\n\t\t}\n\t}\n\tif lastLine == code {\n\t\tout += \"\\n```\\n\"\n\t}\n\treturn out, media, nil\n}", "func (s *Scrape) Convert(ci *pb.Request) error {\n\tvar err error\n\tvar title string\n\tvar bodyBuf *bytes.Buffer\n\n\tswitch ci.Type {\n\tcase pb.ContentType_TEXT:\n\t\ttitle, bodyBuf, err = s.extractTextFromUri(ci)\n\tcase pb.ContentType_PDF:\n\t\ttitle, bodyBuf, err = s.extractTextFromPdf(ci)\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(title) > 3 && len(ci.Title) < 1 {\n\t\tif len(ci.Title) < 1 && len(title) < 1 && bodyBuf.Len() > 0 {\n\t\t\tci.Title = stringsx.Clean(bodyBuf.String()[:s.cfg.TitleLengthLimit])\n\t\t} else if s.cfg.TitleLengthLimit > 0 && uint32(len(title)) > s.cfg.TitleLengthLimit {\n\t\t\tci.Title = stringsx.Clean(title[:s.cfg.TitleLengthLimit])\n\t\t} else {\n\t\t\tci.Title = stringsx.Clean(title)\n\t\t}\n\t}\n\n\tci.Type = pb.ContentType_TEXT\n\n\tcreatedTime := time.Now()\n\tci.Created = uint64(createdTime.Unix())\n\tif bodyBuf != nil && bodyBuf.Len() > 0 {\n\t\tci.Text = bodyBuf.String()\n\t\tci.Size = uint64(bodyBuf.Len())\n\t\tci.Length = uint64(bodyBuf.Len())\n\t}\n\n\tlocalFilename, err := GetFilePath(ci)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfullFilename := filepath.Join(s.cfg.TmpPath, localFilename)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(path.Dir(fullFilename), os.ModePerm)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = ioutil.WriteFile(fullFilename, []byte(bodyBuf.String()), 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (a *Adapter) toFile(message string) {\n\n\tif !a.cfg.File {\n\t\treturn\n\t}\n\n\t_, err := a.lf.WriteString(message + \"\\n\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n}", "func GetConvertedFiles(files []scan.File) (convertedFiles []scan.File, convertedPaths []string) {\n\tvar toBeConverted []scan.File\n\tfor _, f := range files {\n\t\tif scan.ConvertPattern.MatchString(f.Path) {\n\t\t\ttoBeConverted = append(toBeConverted, f)\n\t\t}\n\t}\n\n\tfor _, file := range toBeConverted {\n\t\ttmppath, err := ioutil.TempDir(\"\", \"ebconv\")\n\t\tfpath := filepath.Join(tmppath, file.Name)\n\n\t\t// Get content from the file as a string\n\t\tcontent, err := docconv.ConvertPath(file.Path)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error converting %s, file not scanned\\n\", file.Path)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Write content to new temp file\n\t\terr = ioutil.WriteFile(fpath, []byte(content.Body), 0644)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error writing converted file %s, file not scanned\\n\", file.Path)\n\t\t\tcontinue\n\t\t}\n\n\t\tconvertedPaths = append(convertedPaths, tmppath)\n\t\tvar convertedFile scan.File\n\t\tconvertedFile.Path = fpath\n\t\tconvertedFile.Name = file.Path\n\t\tconvertedFiles = append(convertedFiles, convertedFile)\n\t}\n\n\treturn convertedFiles, convertedPaths\n\n}", "func ConvertAndResize(inpath, outpath string, width, height int) error {\n\tcmd := exec.Command(\n\t\t\"tifig\",\n\t\tinpath,\n\t\toutpath,\n\t\t\"--crop\",\n\t\t\"-w\", strconv.Itoa(width),\n\t\t\"-h\", strconv.Itoa(height),\n\t)\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"%v: %s\", err, out)\n\t}\n\treturn err\n}", "func (r *RenderEnv) File2File(src, dst string, vars *variables.Variables, nounset, noempty bool) (err error) {\n\tb, err := ioutil.ReadFile(src)\n\terrutils.Elogf(\"Can not open template file %s for reading: %v\", src, err)\n\tr.Text2File(string(b), dst, vars, nounset, noempty)\n\treturn\n}", "func run(path 
string, overwrite bool) error {\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Look for a .gunkconfig\n\tabsPath, _ := filepath.Abs(path)\n\tcfg, err := config.Load(filepath.Dir(absPath))\n\tvar cfgProtocPath, cfgProtocVer, importPath string\n\tif err == nil {\n\t\timportPath = filepath.Join(cfg.Dir, cfg.ImportPath)\n\t\tcfgProtocPath = cfg.ProtocPath\n\t\tcfgProtocVer = cfg.ProtocVersion\n\t}\n\tprotocPath, err := downloader.CheckOrDownloadProtoc(cfgProtocPath, cfgProtocVer)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Determine whether the path is a file or a directory.\n\t// If it is a file convert the file.\n\tif !fi.IsDir() {\n\t\treturn convertFile(path, overwrite, importPath, protocPath)\n\t}\n\t// If the path is a directory and has a .proto extension then error.\n\tif filepath.Ext(path) == \".proto\" {\n\t\treturn fmt.Errorf(\"%s is a directory, should be a proto file\", path)\n\t}\n\t// Handle the case where it is a directory. Loop through\n\t// the files and if we have a .proto file attempt to\n\t// convert it.\n\tfiles, err := ioutil.ReadDir(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, f := range files {\n\t\t// If the file is not a .proto file\n\t\tif f.IsDir() || filepath.Ext(f.Name()) != \".proto\" {\n\t\t\tcontinue\n\t\t}\n\t\tif err := convertFile(filepath.Join(path, f.Name()), overwrite, importPath, protocPath); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (s *Search) convert2FileLinks(files []*Ed2kFileStruct) []*com.Ed2kFileLink {\n\tvar fileLinks []*com.Ed2kFileLink\n\tfor _, file := range files {\n\t\tlink := s.convert2FileLink(file)\n\t\tif link != nil {\n\t\t\tfileLinks = append(fileLinks, link)\n\t\t}\n\t}\n\n\treturn fileLinks\n}", "func converttga(fname string) {\n\tdefer wg.Done()\n\tfile, err := os.Open(fname)\n\tif err != nil {\n\t\tfmt.Println(\"Could not open file: \", fname, \"\\n>>\", err.Error())\n\t\treturn\n\t}\n\n\timg, err := tga.Decode(file)\n\tif err != nil {\n\t\tfmt.Println(\"Error decoding file: \", fname, \"\\n>>\", err.Error())\n\t\treturn\n\t}\n\n\tnewname := getfinaldir(fname)\n\tpngfile, err := os.Create(newname)\n\tif err != nil {\n\t\tfmt.Println(\"Error, could not create file: \", newname)\n\t\treturn\n\t}\n\terr = png.Encode(pngfile, img)\n\tif err != nil {\n\t\tfmt.Println(\"Could not Encode file to png: \", fname)\n\t}\n}", "func ConvertFileInfo(\n\tin os.FileInfo,\n\tsymlinkTarget string) (out *FileInfo, err error) {\n\tout, err = convertFileInfo(in, symlinkTarget, gUserRegistry, gGroupRegistry)\n\treturn\n}", "func Convert(opts Options) error {\n\t// Set default options where appropriate.\n\tif opts.Path == \"\" {\n\t\topts.Path = \".\"\n\t}\n\tif opts.Writer == nil {\n\t\topts.Writer = os.Stdout\n\t}\n\n\tservices := disco.NewWithCredentialsSource(noCredentials{})\n\tmoduleStorage := module.NewStorage(filepath.Join(command.DefaultDataDir, \"modules\"), services)\n\n\tmod, err := module.NewTreeModule(\"\", opts.Path)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"creating tree module\")\n\t}\n\n\tif err = mod.Load(moduleStorage); err != nil {\n\t\treturn errors.Wrapf(err, \"loading module\")\n\t}\n\n\tgs, err := buildGraphs(mod, true, opts)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"importing Terraform project graphs\")\n\t}\n\n\t// Filter resource name properties if requested.\n\tif opts.FilterResourceNames {\n\t\tfilterAutoNames := opts.ResourceNameProperty == \"\"\n\t\tfor _, g := range gs {\n\t\t\tfor _, r := range g.Resources {\n\t\t\t\tif !r.IsDataSource 
{\n\t\t\t\t\til.FilterProperties(r, func(key string, _ il.BoundNode) bool {\n\t\t\t\t\t\tif filterAutoNames {\n\t\t\t\t\t\t\tsch := r.Schemas().PropertySchemas(key).Pulumi\n\t\t\t\t\t\t\treturn sch == nil || sch.Default == nil || !sch.Default.AutoNamed\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn key != opts.ResourceNameProperty\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Annotate nodes with the location of their original definition if requested.\n\tif opts.AnnotateNodesWithLocations {\n\t\tfor _, g := range gs {\n\t\t\taddLocationAnnotations(g)\n\t\t}\n\t}\n\n\tgenerator, err := newGenerator(\"auto\", opts)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"creating generator\")\n\t}\n\n\tif err = gen.Generate(gs, generator); err != nil {\n\t\treturn errors.Wrapf(err, \"generating code\")\n\t}\n\n\treturn nil\n}", "func (c *Client) Convert(reader io.Reader, width uint) (string, error) {\n\timage, err := ioutil.ReadAll(reader)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn c.ConvertBytes(image, width)\n}", "func NewConvertCmd() *cobra.Command {\n\tconvertCmd.Flags().Bool(\"noindent\", false, \"skip format json\")\n\treturn convertCmd\n}", "func Convert(opts Options) (map[string][]byte, Diagnostics, error) {\n\t// Set default options where appropriate.\n\tif opts.Root == nil {\n\t\tcwd, err := os.Getwd()\n\t\tif err != nil {\n\t\t\treturn nil, Diagnostics{}, err\n\t\t}\n\t\topts.Root = afero.NewBasePathFs(afero.NewOsFs(), cwd)\n\t}\n\tif opts.ProviderInfoSource == nil {\n\t\topts.ProviderInfoSource = il.PluginProviderInfoSource\n\t}\n\n\t// Attempt to load the config as TF11 first. If this succeeds, use TF11 semantics unless either the config\n\t// or the options specify otherwise.\n\tgeneratedFiles, useTF12, tf11Err := convertTF11(opts)\n\tif !useTF12 {\n\t\tif tf11Err != nil {\n\t\t\treturn nil, Diagnostics{}, tf11Err\n\t\t}\n\t\treturn generatedFiles, Diagnostics{}, nil\n\t}\n\n\tvar tf12Files []*syntax.File\n\tvar diagnostics hcl.Diagnostics\n\n\tif tf11Err == nil {\n\t\t// Parse the config.\n\t\tparser := syntax.NewParser()\n\t\tfor filename, contents := range generatedFiles {\n\t\t\terr := parser.ParseFile(bytes.NewReader(contents), filename)\n\t\t\tcontract.Assert(err == nil)\n\t\t}\n\t\tif parser.Diagnostics.HasErrors() {\n\t\t\treturn nil, Diagnostics{All: parser.Diagnostics, files: parser.Files}, nil\n\t\t}\n\t\ttf12Files, diagnostics = parser.Files, append(diagnostics, parser.Diagnostics...)\n\t} else {\n\t\tfiles, diags := parseTF12(opts)\n\t\tif !diags.HasErrors() {\n\t\t\ttf12Files, diagnostics = files, append(diagnostics, diags...)\n\t\t} else if opts.TerraformVersion != \"11\" {\n\t\t\treturn nil, Diagnostics{All: diags, files: files}, nil\n\t\t} else {\n\t\t\treturn nil, Diagnostics{}, tf11Err\n\t\t}\n\t}\n\n\ttf12Files, program, programDiags, err := convertTF12(tf12Files, opts)\n\tif err != nil {\n\t\treturn nil, Diagnostics{}, err\n\t}\n\n\tdiagnostics = append(diagnostics, programDiags...)\n\tif diagnostics.HasErrors() {\n\t\treturn nil, Diagnostics{All: diagnostics, files: tf12Files}, nil\n\t}\n\n\tswitch opts.TargetLanguage {\n\tcase LanguageTypescript:\n\t\ttsFiles, genDiags, _ := hcl2nodejs.GenerateProgram(program)\n\t\tgeneratedFiles, diagnostics = tsFiles, append(diagnostics, genDiags...)\n\tcase LanguagePulumi:\n\t\tgeneratedFiles = map[string][]byte{}\n\t\tfor _, f := range tf12Files {\n\t\t\tgeneratedFiles[f.Name] = f.Bytes\n\t\t}\n\tcase LanguagePython:\n\t\tpyFiles, genDiags, _ := hcl2python.GenerateProgram(program)\n\t\tgeneratedFiles, diagnostics 
= pyFiles, append(diagnostics, genDiags...)\n\tcase LanguageCSharp:\n\t\tcsFiles, genDiags, _ := hcl2dotnet.GenerateProgram(program)\n\t\tgeneratedFiles, diagnostics = csFiles, append(diagnostics, genDiags...)\n\tcase LanguageGo:\n\t\tgoFiles, genDiags, _ := hcl2go.GenerateProgram(program)\n\t\tgeneratedFiles, diagnostics = goFiles, append(diagnostics, genDiags...)\n\t}\n\n\tif diagnostics.HasErrors() {\n\t\treturn nil, Diagnostics{All: diagnostics, files: tf12Files}, nil\n\t}\n\n\treturn generatedFiles, Diagnostics{All: diagnostics, files: tf12Files}, nil\n}", "func (c *CLI) Run(args []string) int {\n\tflags := flag.NewFlagSet(\"convert\", flag.ContinueOnError)\n\tflags.SetOutput(c.errStream)\n\tflags.StringVar(&from, \"from\", \"jpg\",\n\t\t\"input file format (support: jpg/png/gif, default: jpg)\")\n\tflags.StringVar(&from, \"f\", \"jpg\",\n\t\t\"input file format (support: jpg/png/gif, default: jpg)\")\n\tflags.StringVar(&to, \"to\", \"png\",\n\t\t\"output file format (support: jpg/png/gif, default: png)\")\n\tflags.StringVar(&to, \"t\", \"png\",\n\t\t\"output file format (support: jpg/png/gif, default: png)\")\n\tflags.Parse(args[1:])\n\tpath := flags.Arg(0)\n\n\tconverter := imageconv.NewConverter(path, from, to)\n\tfmt.Println(converter)\n\terr := filepath.Walk(converter.Path, converter.CrawlFile)\n\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n\n\tqueue := make(chan imageconv.ImageFile)\n\tfor _, image := range converter.Images {\n\t\twg.Add(1)\n\t\tgo converter.GetImages(queue, &wg)\n\t\tqueue <- image\n\t}\n\n\tclose(queue)\n\twg.Wait()\n\n\treturn ExitCodeOK\n\n}", "func processArgs(args []string) string {\r\n\r\n\toriginFile = args[1]\r\n\tfileNameWithExtension := strings.Split(path.Base(originFile), \".\")\r\n\tfileName = fileNameWithExtension[0]\r\n\tdirInputFlag = 0\r\n\ttargetFormat = \"txt\"\r\n\ttargetDir, dirErr := os.Getwd()\r\n\tfmt.Println(\"Dir selected\", targetDir)\r\n\tif dirErr != nil {\r\n\t\tlog.Fatal(dirErr)\r\n\t}\r\n\r\n\tfor i := range args {\r\n\r\n\t\tif strings.ToUpper(args[i]) == \"-O\" {\r\n\t\t\tdirInputFlag = 1\r\n\t\t\ttargetDir = args[i+1]\r\n\t\t}\r\n\r\n\t\tif strings.ToUpper(args[i]) == \"-T\" {\r\n\t\t\ttargetFormat = args[i+1]\r\n\t\t}\r\n\t}\r\n\r\n\treturn targetDir\r\n}", "func ConvertImage(img image.Image, format string) {\n\tf, err := os.Create(fmt.Sprintf(\"rome.%s\", format))\n\tif err != nil {\n\t\tlog.Panicln(\"Could not create file\")\n\t}\n\tdefer f.Close()\n\tswitch format {\n\tcase \"png\":\n\t\tpng.Encode(f, img)\n\tcase \"jpg\":\n\t\tjpeg.Encode(f, img, &jpeg.Options{Quality: *quality})\n\tcase \"webp\":\n\t\twebp.Encode(f, img, &webp.Options{Lossless: false, Quality: float32(*quality)})\n\tdefault:\n\t\tlog.Panicln(\"Format not supported\")\n\t}\n}", "func comandoMKFILE(comando string) {\n\tfmt.Println(\"\\nEJECUTANDO: \" + comando)\n\tif strings.Compare(comando, \"\") != 0 {\n\t\ts := strings.Split(comando, \" -\")\n\t\tatribP := 0\n\t\tatribPath := \"\"\n\t\tatribID := \"\"\n\t\tatribSize:= 0\n\t\tatribCont := \"\"\n\t\tif len(s) > 2 {\n\t\t\tfor i := 1; i < len(s); i++ {\n\t\t\t\ts2 := strings.Split(s[i], \"->\")\n\t\t\t\tif len(s2) > 0 {\n\t\t\t\t\tif len(s2) == 1 {\n\t\t\t\t\t\t//parametro p\n\t\t\t\t\t\tatribP = 1\n\t\t\t\t\t} else if len(s2) > 1 {\n\t\t\t\t\t\tswitch strings.ToLower(strings.TrimSpace(s2[0])) {\n\t\t\t\t\t\tcase \"id\":\n\t\t\t\t\t\t\tatribID = strings.ToLower(strings.TrimSpace(s2[1]))\n\t\t\t\t\t\tcase \"path\":\n\t\t\t\t\t\t\tatribPath = 
strings.ToLower(strings.TrimSpace(strings.ReplaceAll(s2[1], \"\\\"\", \"\")))\n\t\t\t\t\t\tcase \"size\":\n\t\t\t\t\t\t\tif strings.Contains(s2[1], \"-\"){\n\t\t\t\t\t\t\t\tfmt.Println(\"RESULTADO: El tamano del archivo no puede ser negativo\")\n\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tatribSize = atributoSize(strings.ToLower(strings.TrimSpace(strings.ReplaceAll(s2[1], \"\\\"\", \"\"))))\n\t\t\t\t\t\tcase \"cont\":\n\t\t\t\t\t\t\tatribCont = strings.TrimSpace(strings.ReplaceAll(s2[1], \"\\\"\", \"\"))\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\tfmt.Println(\"RESULTADO: Parametro no permitido para el comando MKFILE\")\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\t/*fmt.Println(atribP)\n\t\t\tfmt.Println(atribPath)\n\t\t\tfmt.Println(atribId)*/\n\t\t\tif strings.Compare(atribPath, \"\") != 0 {\n\t\t\t\tif strings.Compare(atribID, \"\") != 0 {\n\t\t\t\t\tcrearFile(atribID, atribPath, atribP, int64(atribSize), atribCont)\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Println(\"RESULTADO: Debe ingresar el id de la particion en la que desea crear el archivo\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"RESULTADO: Debe ingresar la ruta del archivo a crear\")\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Println(\"RESULTADO: Faltan parametros obligatorios para el comando MKDIR\")\n\t\t}\n\t}\n}", "func ConvertPath(c *Client, path string) (*Response, error) {\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\treturn c.Convert(f, f.Name())\n}", "func ConvertRaw(r io.Reader, w io.Writer, apikey string) {\n\tstart := time.Now()\n\tfmt.Fprintln(os.Stderr, \"starting.\")\n\n\tg := FullyProcessRaw(r, apikey)\n\n\tfmt.Fprintln(os.Stderr, \"writing fully processed graph to output Writer.\")\n\tw.Write([]byte(g.String()))\n\n\tfmt.Fprintf(os.Stderr, \"done. 
took %s.\\n\", time.Since(start))\n}", "func ConvertImageCommand(args []string) (*ImageCommandResult, error) {\n\tsize := len(args)\n\n\tcmdArr := make([]*C.char, size)\n\tfor i, s := range args {\n\t\tcmdArr[i] = C.CString(s)\n\t}\n\n\tempty := C.CString(\"\")\n\tmetaStr := C.AcquireString(empty)\n\tC.free(unsafe.Pointer(empty))\n\n\tdefer func() {\n\t\tfor i := range cmdArr {\n\t\t\tC.free(unsafe.Pointer(cmdArr[i]))\n\t\t}\n\n\t\tC.DestroyString(metaStr)\n\t}()\n\n\timageInfo := newImageInfo()\n\n\tvar exc *C.ExceptionInfo = C.AcquireExceptionInfo()\n\tdefer C.DestroyExceptionInfo(exc)\n\n\tok := C.ConvertImageCommand(\n\t\timageInfo.info,\n\t\tC.int(size), // argc\n\t\t&cmdArr[0], // argv\n\t\t&metaStr, // metadata\n\t\texc, // exception\n\t)\n\tif C.int(ok) == 0 {\n\t\timageInfo.Destroy()\n\t\treturn nil, newExceptionInfo(exc)\n\t}\n\n\tret := &ImageCommandResult{\n\t\tInfo: imageInfo,\n\t\tMeta: C.GoString(metaStr),\n\t}\n\treturn ret, nil\n}", "func jfCvt(ctx *cli.Context, f convert) error {\n\targv := ctx.RootArgv().(*rootT)\n\t//fmt.Printf(\"[%s]:\\n %+v\\n %v\\n\", ctx.Command().Name, argv, ctx.Args())\n\n\t// input data\n\tvar dd string\n\tif ctx.IsSet(\"--in\") { // -i,--in option is specified\n\t\tdata, err := ioutil.ReadAll(argv.Filei)\n\t\tabortOn(\"Input\", err)\n\t\targv.Filei.Close()\n\t\tdd = string(data)\n\t} else {\n\t\tdd = strings.Join(ctx.Args(), \" \")\n\t\tif dd == \"\" {\n\t\t\tctx.WriteUsage()\n\t\t\tos.Exit(0)\n\t\t}\n\t}\n\n\tfmt.Printf(f(dd))\n\treturn nil\n}", "func NewConvertCommand(reportLocation string, containerized bool) *ConvertCommand {\n\tcmd := &ConvertCommand{\n\t\tCommand: Command{\n\t\t\treportLocation: reportLocation,\n\t\t\tVersion: OVConvertCommand, //convert command 'results' version (report and artifacts)\n\t\t\tType: command.Convert,\n\t\t\tState: command.StateUnknown,\n\t\t},\n\t}\n\n\tcmd.Command.init(containerized)\n\treturn cmd\n}", "func ToFile(path ...string) Dest {\n\treturn ToModeFile(0600, path...)\n}", "func parseConvert(option, value string) (*Option, error) {\n\tsplitoption := strings.Fields(option)\n\n\tif len(splitoption) == 0 {\n\t\treturn nil, fmt.Errorf(\"there is an unspecified convert option at an unknown line\")\n\t} else if len(splitoption) == 1 || len(splitoption) > 2 {\n\t\treturn nil, fmt.Errorf(\"there is a misconfigured convert option: %q.\\nIs it in format <option>:<whitespaces><regex><whitespaces><regex>?\", option)\n\t}\n\n\tfuncRe, err := regexp.Compile(\"^\" + splitoption[0] + \"$\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"an error occurred compiling the regex for the first field in the convert option: %q\\n%v\", option, err)\n\t}\n\n\tfieldRe, err := regexp.Compile(\"^\" + splitoption[1] + \"$\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"an error occurred compiling the regex for the second field in the convert option: %q\\n%v\", option, err)\n\t}\n\n\treturn &Option{\n\t\tCategory: categoryConvert,\n\t\tRegex: map[int]*regexp.Regexp{0: funcRe, 1: fieldRe},\n\t\tValue: value,\n\t}, nil\n}", "func (t *TransformCmd) Run() error {\n\n\tjt, err := jsont.NewJSONTransformerWithFile(t.transFile)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, f := range t.inFiles {\n\n\t\toutData, err := jt.TransformWithFile(f)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfileName := filepath.Base(f)\n\t\tdotTokens := strings.Split(fileName, \".\")\n\t\toutFileName := strings.Join(dotTokens[:len(dotTokens)-1], \".\") + \".out.\" + dotTokens[len(dotTokens)-1]\n\n\t\toutFile := filepath.Join(t.outDir, 
outFileName)\n\n\t\tif err := ioutil.WriteFile(outFile, outData, 0644); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func formatFile(name string, b []byte) []byte {\n\tsrc, err := format.Source(b)\n\tif err != nil {\n\t\t// Should never happen, but can arise when developing this code.\n\t\t// The user can compile the output to see the error.\n\t\tlog.Printf(\"warning: internal error: invalid Go generated in %s: %s\", name, err)\n\t\tlog.Printf(\"warning: compile the package to analyze the error\")\n\t\treturn b\n\t}\n\treturn src\n}", "func convertDBtoSQL(fileName string) {\n\tdb, err := LoadDBFile(fileName)\n\tif err != nil {\n panic(err)\n }\n defer db.Close()\n\t\n\ttailHeight := lastBlockHeight(db)\n\tdata := convert(db, tailHeight)\n\n\t//fmt.Println(data)\n\n\t// //create new file and write data in the file\n\tfile, err := os.Create(\"dappleyweb.sql\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer file.Close()\n\n\t_, err = file.Write([]byte(data))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func convertPathToAbs(outfile string, target map[string]string) string {\n\towd := target[\"owd\"]\n\tif !filepath.IsAbs(outfile) {\n\t\toutfile = filepath.Join(owd, outfile)\n\t}\n\treturn outfile\n}", "func ToFileType(name string) FileType {\n\tswitch name {\n\tcase \"fasta\":\n\t\treturn FastaFile\n\tcase \"fastq\":\n\t\treturn FastqFile\n\tcase \"gb\", \"genbank\":\n\t\treturn GenBankFile\n\tcase \"emb\", \"embl\":\n\t\treturn EMBLFile\n\tdefault:\n\t\treturn DefaultFile\n\t}\n}", "func (c *Converter) convertInclude(include *nast.IncludeDirective) error {\n\n\tc.includecount++\n\tif c.includecount > 20 {\n\t\treturn &parser.Error{\n\t\t\tMessage: \"Error when processing includes: Include-loop detected\",\n\t\t\tStartPosition: ast.NewPosition(\"\", 1, 1),\n\t\t\tEndPosition: ast.NewPosition(\"\", 20, 70),\n\t\t}\n\t}\n\n\tfilesnames := make([]string, 1)\n\tfilesnames[0] = include.File\n\n\tfile, err := c.getIncludedFile(include)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp := NewParser().(*Parser)\n\tp.SetFilename(include.File)\n\tparsed, err := p.Parse(file)\n\tif err != nil {\n\t\t// override the position of the error with the position of the include\n\t\t// this way the error gets displayed at the correct location\n\t\t// the message does contain the original location\n\t\treturn &parser.Error{\n\t\t\tMessage: err.Error(),\n\t\t\tStartPosition: include.Start(),\n\t\t\tEndPosition: include.End(),\n\t\t}\n\t}\n\n\tif usesTimeTracking(parsed) {\n\t\tc.usesTimeTracking = true\n\t}\n\n\treplacements := make([]ast.Node, len(parsed.Elements))\n\tfor i := range parsed.Elements {\n\t\treplacements[i] = parsed.Elements[i]\n\t}\n\treturn ast.NewNodeReplacement(replacements...)\n}", "func convert(from, to runtime.Object) error {\n\tb, err := json.Marshal(from)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn json.Unmarshal(b, &to)\n}", "func (a assets) format() error {\n\tfor path, content := range a.files {\n\t\tsrc, err := imports.Process(path, content, nil)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"format file %s: %w\", path, err)\n\t\t}\n\t\tif err := os.WriteFile(path, src, 0644); err != nil {\n\t\t\treturn fmt.Errorf(\"write file %s: %w\", path, err)\n\t\t}\n\t}\n\treturn nil\n}", "func (c *ContactService) Convert(contact *Contact, user *User) (User, error) {\n\treturn c.Repository.convert(contact, user)\n}", "func (e ext) ConvertExt(v interface{}) interface{} {\n\tpanic(\"ConvertExt not supported\")\n}", "func (a *Arguments) Convert() *blackbox_exporter.Config 
{\n\treturn &blackbox_exporter.Config{\n\t\tBlackboxConfigFile: a.ConfigFile,\n\t\tBlackboxConfig: a.ConfigStruct,\n\t\tBlackboxTargets: a.Targets.Convert(),\n\t\tProbeTimeoutOffset: a.ProbeTimeoutOffset.Seconds(),\n\t}\n}", "func (app *adapter) ToFile(ins *JSONFile) (File, error) {\n\treturn createFileFromJSON(ins)\n}", "func Run(path string, overwrite bool) error {\n\tif filepath.Ext(path) != \".proto\" {\n\t\treturn fmt.Errorf(\"convert requires a .proto file\")\n\t}\n\treader, err := os.Open(path)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to read file %q: %v\", path, err)\n\t}\n\tdefer reader.Close()\n\n\t// Parse the proto file.\n\tparser := proto.NewParser(reader)\n\td, err := parser.Parse()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to parse proto file %q: %v\", path, err)\n\t}\n\n\tfileToWrite := strings.Replace(filepath.Base(path), \".proto\", \".gunk\", 1)\n\tfullpath := filepath.Join(filepath.Dir(path), fileToWrite)\n\n\tif _, err := os.Stat(fullpath); !os.IsNotExist(err) && !overwrite {\n\t\treturn fmt.Errorf(\"path already exists %q, use --overwrite\", fullpath)\n\t}\n\n\t// Start converting the proto declarations to gunk.\n\tb := builder{}\n\tfor _, e := range d.Elements {\n\t\tif err := b.handleProtoType(e); err != nil {\n\t\t\treturn fmt.Errorf(\"%v\\n\", err)\n\t\t}\n\t}\n\n\t// Convert the proto package and imports to gunk.\n\ttranslatedPkg := b.handlePackage()\n\ttranslatedImports := b.handleImports()\n\n\t// Add the converted package and imports, and then\n\t// add all the rest of the converted types. This will\n\t// keep the order that things were declared.\n\tw := &strings.Builder{}\n\tw.WriteString(translatedPkg)\n\tw.WriteString(\"\\n\\n\")\n\tw.WriteString(translatedImports)\n\tw.WriteString(\"\\n\")\n\tfor _, l := range b.translatedDeclarations {\n\t\tw.WriteString(\"\\n\")\n\t\tw.WriteString(l)\n\t\tw.WriteString(\"\\n\")\n\t}\n\n\t// TODO: We should run this through the Gunk generator to\n\t// make sure that it compiles?\n\n\tresult := []byte(w.String())\n\tresult, err = format.Source(result)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := ioutil.WriteFile(fullpath, result, 0644); err != nil {\n\t\treturn fmt.Errorf(\"unable to write to file %q: %v\", fullpath, err)\n\t}\n\n\treturn nil\n}", "func convertFileInfo(\n\tin os.FileInfo,\n\tsymlinkTarget string,\n\tuserRegistry sys.UserRegistry,\n\tgroupRegistry sys.GroupRegistry) (out *FileInfo, err error) {\n\t// Grab system-specific info.\n\tstatT, ok := in.Sys().(*syscall.Stat_t)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"Unexpected sys value: %v\", in.Sys())\n\t}\n\n\tif statT.Size < 0 {\n\t\tpanic(fmt.Sprintf(\"Unexpected size: %d\", statT.Size))\n\t}\n\n\t// Create the basic struct.\n\tout = &FileInfo{\n\t\tName: in.Name(),\n\t\tPermissions: in.Mode() & permissionBits,\n\t\tUid: sys.UserId(statT.Uid),\n\t\tGid: sys.GroupId(statT.Gid),\n\t\tMTime: in.ModTime(),\n\t\tSize: uint64(statT.Size),\n\t\tContainingDevice: statT.Dev,\n\t\tInode: statT.Ino,\n\t\tTarget: symlinkTarget,\n\t}\n\n\t// Attempt to look up user info.\n\tusername, err := userRegistry.FindById(out.Uid)\n\n\tif _, ok := err.(sys.NotFoundError); ok {\n\t\terr = nil\n\t} else if err != nil {\n\t\treturn nil, fmt.Errorf(\"Looking up user: %v\", err)\n\t} else {\n\t\tout.Username = &username\n\t}\n\n\t// Attempt to look up group info.\n\tgroupname, err := groupRegistry.FindById(out.Gid)\n\n\tif _, ok := err.(sys.NotFoundError); ok {\n\t\terr = nil\n\t} else if err != nil {\n\t\treturn nil, fmt.Errorf(\"Looking up group: %v\", 
err)\n\t} else {\n\t\tout.Groupname = &groupname\n\t}\n\n\t// Convert the type.\n\ttypeBits := in.Mode() & (os.ModeType | os.ModeCharDevice)\n\tswitch typeBits {\n\tcase 0:\n\t\tout.Type = TypeFile\n\tcase os.ModeDir:\n\t\tout.Type = TypeDirectory\n\tcase os.ModeSymlink:\n\t\tout.Type = TypeSymlink\n\tcase os.ModeDevice:\n\t\tout.Type = TypeBlockDevice\n\t\tout.DeviceNumber = statT.Rdev\n\tcase os.ModeDevice | os.ModeCharDevice:\n\t\tout.Type = TypeCharDevice\n\t\tout.DeviceNumber = statT.Rdev\n\tcase os.ModeNamedPipe:\n\t\tout.Type = TypeNamedPipe\n\tcase os.ModeSocket:\n\t\tout.Type = TypeSocket\n\tdefault:\n\t\treturn out, fmt.Errorf(\"Unhandled mode: %v\", in.Mode())\n\t}\n\n\treturn out, nil\n}", "func CreateFromFile(path string) (*Converter, error) {\n\tvar data, err = ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar dict map[string]string\n\terr = json.Unmarshal(data, &dict)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn createFromDict(dict)\n}", "func formatSourceAndWrite(resFileName string, buf *bytes.Buffer) error {\n\tout, err := os.Create(resFileName)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error while create result file %w\", err)\n\t}\n\n\tp, err := format.Source(buf.Bytes())\n\tif err != nil {\n\t\tos.Remove(resFileName)\n\t\treturn err\n\t}\n\t_, err = out.Write(p)\n\tif err != nil {\n\t\tos.Remove(resFileName)\n\t\treturn err\n\t}\n\treturn nil\n}", "func (r *RenderEnv) Text2File(text, dst string, vars *variables.Variables, nounset, noempty bool) (err error) {\n\ttd := os.Getenv(\"TEMP\")\n\tf, err := ioutil.TempFile(td, \"prjstart-*\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Text2File: %w\", err)\n\t}\n\n\tresult, err := r.Text2String(text, vars, nounset, noempty)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Text2File: %w\", err)\n\t}\n\n\t_, err = f.WriteString(result)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Text2File: %w\", err)\n\t}\n\n\terr = f.Close()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Text2File: %w\", err)\n\t}\n\terr = os.Rename(f.Name(), dst)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Text2File: %w\", err)\n\t}\n\treturn\n}", "func processFile(file *File) {\n\tapplyTransformers(file)\n\tanalyzeFile(file)\n}", "func (g *Generator) generateConverters(file *FileDescriptor) {\n\tg.file = file\n\n\tjavaClsName := javaConverterName(file)\n\n\tpathComp := append(strings.Split(g.ConverterPackage, \".\"), fmt.Sprintf(\"%s.java\", javaClsName))\n\tg.Reset()\n\tpopulatePbToBeanConverter(g, file, javaClsName)\n\tg.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{\n\t\tName: proto.String(path.Join(pathComp...)),\n\t\tContent: proto.String(g.String()),\n\t})\n}", "func (conn *protectedConn) convert() error {\n\tconn.mutex.Lock()\n\tfile := os.NewFile(uintptr(conn.socketFd), \"\")\n\t// dup the fd and return a copy\n\tfileConn, err := net.FileConn(file)\n\t// closes the original fd\n\tfile.Close()\n\tconn.socketFd = socketError\n\tif err != nil {\n\t\tconn.mutex.Unlock()\n\t\treturn err\n\t}\n\tconn.Conn = fileConn\n\tconn.mutex.Unlock()\n\treturn nil\n}", "func flac2wav(in os.File, n Namer) (os.File, error) {\n\tout := n(in)\n\tcmd := exec.Command(\"flac\",\n\t\t\"-f\", // overwrite any existing file\n\t\t\"--silent\",\t// output is useless because we are multiplexing goroutines\n\t\t\"-d\", in.Name(), // decode file (input)\n\t\t\"-o\", out.Name()) // output file\n\tcmd.Stderr = os.Stderr\n\tcmd.Stdout = os.Stdout\n\terr := cmd.Run()\n\treturn out, err\n}", "func convertToTarget(dir string, 
targetNamePrefix string) string {\n\treturn targetNamePrefix + strings.ReplaceAll(dir, \"/\", \"-\")\n}", "func (c *V3Loader) ConvertToIR(composefilepath string, serviceName string) (irtypes.IR, error) {\n\tlogrus.Debugf(\"About to load configuration from docker compose file at path %s\", composefilepath)\n\tconfig, err := ParseV3(composefilepath)\n\tif err != nil {\n\t\tlogrus.Warnf(\"Error while loading docker compose config : %s\", err)\n\t\treturn irtypes.IR{}, err\n\t}\n\tlogrus.Debugf(\"About to start loading docker compose to intermediate rep\")\n\treturn c.convertToIR(filepath.Dir(composefilepath), *config, serviceName)\n}", "func (m *Master) ConvertT() error {\n\treturn m.WriteByte(convertT)\n}", "func (ra *FixedAvro) Convert(message map[string]interface{}) (string, *errs.ParserError) {\n\tconvertedMessage := string(`{\n\t\t\"definitions\" : {\n\t\t \"%s\": %s\n\t\t},\n\t\t\"$ref\" : \"#/definitions/%s\"\n\t}`)\n\n\trefKey := fmt.Sprintf(\"%s:%s\", message[\"type\"].(string), message[\"name\"].(string))\n\tfAvro := NewFixedAvro(message[\"type\"].(string), refKey, message[\"size\"].(float64))\n\n\tjFAvro, _ := json.Marshal(fAvro)\n\ttranslated := fmt.Sprintf(\"%s\", string(jFAvro))\n\treturn fmt.Sprintf(convertedMessage, refKey, translated, refKey), nil\n}", "func convert(excelPath string) {\r\n\tvar listFiles []string\r\n\tvar listNames []string\r\n\tvar firstName, lastName string\r\n\r\n\tlistFiles, listNames = findFiles(excelPath)\r\n\r\n\tfor _, sl := range configSetting.List {\r\n\t\tfor i, ln := range listNames {\r\n\t\t\tif configSetting.IsFileSplit {\r\n\t\t\t\tfirstName = strings.Split(strings.Split(ln, \".\")[0], configSetting.SplitKey)[0]\r\n\t\t\t} else {\r\n\t\t\t\tfirstName = strings.Split(ln, \".\")[0]\r\n\t\t\t}\r\n\t\t\tlastName = strings.Split(ln, \".\")[len(strings.Split(ln, \".\"))-1]\r\n\t\t\tif sl.XlBook == firstName && (lastName == \"xlsx\" || lastName == \"xls\" || lastName == \"xlsm\") {\r\n\t\t\t\txlsxData, err := readXlsx(listFiles[i], sl.XlSheet, sl.MaxRows, sl.Keys)\r\n\t\t\t\tif err != nil {\r\n\t\t\t\t\tfmt.Println(\"[WARNING]excel data is wrong:\", err)\r\n\t\t\t\t\tcontinue\r\n\t\t\t\t}\r\n\t\t\t\tif _, ok := configMap[sl.Json]; ok {\r\n\t\t\t\t\tconfigMap[sl.Json] = append(configMap[sl.Json], xlsxData...)\r\n\t\t\t\t} else {\r\n\t\t\t\t\tconfigMap[sl.Json] = xlsxData\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n}", "func outputOHIJmxFile(filename string, d []*domainOutput) {\n\tlog.Info(\"New File: \" + filename + \".new\\n\")\n\tm, err := yaml.Marshal(&collectOutput{Collect: d})\n\tif err != nil {\n\t\tfmt.Printf(\"error: %v\", err)\n\t}\n\tfmt.Printf(\"%s\", string(m))\n}", "func GetPDFConv() []byte {\n\tscript := []byte(`\nimport Quartz as Quartz\nfrom CoreFoundation import NSImage\nfrom os.path import realpath, basename\nfrom sys import argv\n\n\ndef png_to_pdf(args):\n image = NSImage.alloc().initWithContentsOfFile_(args[0])\n page_init = Quartz.PDFPage.alloc().initWithImage_(image)\n pdf = Quartz.PDFDocument.alloc().initWithData_(page_init.dataRepresentation())\n\n for index, file_path in enumerate(args[1:]):\n image = NSImage.alloc().initWithContentsOfFile_(file_path)\n page_init = Quartz.PDFPage.alloc().initWithImage_(image)\n pdf.insertPage_atIndex_(page_init, index + 1)\n\n pdf.writeToFile_(realpath(__file__)[:-len(basename(__file__))] + 'aggr.pdf')\n\n\nif __name__ == '__main__':\n\tpng_to_pdf(argv[1:])\n`)\n\treturn bytes.ReplaceAll(script, []byte{0x09}, []byte{0x20, 0x20, 0x20, 0x20})\n}", "func Convert(markdownText string) string 
{\n\treturn string(blackfriday.Run(\n\t\t[]byte(markdownText),\n\t\tblackfriday.WithRenderer(&confluenceRenderer{}),\n\t))\n}", "func newConverterManager(archive string, ignoreEmpty bool) (*ConverterManager, error) {\n\t// build dir path values\n\tpwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tt := time.Now()\n\toRoot := filepath.Join(\n\t\tpwd,\n\t\tfmt.Sprintf(\"medium-to-hugo_%d%02d%02d_%02d%02d\", t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute()))\n\toIn := filepath.Join(oRoot, \"in\")\n\toOut := filepath.Join(oRoot, \"out\")\n\n\t// create the directories\n\t// 1. root and output directories\n\terr = os.MkdirAll(oRoot, os.ModePerm)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = os.MkdirAll(oOut, os.ModePerm)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpostsPath := filepath.Join(oOut, HContentType)\n\timagesPath := filepath.Join(postsPath, HImagesDirName)\n\n\t// 2. input dir, unzip will create and extract contents\n\tfiles, err := unzipFile(archive, oIn)\n\tif err != nil || len(files) == 0 {\n\t\treturn nil, fmt.Errorf(\"couldn't extract archive: %s => %s\", archive, err)\n\t}\n\n\tmediumPosts := filepath.Join(oIn, \"posts\")\n\texists, _ := fileExists(mediumPosts)\n\tif !exists {\n\t\treturn nil, fmt.Errorf(\"couldn't find posts content in the medium extract archive: %s\", oIn)\n\t}\n\n\t// create a markdown converter\n\top := md.Options{\n\t\tCodeBlockStyle: \"fenced\",\n\t}\n\tconverter := md.NewConverter(\"\", true, &op)\n\t// don't remove br tags\n\tconverter.Keep(\"br\")\n\tconverter.AddRules(ruleOverrides...)\n\n\tmgr := &ConverterManager{\n\t\tInPath: oIn,\n\t\tMediumPostsPath: mediumPosts,\n\t\tOutputPath: oOut,\n\t\tPostsPath: postsPath,\n\t\tImagesPath: imagesPath,\n\t\tIgnoreEmpty: ignoreEmpty,\n\t\tMDConverter: converter,\n\t}\n\n\treturn mgr, nil\n}", "func convertIfNeeded(content []byte) ([]byte, error) {\n\tversion, err := getVersion(content)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Switch contains FALLTHROUGH to convert from a certain version to the latest.\n\tswitch version {\n\tcase strconv.Itoa(coreutils.GetCliConfigVersion()):\n\t\treturn content, nil\n\tcase \"0\":\n\t\tcontent, err = convertConfigV0toV1(content)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfallthrough\n\tcase \"1\":\n\t\terr = createHomeDirBackup()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\terr = convertCertsDir()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfallthrough\n\tcase \"2\":\n\t\tcontent, err = convertConfigV2toV3(content)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfallthrough\n\tcase \"3\", \"4\":\n\t\tcontent, err = convertConfigV4toV5(content)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfallthrough\n\tcase \"5\":\n\t\tcontent, err = convertConfigV5toV6(content)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Save config after all conversions (also updates version).\n\tresult := new(Config)\n\terr = json.Unmarshal(content, &result)\n\tif errorutils.CheckError(err) != nil {\n\t\treturn nil, err\n\t}\n\tresult.Version = strconv.Itoa(coreutils.GetCliConfigVersion())\n\terr = saveConfig(result)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcontent, err = json.Marshal(&result)\n\tif err != nil {\n\t\treturn nil, errorutils.CheckError(err)\n\t}\n\treturn content, err\n}", "func processResource(rn *VaultResource, data map[string]interface{}) (err error) {\n\t// step: determine the resource path\n\tfilename := rn.GetFilename()\n\tif !filepath.IsAbs(filename) 
{\n\t\tfilename = filepath.Join(options.outputDir, filename)\n\t}\n\t// step: format and write the file\n\tswitch rn.format {\n\tcase \"yaml\":\n\t\tfallthrough\n\tcase \"yml\":\n\t\terr = writeYAMLFile(filename, data, rn.fileMode)\n\tcase \"json\":\n\t\terr = writeJSONFile(filename, data, rn.fileMode)\n\tcase \"ini\":\n\t\terr = writeIniFile(filename, data, rn.fileMode)\n\tcase \"csv\":\n\t\terr = writeCSVFile(filename, data, rn.fileMode)\n\tcase \"env\":\n\t\terr = writeEnvFile(filename, data, rn.fileMode)\n\tcase \"cert\":\n\t\terr = writeCertificateFile(filename, data, rn.fileMode)\n\tcase \"txt\":\n\t\terr = writeTxtFile(filename, data, rn.fileMode)\n\tcase \"flatten\":\n\t\terr = writeFlattenFiles(filename, data, rn.fileMode)\n\tcase \"bundle\":\n\t\terr = writeCertificateBundleFile(filename, data, rn.fileMode)\n\tcase \"credential\":\n\t\terr = writeCredentialFile(filename, data, rn.fileMode)\n\tcase \"template\":\n\t\terr = writeTemplateFile(filename, data, rn.fileMode, rn.templateFile)\n\tcase \"aws\":\n\t\terr = writeAwsCredentialFile(filename, data, rn.fileMode)\n\tdefault:\n\t\treturn fmt.Errorf(\"unknown output format: %s\", rn.format)\n\t}\n\t// step: check for an error\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// step: check if we need to execute a command\n\tif rn.execPath != \"\" {\n\t\tglog.V(10).Infof(\"executing the command: %s for resource: %s\", rn.execPath, filename)\n\t\tparts := strings.Split(rn.execPath, \" \")\n\t\tvar args []string\n\t\tif len(parts) > 1 {\n\t\t\targs = parts[1:]\n\t\t} else {\n\t\t\targs = []string{filename}\n\t\t}\n\n\t\tcmd := exec.Command(parts[0], args...)\n\t\tcmd.Start()\n\t\ttimer := time.AfterFunc(options.execTimeout, func() {\n\t\t\tif err = cmd.Process.Kill(); err != nil {\n\t\t\t\tglog.Errorf(\"failed to kill the command, pid: %d, error: %s\", cmd.Process.Pid, err)\n\t\t\t}\n\t\t})\n\t\t// step: wait for the command to finish\n\t\terr = cmd.Wait()\n\t\ttimer.Stop()\n\t}\n\n\treturn err\n}", "func (c ConvertOpts) Execute(_ []string) (err error) {\n\tif Opts.Debug {\n\t\tfmt.Fprintf(os.Stderr, \"Options: %#v\\n\", Opts)\n\t}\n\tset, err := mapping.NewMappingSet(Opts.Mapping)\n\tif err != nil {\n\t\treturn\n\t}\n\tif Opts.Debug {\n\t\tfmt.Fprintf(os.Stderr, \"Mapping Set: %#v\\n\", set)\n\t}\n\n\tinputs := mapping.NewInputsFromFlags(Opts.Inputs)\n\n\toutput := operator.NewOperatorOutput(Opts.Target)\n\tset.GenerateOutput(inputs, output)\n\tif Opts.Debug {\n\t\tfmt.Fprintf(os.Stderr, \"OperatorOutput: %#v\\n\", output)\n\t}\n\n\tif Opts.Target == \"\" {\n\t\tfmt.Println(output)\n\t} else {\n\t\tvar data []byte = []byte(output.String())\n\t\tif err = ioutil.WriteFile(Opts.Target, data, 0600); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn nil\n}", "func ImageToPdfGm(w io.Writer, r io.Reader, contentType string) error {\n\t//log.Printf(\"converting image %s to %s\", contentType, destfn)\n\timgtyp := \"\"\n\tif false && contentType != \"\" {\n\t\timgtyp = contentType[strings.Index(contentType, \"/\")+1:] + \":\"\n\t}\n\n\tcmd := exec.Command(*ConfGm, \"convert\", imgtyp+\"-\", \"pdf:-\")\n\t// cmd.Stdin = io.TeeReader(r, os.Stderr)\n\tcmd.Stdin = r\n\tcmd.Stdout = w\n\terrout := bytes.NewBuffer(nil)\n\tcmd.Stderr = errout\n\tlogger.Debug(\"msg\", \"ImageToPdfGm calls\", \"command\", cmd)\n\terr := runWithTimeout(cmd)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"gm convert exited %s while converting %s: %s\",\n\t\t\terr, r, errout.Bytes())\n\t}\n\tif len(errout.Bytes()) > 0 {\n\t\tlogger.Warn(\"msg\", \"gm convert\", \"r\", r, 
\"stderr\", errout.String())\n\t}\n\treturn nil\n}", "func convertSCIPToLSIFGraph(out *output.Output, inputFile, outputFile string) error {\n\tif out != nil {\n\t\tout.Writef(\"%s Converting %s into %s\", output.EmojiInfo, inputFile, outputFile)\n\t}\n\ttmp, err := os.Create(outputFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer tmp.Close()\n\n\tdata, err := os.ReadFile(inputFile)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to read SCIP index '%s'\", inputFile)\n\t}\n\tindex := scip.Index{}\n\terr = proto.Unmarshal(data, &index)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to parse protobuf file '%s'\", inputFile)\n\t}\n\tels, err := scip.ConvertSCIPToLSIF(&index)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to convert SCIP index at '%s' to LSIF\", inputFile)\n\t}\n\terr = scip.WriteNDJSON(scip.ElementsToJsonElements(els), tmp)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to write LSIF JSON output to '%s'\", tmp.Name())\n\t}\n\terr = tmp.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func preprocess(filename, preprocessor string) ([]byte, error) {\n\tcc := exec.Command(preprocessor, \"-C\", \"-E\", \"-DC2GO\", filename)\n\tdata, err := cc.Output()\n\tif exitErr, ok := err.(*exec.ExitError); ok {\n\t\treturn nil, fmt.Errorf(\"%s\", exitErr.Stderr)\n\t}\n\treturn data, err\n}", "func convert(img image.Image, w, h int) (*Image, error) {\n\tif img == nil {\n\t\treturn nil, errors.New(\"No image found\")\n\t}\n\t// set output image size\n\twidth := w\n\theight := h\n\tif w <= 0 || h <= 0 {\n\t\twidth = imageWidth\n\t\tsz := img.Bounds()\n\t\theight = (sz.Max.Y * width * 10) / (sz.Max.X * 16)\n\t}\n\timg = resize.Resize(uint(width), uint(height), img, resize.Lanczos3)\n\n\ttable := []byte(ASCII)\n\tbuf := new(bytes.Buffer)\n\n\tfor i := 0; i < height; i++ {\n\t\tfor j := 0; j < width; j++ {\n\t\t\tp := img.At(j, i)\n\t\t\tg := color.GrayModel.Convert(p)\n\t\t\ty, _, _, _ := g.RGBA()\n\t\t\tpos := int(y * 16 / 1 >> 16)\n\t\t\t_ = buf.WriteByte(table[pos])\n\t\t}\n\t\t_ = buf.WriteByte('\\n')\n\t}\n\n\treturn &Image{Data: string(buf.Bytes())}, nil\n}" ]
[ "0.67846465", "0.6675129", "0.63838804", "0.63182414", "0.62801427", "0.62730193", "0.61669016", "0.6140353", "0.6136235", "0.60967994", "0.6027835", "0.5972999", "0.5927276", "0.5762783", "0.565666", "0.56553465", "0.5606723", "0.5505648", "0.5443074", "0.54286844", "0.53785783", "0.53773826", "0.53699625", "0.5341749", "0.5322001", "0.52884287", "0.5287751", "0.52566373", "0.5245365", "0.52330124", "0.52250963", "0.5191454", "0.51826876", "0.5166313", "0.5143168", "0.5125575", "0.5121391", "0.5120439", "0.5110305", "0.5093933", "0.50938374", "0.50605106", "0.5052312", "0.504679", "0.5045446", "0.5006773", "0.49881425", "0.4986135", "0.49690548", "0.4965073", "0.49619043", "0.49291393", "0.4924889", "0.49056724", "0.49028844", "0.48933282", "0.4876883", "0.48604107", "0.48591602", "0.48368344", "0.483157", "0.48174185", "0.47946572", "0.4773039", "0.47622272", "0.47556245", "0.47543323", "0.4752005", "0.47411293", "0.47292447", "0.47290963", "0.47220334", "0.47117865", "0.47117457", "0.47040296", "0.46904394", "0.46720523", "0.4671161", "0.46670297", "0.4659249", "0.4651605", "0.46425736", "0.46370167", "0.4632442", "0.46293902", "0.46266145", "0.46246758", "0.46190497", "0.46136618", "0.46116024", "0.4594982", "0.45927754", "0.4591726", "0.4578515", "0.45754954", "0.45707244", "0.4570536", "0.45641968", "0.45641962", "0.45614907" ]
0.6845794
0
PopCount returns the population count (number of set bits) of x.
func Test17(x uint64) int { return int(pc[byte(x>>(0*8))] + pc[byte(x>>(1*8))] + pc[byte(x>>(2*8))] + pc[byte(x>>(3*8))] + pc[byte(x>>(4*8))] + pc[byte(x>>(5*8))] + pc[byte(x>>(6*8))] + pc[byte(x>>(7*8))]) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func PopCount(x uint64) int {\n\tvar c uint64\n\tfor i := 0; i < 64; i++ {\n\t\tc += (x >> i) & 1\n\t}\n\treturn int(c)\n}", "func popcnt(x uint64) int {\n\t// Code adapted from https://chessprogramming.wikispaces.com/Population+Count.\n\tx = x - ((x >> 1) & k1)\n\tx = (x & k2) + ((x >> 2) & k2)\n\tx = (x + (x >> 4)) & k4\n\tx = (x * kf) >> 56\n\treturn int(x)\n}", "func PopCount(x uint64) int {\n\tn := 0\n\tfor x != 0 {\n\t\tx = x & (x - 1)\n\t\tn++\n\t}\n\treturn n\n}", "func PopCount(x uint64) int {\n\treturn int(\n\t\tpc[byte(x>>(0*8))] +\n\t\t\tpc[byte(x>>(1*8))] +\n\t\t\tpc[byte(x>>(2*8))] +\n\t\t\tpc[byte(x>>(3*8))] +\n\t\t\tpc[byte(x>>(4*8))] +\n\t\t\tpc[byte(x>>(5*8))] +\n\t\t\tpc[byte(x>>(6*8))] +\n\t\t\tpc[byte(x>>(7*8))])\n}", "func PopCount(x uint64) int {\n\treturn int(pc[byte(x>>(0*8))] +\n\t\tpc[byte(x>>(1*8))] +\n\t\tpc[byte(x>>(2*8))] +\n\t\tpc[byte(x>>(3*8))] +\n\t\tpc[byte(x>>(4*8))] +\n\t\tpc[byte(x>>(5*8))] +\n\t\tpc[byte(x>>(6*8))] +\n\t\tpc[byte(x>>(7*8))])\n}", "func PopCount(x uint64) int {\n\treturn int(pc[byte(x>>(0*8))] +\n\t\tpc[byte(x>>(1*8))] +\n\t\tpc[byte(x>>(2*8))] +\n\t\tpc[byte(x>>(3*8))] +\n\t\tpc[byte(x>>(4*8))] +\n\t\tpc[byte(x>>(5*8))] +\n\t\tpc[byte(x>>(6*8))] +\n\t\tpc[byte(x>>(7*8))])\n}", "func PopCount(x uint64) int {\n\treturn int(pc[byte(x>>(0*8))]) +\n\t\tint(pc[byte(x>>(1*8))]) +\n\t\tint(pc[byte(x>>(2*8))]) +\n\t\tint(pc[byte(x>>(3*8))]) +\n\t\tint(pc[byte(x>>(4*8))]) +\n\t\tint(pc[byte(x>>(5*8))]) +\n\t\tint(pc[byte(x>>(6*8))]) +\n\t\tint(pc[byte(x>>(7*8))])\n}", "func PopCount(x uint64) int {\n\tvar sum byte\n\tfor i := uint64(0); i < 8; i++ {\n\t\tsum += pc[byte(x>>(i*8))]\n\t}\n\treturn int(sum)\n}", "func PopCount(x uint64) int {\n\t//fmt.Println(pc)\n\treturn int(pc[byte(x>>(0*8))] +\n\t\tpc[byte(x>>(1*8))] +\n\t\tpc[byte(x>>(2*8))] +\n\t\tpc[byte(x>>(3*8))] +\n\t\tpc[byte(x>>(4*8))] +\n\t\tpc[byte(x>>(5*8))] +\n\t\tpc[byte(x>>(6*8))] +\n\t\tpc[byte(x>>(7*8))])\n}", "func PopCount(num int) int {\n\tres := 0\n\n\tfor i := 0; i < 70; i++ {\n\t\tif ((num >> uint(i)) & 1) == 1 {\n\t\t\tres++\n\t\t}\n\t}\n\n\treturn res\n}", "func PopCount(x uint64) uint64 {\n\tvar _sum byte\n\n\tvar i uint64\n\tfor i < 8 {\n\t\t_sum += pc[byte(x>>(i*8))]\n\t\ti += 1\n\t}\n\treturn uint64(_sum)\n}", "func PopCount(number int) int {\n\tcounts := 0\n\tfor number > 0 {\n\t\tcounts += number & 1\n\t\tnumber = number >> 1\n\t}\n\t\n\treturn counts\n}", "func popcount(x uint64) (n uint64) {\n\tx -= (x >> 1) & 0x5555555555555555\n\tx = (x>>2)&0x3333333333333333 + x&0x3333333333333333\n\tx += x >> 4\n\tx &= 0x0f0f0f0f0f0f0f0f\n\tx *= 0x0101010101010101\n\treturn x >> 56\n}", "func PopCount(num int, ub int) int {\n\tres := 0\n\n\tfor i := 0; i < ub; i++ {\n\t\tif ((num >> uint(i)) & 1) == 1 {\n\t\t\tres++\n\t\t}\n\t}\n\n\treturn res\n}", "func PopulationCount(x byte) int {\n\tcount := 0\n\tfor x != 0 {\n\t\tcount++\n\t\tx &= (x - 1)\n\t}\n\treturn count\n}", "func PopCount(x uint64) int {\n result := 0\n for i := 0; i <= 8; i++ {\n result += int(pc[byte(x>>(i*8))])\n }\n return result\n /*\n return int(pc[byte(x>>(0*8))] +\n pc[byte(x>>(1*8))] +\n pc[byte(x>>(2*8))] +\n pc[byte(x>>(3*8))] +\n pc[byte(x>>(4*8))] +\n pc[byte(x>>(5*8))] +\n pc[byte(x>>(6*8))] +\n pc[byte(x>>(7*8))])*/\n}", "func popcount(x uint64) int {\n\tx -= (x >> 1) & m1 //put count of each 2 bits into those 2 bits\n\tx = (x & m2) + ((x >> 2) & m2) //put count of each 4 bits into those 4 bits\n\tx = (x + (x >> 4)) & m4 //put count of each 8 bits into those 8 bits\n\treturn int((x * h01) >> 56) 
//returns left 8 bits of x + (x<<8) + (x<<16) + (x<<24) + ...\n}", "func PopCountClears(x uint64) int {\n\tn := 0\n\tfor x != 0 {\n\t\tx = x & (x - 1)\n\t\tn++\n\t}\n\treturn n\n}", "func PopCount(v []byte) int {\n\tif haveSsse3 {\n\t\treturn popCountSsse3(v)\n\t}\n\treturn popCountGeneric(v)\n}", "func PopCountByClear(x int) int {\n\tcounts := 0\n\tfor x > 0 {\n\t\tx = x&(x-1)\n\t\tcounts++\n\t}\n\treturn counts\n}", "func LoopPopCount(x uint64) int {\n\tvar cnt int\n\tvar i byte\n\tfor i = 0; i < 64; i += 8 {\n\t\tcnt += int(pc[byte(x>>i)])\n\t}\n\treturn cnt\n}", "func popCount(bitboard uint64) (count int) {\n\tif bitboard != 0 && (bitboard&bitboard-1 == 0) {\n\t\tcount = 1\n\t} else if bitboard != 0 {\n\t\tfor bitboard != 0 {\n\t\t\tcount++\n\t\t\tbitboard &= bitboard - 1\n\t\t}\n\t}\n\treturn count\n}", "func PopCountByShifting(x uint64) (count int) {\n\tfor i := 0; i < 64; i++ {\n\t\tif x&(1<<i) != 0 {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn\n}", "func PopCountByClearing(x uint64) (count int) {\n\tfor x != 0 {\n\t\tx = x & (x - 1)\n\t\tcount++\n\t}\n\treturn\n}", "func population_count(n uint) (count uint) {\n\tfor i := n; i > 0; { // start from n and continue until zero\n\t\tcount += i & 1 // if the least significant bit is set then add it to the count\n\t\ti >>= 1 // bit shift right, discarding the bit we've just checked\n\t}\n\treturn\n}", "func PopulationCount(scope *Scope, x tf.Output) (y tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"PopulationCount\",\n\t\tInput: []tf.Input{\n\t\t\tx,\n\t\t},\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}", "func PopCountLoop(x uint64) int {\n\tvar total byte\n\tvar i uint64\n\tfor i = 1; i < 8; i++ {\n\t\ttotal += pc[byte(x>>(i*8))]\n\t}\n\treturn int(total)\n}", "func popcountSet(a *blockAry) uint64 {\n\tc := uint64(0)\n\tfor _, aValue := range *a {\n\t\tc += popcount(aValue)\n\t}\n\treturn c\n}", "func (c *Consumer) PopCount() int32 {\n\tvar popCount int32\n\tfor _, queue := range c.queues {\n\t\tpopCount += queue.popCount\n\t}\n\treturn popCount\n}", "func popcount8(val uint8) uint {\n\treturn emu.Popcount8(val)\n}", "func PopulationCounts(xs []byte) int {\n\tcount := 0\n\tfor _, x := range xs {\n\t\tcount += PopulationCount(x)\n\t}\n\treturn count\n}", "func TestPopCountPerformance(t *testing.T) {\n\tfor i := 0; i < 10; i++ {\n\t\tstart := time.Now()\n\t\tPopCount(0)\n\t\tdu := time.Since(start)\n\t\tfmt.Printf(\"%d: %d\\n\", i, du)\n\t}\n}", "func CountPopRows(ctx context.Context, db SQLHandle, cond PopValues) (count int, err error) {\n\tif _, err = queryWithJSONArgs(ctx, db, func(int) []interface{} { return []interface{}{&count} }, SQLCountPopRows, cond); err != nil {\n\t\treturn 0, formatError(\"CountPopRows\", err)\n\t}\n\treturn count, nil\n}", "func (x *intSet) count() int {\n\tn := 0\n\ty := *x\n\tfor y != 0 {\n\t\ty &= (y - 1)\n\t\tn++\n\t}\n\treturn n\n}", "func (s *Stack) Count() int {\n\treturn s.flips\n}", "func popCntq(uint) (ret uint)", "func (b *Bitset) Count() int {\n\tbitlen := b.bitlength\n\tcount := 0\n\tfor i := 0; i < bitlen; i++ {\n\t\tif b.IsSet(i) {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn count\n}", "func popcountSetXor(a, b *blockAry) uint64 {\n\tc := uint64(0)\n\n\tfor key, aValue := range *a {\n\t\tif bValue, found := (*b)[key]; found {\n\t\t\tc += popcount(aValue ^ bValue)\n\t\t} else {\n\t\t\tc += popcount(aValue)\n\t\t}\n\t}\n\tfor key, bValue := range *b {\n\t\tif _, found := (*a)[key]; !found {\n\t\t\tc += popcount(bValue)\n\t\t}\n\t}\n\n\treturn 
c\n\n}", "func (g *Grid) Count() int32 {\n\treturn int32(len(g.set))\n}", "func population_count_divideconquer(n uint) (count uint) {\n\ti := uint32(n) - ((uint32(n) >> 1) & 0x55555555)\n i = (i & 0x33333333) + ((i >> 2) & 0x33333333)\n return uint((((i + (i >> 4)) & 0x0f0f0f0f) * 0x01010101) >> 24)\n}", "func population_count_kernighan(n uint) (count uint) {\n\tfor i := n; i > 0; {\n\t\tcount++\n\t\ti = i & (i - 1)\n\t}\n\treturn\n}", "func (s *Stack) Pop() int {\n\tno := s.Peek()\n\ts.x = s.x[:len(s.x)-1]\n\treturn no\n}", "func PopCnt64Before(n uint64, iBit uint32) uint32 {\n\tn = n & ((1 << iBit) - 1)\n\n\tn -= (n >> 1) & m1 // put count of each 2 bits into those 2 bits\n\tn = (n & m2) + ((n >> 2) & m2) // put count of each 4 bits into those 4 bits\n\tn = (n + (n >> 4)) & m4 // put count of each 8 bits into thoes 8 bits\n\n\treturn uint32((n * h01) >> 56) // returns left 8 bits of x + (x << 8) + (x << 16) + (x<<24) + ...\n}", "func OnesCount8(x uint8) int {\n\treturn int(pop8tab[x])\n}", "func popcountSliceGeneric(s []uint64) (n uint64) {\n\tcnt := uint64(0)\n\tfor _, x := range s {\n\t\tx -= (x >> 1) & 0x5555555555555555\n\t\tx = (x>>2)&0x3333333333333333 + x&0x3333333333333333\n\t\tx += x >> 4\n\t\tx &= 0x0f0f0f0f0f0f0f0f\n\t\tx *= 0x0101010101010101\n\t\tcnt += x >> 56\n\t}\n\treturn cnt\n}", "func popcountSetAnd(a, b *blockAry) uint64 {\n\tc := uint64(0)\n\tfor key, aValue := range *a {\n\t\tif bValue, found := (*b)[key]; found {\n\t\t\tc += popcount(aValue & bValue)\n\t\t} else {\n\t\t\tc += popcount(aValue)\n\t\t}\n\t}\n\treturn c\n}", "func (b *BitSet) Count() int {\n\tn := 0\n\tl := b.LowBit()\n\th := b.HighBit()\n\tfor i := l; i <= h; i++ { // for all values up to highest\n\t\tif b.Test(i) { // if this value is included\n\t\t\tn++ // count it\n\t\t}\n\t}\n\treturn n\n}", "func popcountSetAndNot(a, b *blockAry) uint64 {\n\tc := uint64(0)\n\n\tfor key, aValue := range *a {\n\t\tif bValue, found := (*b)[key]; found {\n\t\t\tc += popcount(aValue &^ bValue)\n\t\t} else {\n\t\t\tc += popcount(aValue)\n\t\t}\n\t}\n\treturn c\n}", "func (s *Stack) Count() uint {\n\treturn s.count\n}", "func (cp *Pool) GetCount() int64 {\n\tp := cp.pool()\n\tif p == nil {\n\t\treturn 0\n\t}\n\treturn p.GetCount()\n}", "func (s IntSet) Count() int {\n\treturn len(s)\n}", "func (cp *Pool) GetSettingCount() int64 {\n\tp := cp.pool()\n\tif p == nil {\n\t\treturn 0\n\t}\n\treturn p.GetSettingCount()\n}", "func (k Keeper) GetPoolCount(ctx sdk.Context) uint64 {\n\tstore := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.PoolCountKey))\n\tbyteKey := types.KeyPrefix(types.PoolCountKey)\n\tbz := store.Get(byteKey)\n\n\t// Count doesn't exist: no element\n\tif bz == nil {\n\t\treturn 0\n\t}\n\n\t// Parse bytes\n\tcount, err := strconv.ParseUint(string(bz), 10, 64)\n\tif err != nil {\n\t\t// Panic because the count should be always formattable to iint64\n\t\tpanic(\"cannot decode count\")\n\t}\n\n\treturn count\n}", "func (r *SlidingWindow) Count() int {return r.count}", "func BitLen(x int64) (n int)", "func bitCount(b byte) uint8 {\n\treturn counts[b]\n}", "func (upq *UnsavedPostQuery) CountX(ctx context.Context) int {\n\tcount, err := upq.Count(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn count\n}", "func (sq *ShopQuery) CountX(ctx context.Context) int {\n\tcount, err := sq.Count(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn count\n}", "func (hq *HarborQuery) CountX(ctx context.Context) int {\n\tcount, err := hq.Count(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn 
count\n}", "func (b Bits) Count() int {\n\treturn bits.OnesCount64(uint64(b))\n}", "func PRepCount() float64 {\n\tpReps.Mutex.Lock()\n\tcount := len(pReps.PReps)\n\tpReps.Mutex.Unlock()\n\treturn float64(count)\n}", "func (bm BitMap) BitCount(ctx context.Context, start, end int64) (int64, error) {\n\treq := newRequest(\"*4\\r\\n$8\\r\\nBITCOUNT\\r\\n$\")\n\treq.addStringInt2(bm.name, start, end)\n\treturn bm.c.cmdInt(ctx, req)\n}", "func (stack *Stack) Pop() int {\n\tif stack.length == 0 {\n\t\treturn 0\n\t}\n\ttop := stack.top\n\tstack.top = top.prev\n\tstack.length--\n\treturn top.value\n}", "func (s PgPromotionStore) Count() int {\n\tvar n int\n\ts.db.Get(&n, \"SELECT COUNT(*) FROM public.promotion\")\n\treturn n\n}", "func (xmlmc *XmlmcInstStruct) GetCount() uint64 {\n\n\treturn xmlmc.count\n}", "func (pq *PrizeQuery) CountX(ctx context.Context) int {\n\tcount, err := pq.Count(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn count\n}", "func (cq *CartQuery) CountX(ctx context.Context) int {\n\tcount, err := cq.Count(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn count\n}", "func (nimq *NetInterfaceModeQuery) CountX(ctx context.Context) int {\n\tcount, err := nimq.Count(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn count\n}", "func (s *Stack) Pop() int {\n\tif s.length == 0 {\n\t\treturn MIN\n\t}\n\n\tn := s.top\n\ts.top = n.prev\n\ts.length--\n\treturn n.value\n}", "func (wq *WorkflowQuery) CountX(ctx context.Context) int {\n\tcount, err := wq.Count(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn count\n}", "func (g *GaugeInt64) Count() int64 {\n\treturn g.Value()\n}", "func Zcount(key string, min, max float64) (int, error) {\n\tzset, err := zsetOf(key)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\tif zset == nil {\n\t\treturn 0, nil\n\t}\n\treturn zset.count(min, max), nil\n}", "func countSetBits(number int) int {\n\n\tcount := 0\n\ttemp_number := number\n\n\tfor temp_number > 0 {\n\n\t\tcount += temp_number & 1\n\t\ttemp_number = temp_number >> 1\n\t}\n\treturn count\n}", "func (s stack) count() int {\n\treturn len(s.items)\n}", "func ExampleIntSet_Pop() {\n\ts1 := gset.NewIntSet()\n\ts1.Add([]int{1, 2, 3, 4}...)\n\n\tfmt.Println(s1.Pop())\n\n\t// May Output:\n\t// 1\n}", "func (rq *ReceiptQuery) CountX(ctx context.Context) int {\n\tcount, err := rq.Count(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn count\n}", "func (bits *BitArray) Count() int {\n\tlength := 0\n\n\tfor i := 0; i < bits.lenpad; i += _BytesPW {\n\t\tw := bytes2word(bits.bytes[i : i+_BytesPW])\n\t\tlength += countbits64(w)\n\t}\n\n\treturn length\n}", "func (b Bitboard) Count() uint8 {\n\treturn countTable[b&0xff] +\n\t\tcountTable[(b>>8)&0xff] +\n\t\tcountTable[(b>>16)&0xff] +\n\t\tcountTable[(b>>24)&0xff] +\n\t\tcountTable[(b>>32)&0xff] +\n\t\tcountTable[(b>>40)&0xff] +\n\t\tcountTable[(b>>48)&0xff] +\n\t\tcountTable[b>>56]\n}", "func (s *StickerSet) GetCount() (value int) {\n\tif s == nil {\n\t\treturn\n\t}\n\treturn s.Count\n}", "func (m *Max) Count() int {\n\treturn m.count\n}", "func (omq *OutcomeMeasureQuery) CountX(ctx context.Context) int {\n\tcount, err := omq.Count(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn count\n}", "func (wq *WifiQuery) CountX(ctx context.Context) int {\n\tcount, err := wq.Count(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn count\n}", "func (p *Population) Len() int {\n\treturn len(p.members)\n}", "func (pgq *PlayGroupQuery) CountX(ctx context.Context) int {\n\tcount, err := pgq.Count(ctx)\n\tif err != nil 
{\n\t\tpanic(err)\n\t}\n\treturn count\n}", "func (c *Aggregator) Count() (uint64, error) {\n\treturn c.state.count, nil\n}", "func popcountSetOr(a, b *blockAry) uint64 {\n\tc := uint64(0)\n\n\tfor key, aValue := range *a {\n\t\tif bValue, found := (*b)[key]; found {\n\t\t\tc += popcount(aValue | bValue)\n\t\t} else {\n\t\t\tc += popcount(aValue)\n\t\t}\n\t}\n\tfor key, bValue := range *b {\n\t\tif _, found := (*a)[key]; !found {\n\t\t\tc += popcount(bValue)\n\t\t}\n\t}\n\n\treturn c\n}", "func (gq *GoodsQuery) CountX(ctx context.Context) int {\n\tcount, err := gq.Count(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn count\n}", "func (bq *BrowserQuery) CountX(ctx context.Context) int {\n\tcount, err := bq.Count(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn count\n}", "func (biq *BankItemQuery) CountX(ctx context.Context) int {\n\tcount, err := biq.Count(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn count\n}", "func GetNumIPSets() (int, error) {\n\treturn getValue(numIPSets)\n}", "func (store *EntryStore) Count() int64 {\n\tprop := store.db.GetProperty(\"rocksdb.estimate-num-keys\")\n\tc, _ := strconv.ParseInt(prop, 10, 64)\n\treturn c\n}", "func (w *MetricWindow) Pop() bitflow.Value {\n\tif w.Empty() {\n\t\treturn 0\n\t}\n\tval := w.data[w.first]\n\tw.first = w.inc(w.first)\n\tw.full = false\n\treturn val\n}", "func (fq *ForumQuery) CountX(ctx context.Context) int {\n\tcount, err := fq.Count(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn count\n}", "func (mvq *ModuleVersionQuery) CountX(ctx context.Context) int {\n\tcount, err := mvq.Count(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn count\n}", "func (o VpcIpamScopeOutput) PoolCount() pulumi.IntOutput {\n\treturn o.ApplyT(func(v *VpcIpamScope) pulumi.IntOutput { return v.PoolCount }).(pulumi.IntOutput)\n}", "func (cq *ConfirmationQuery) CountX(ctx context.Context) int {\n\tcount, err := cq.Count(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn count\n}", "func (wtq *WorkerTypeQuery) CountX(ctx context.Context) int {\n\tcount, err := wtq.Count(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn count\n}", "func (q storestateQuery) CountG() (int64, error) {\n\treturn q.Count(boil.GetDB())\n}", "func (lbq *LatestBlockQuery) CountX(ctx context.Context) int {\n\tcount, err := lbq.Count(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn count\n}", "func TestCount128(t *testing.T) {\n\thm, _ := NewHashMap(128)\n\ttestCountN(testN, hm)\n}", "func (siq *SubItemQuery) CountX(ctx context.Context) int {\n\tcount, err := siq.Count(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn count\n}" ]
[ "0.85887784", "0.8507372", "0.84685475", "0.8449659", "0.843988", "0.843988", "0.8418389", "0.83115256", "0.8252126", "0.8202739", "0.81959707", "0.80563897", "0.80499727", "0.7954959", "0.7939985", "0.7730451", "0.77174306", "0.76712596", "0.7617381", "0.7527805", "0.7507031", "0.7387151", "0.73562926", "0.729315", "0.72672963", "0.7159438", "0.688786", "0.676796", "0.6762896", "0.6620859", "0.660507", "0.63074565", "0.62934476", "0.6026644", "0.6025796", "0.5993962", "0.5967957", "0.5919972", "0.5878526", "0.5869151", "0.58506626", "0.57950574", "0.5772024", "0.57451624", "0.57130605", "0.5672315", "0.56686556", "0.5660799", "0.5643674", "0.56411594", "0.56019425", "0.55580515", "0.55469304", "0.5536447", "0.55066806", "0.5495064", "0.5469873", "0.5466835", "0.5452025", "0.5438399", "0.54293907", "0.5408842", "0.5407806", "0.5399245", "0.5365541", "0.53626996", "0.53615355", "0.5345296", "0.533003", "0.5322889", "0.5322696", "0.53158146", "0.53151256", "0.5312724", "0.5301696", "0.5300842", "0.52984613", "0.52970845", "0.5293405", "0.52773565", "0.5254051", "0.5253253", "0.525163", "0.52475834", "0.52457696", "0.52429307", "0.5239382", "0.52306056", "0.5228447", "0.5216702", "0.5215743", "0.5215086", "0.5212891", "0.52117294", "0.5196723", "0.51960415", "0.5194476", "0.5188281", "0.5183872", "0.51823306", "0.5179782" ]
0.0
-1
check the first parameter, true if it wants only a Context; check if the handler needs a Context, has the first parameter as type of Context; it's useful in NewRoute inside route.go
func hasContextParam(handlerType reflect.Type) bool { //if the handler doesn't take arguments, false if handlerType.NumIn() == 0 { return false } //if the first argument is not a pointer, false p1 := handlerType.In(0) if p1.Kind() != reflect.Ptr { return false } //but if the first argument is a context, true if p1.Elem() == contextType { return true } return false }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func hasContextAndRenderer(handlerType reflect.Type) bool {\n\n\t//first check if we have pass 2 arguments\n\tif handlerType.NumIn() < 2 {\n\t\treturn false\n\t}\n\n\tfirstParamIsContext := hasContextParam(handlerType)\n\n\t//the first argument/parameter is always context if exists otherwise it's only Renderer or ResponseWriter,Request.\n\tif firstParamIsContext == false {\n\t\treturn false\n\t}\n\n\tp2 := handlerType.In(1)\n\tif p2.Kind() != reflect.Ptr {\n\t\treturn false\n\t}\n\t//but if the first argument is a context, true\n\tif p2.Elem() == rendererType {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (r *Request) Context() context.Context", "func mainHandleFunc(w http.ResponseWriter, r *http.Request) {\n\t// safe type conversion\n\t// value := r.Context().Value(\"user\")\n\t// user, check := value.(*m.User)\n\t// if !check {\n\t// \tlog.Println(\"User not Found in context\")\n\t// \thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t// \treturn\n\t// }\n\treturn\n}", "func (o *TelemetryDruidScanRequestAllOf) HasContext() bool {\n\tif o != nil && o.Context != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (m *Router) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}", "func ContextHandler(rootCtx context.Context) martini.Handler {\n\treturn func(res http.ResponseWriter, req *http.Request, c martini.Context) {\n\t\tc.Map(withReq(rootCtx, req))\n\t}\n}", "func ContextService(ctx context.Context) (s *Service) {\n\tif d, ok := ctx.Value(contextHandlerDetailsKey).(*handlerDetails); ok {\n\t\ts = d.s\n\t}\n\treturn\n}", "func TestContextIsAccessibleWithGo17Context(t *testing.T) {\n\tsuccHand := func(w http.ResponseWriter, r *http.Request) {\n\t\tr = r.WithContext(context.WithValue(r.Context(), \"dummykey\", \"dummyval\"))\n\t\ttoken := Token(r)\n\t\tif token == \"\" {\n\t\t\tt.Errorf(\"Token is inaccessible in the success handler\")\n\t\t}\n\t}\n\n\thand := New()\n\tchain := alice.New(hand.Handler).Then(http.HandlerFunc(succHand))\n\n\t// we need a request that passes. 
Let's just use a safe method for that.\n\treq := dummyGet()\n\twriter := httptest.NewRecorder()\n\n\tchain.ServeHTTP(writer, req)\n}", "func (o *ShortenerAPI) Context() *middleware.Context {\n\tif o.context == nil {\n\t\to.context = middleware.NewRoutableContext(o.spec, o, nil)\n\t}\n\n\treturn o.context\n}", "func TestAllowContext(t *testing.T) {\n\ttt := lt.New(t)\n\tappCtx := tt.NewAppCtx(\"test-http\")\n\tappCtx.Config().Request.AllowContext = true\n\n\t// Build handler\n\th := http.NewServer()\n\tvar gotContext journey.Ctx\n\th.HandleFunc(\"/test\", http.GET, func(\n\t\tctx journey.Ctx, w http.ResponseWriter, r *http.Request,\n\t) {\n\t\tctx.Trace(\"http.test\", \"Test endpoint called\")\n\t\tgotContext = ctx\n\t\tw.Head(http.StatusOK)\n\t})\n\n\taddr := startServer(appCtx, h)\n\n\t// Prepare context\n\tctx := journey.New(appCtx)\n\tctx.Trace(\"prepare\", \"Prepare context\")\n\tctx.Store(\"lang\", \"en_GB\")\n\tctx.Store(\"ip\", \"10.0.0.21\")\n\tctx.Store(\"flag\", 3)\n\n\t// Send request\n\tclient := http.Client{}\n\tres, err := client.Get(ctx, fmt.Sprintf(\"http://%s/test\", addr))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif http.StatusOK != res.StatusCode {\n\t\tt.Errorf(\"expect to get status %d, but got %d\", http.StatusOK, res.StatusCode)\n\t}\n\n\t// Compare\n\tif ctx.UUID() == gotContext.UUID() {\n\t\tt.Error(\"expect contexts to be different\")\n\t}\n\tctx.RangeValues(func(key, expect interface{}) bool {\n\t\tv := gotContext.Load(key)\n\t\tif v != nil {\n\t\t\tt.Errorf(\"expect key %s to NOT be present\", key)\n\t\t}\n\t\treturn false\n\t})\n}", "func hasRendererParam(handlerType reflect.Type) bool {\n\t//if the handler doesn't take arguments, false\n\tif handlerType.NumIn() == 0 {\n\t\treturn false\n\t}\n\n\t//if the first argument is not a pointer, false\n\tp1 := handlerType.In(0)\n\tif p1.Kind() != reflect.Ptr {\n\t\treturn false\n\t}\n\t//but if the first argument is a renderer, true\n\tif p1.Elem() == rendererType {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (ri *RestInvoker) ContextDo(ctx context.Context, req *rest.Request, options ...InvocationOption) (*rest.Response, error) {\n\topts := getOpts(string(req.GetRequest().Host()), options...)\n\topts.Protocol = common.ProtocolRest\n\tif len(opts.Filters) == 0 {\n\t\topts.Filters = ri.opts.Filters\n\t}\n\tif string(req.GetRequest().URI().Scheme()) != \"cse\" {\n\t\treturn nil, fmt.Errorf(\"Scheme invalid: %s, only support cse://\", req.GetRequest().URI().Scheme())\n\t}\n\tif req.GetHeader(\"Content-Type\") == \"\" {\n\t\treq.SetHeader(\"Content-Type\", \"application/json\")\n\t}\n\tnewReq := req.Copy()\n\tdefer newReq.Close()\n\tresp := rest.NewResponse()\n\tnewReq.SetHeader(common.HeaderSourceName, config.SelfServiceName)\n\tinv := invocation.CreateInvocation()\n\twrapInvocationWithOpts(inv, opts)\n\tinv.AppID = config.GlobalDefinition.AppID\n\tinv.MicroServiceName = string(req.GetRequest().Host())\n\tinv.Args = newReq\n\tinv.Reply = resp\n\tinv.Ctx = ctx\n\tinv.URLPathFormat = req.Req.URI().String()\n\tinv.MethodType = req.GetMethod()\n\tc, err := handler.GetChain(common.Consumer, ri.opts.ChainName)\n\tif err != nil {\n\t\tlager.Logger.Errorf(err, \"Handler chain init err.\")\n\t\treturn nil, err\n\t}\n\tc.Next(inv, func(ir *invocation.InvocationResponse) error {\n\t\terr = ir.Err\n\t\treturn err\n\t})\n\treturn resp, err\n}", "func (route *Route) DoesMatchContext(c *Context) bool {\n\n\t// by default, we match\n\tvar match bool = true\n\tif len(route.MatcherFuncs) > 0 {\n\n\t\t// there are some matcher 
functions, so don't automatically\n\t\t// match by default - let the matchers decide\n\t\tmatch = false\n\n\t\t// loop through the matcher functions\n\t\tfor _, f := range route.MatcherFuncs {\n\t\t\t// modify 'match' based on the result of the matcher function\n\t\t\tswitch f(c) {\n\t\t\tcase NoMatch:\n\t\t\t\tmatch = false\n\t\t\tcase Match:\n\t\t\t\tmatch = true\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\t// return the result\n\treturn match\n\n}", "func (m HTTPMethod) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}", "func CheckHeaderValueAndForwardWithRequestContext(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\taccessToken := req.Header.Get(common.FlorenceHeaderKey)\n\t\tif accessToken != \"\" {\n\t\t\treq = addUserAccessTokenToRequestContext(accessToken, req)\n\t\t}\n\n\t\th.ServeHTTP(w, req)\n\t})\n}", "func HandleContext(ctx context.Context, handler Handler) {\n\tformat, _ := os.LookupEnv(\"FN_FORMAT\")\n\tif format != \"\" && format != \"http-stream\" {\n\t\tlog.Fatal(\"only http-stream format is supported, please set function.format=http-stream against your fn service\")\n\t}\n\tpath := os.Getenv(\"FN_LISTENER\")\n\tstartHTTPServer(ctx, handler, path)\n}", "func (o *WeaviateAPI) Context() *middleware.Context {\n\tif o.context == nil {\n\t\to.context = middleware.NewRoutableContext(o.spec, o, nil)\n\t}\n\n\treturn o.context\n}", "func ReqFromContext(ctx context.Context) (*http.Request, bool) {\n\tr, ok := ctx.Value(requestKey).(*http.Request)\n\treturn r, ok\n}", "func (r *subprocess) isContext(n ast.Node, ctx *gosec.Context) bool {\n\tselector, indent, err := gosec.GetCallInfo(n, ctx)\n\tif err != nil {\n\t\treturn false\n\t}\n\tif selector == \"exec\" && indent == \"CommandContext\" {\n\t\treturn true\n\t}\n\treturn false\n}", "func (_obj *DataService) HasPhoneOneWayWithContext(tarsCtx context.Context, phone string, phoneExist *bool, _opt ...map[string]string) (ret int32, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = _os.Write_string(phone, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _os.Write_bool((*phoneExist), 2)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 1, \"hasPhone\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func (_obj *DataService) HasUserOneWayWithContext(tarsCtx context.Context, wx_id string, userExist *bool, _opt ...map[string]string) (ret int32, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = _os.Write_string(wx_id, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _os.Write_bool((*userExist), 2)\n\tif err != nil 
{\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 1, \"hasUser\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func (h *Handler) Context() *snow.Context { return h.engine.Context() }", "func IsUserRequestCtx(ctx context.Context) bool {\n\treturn ctxutils.IsAPIGwCtx(ctx)\n}", "func Context(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t// user service\n\t\tu := user.New(cognitoID, cognitoSecret)\n\t\tcontext.Set(r, \"userService\", u)\n\n\t\t// session helper\n\t\ts := session.New()\n\t\tcontext.Set(r, \"session\", s)\n\n\t\tvar netTransport = &http.Transport{\n\t\t\tDial: (&net.Dialer{\n\t\t\t\tTimeout: 5 * time.Second,\n\t\t\t}).Dial,\n\t\t\tTLSHandshakeTimeout: 5 * time.Second,\n\t\t}\n\n\t\t// support timeout and net transport.\n\t\tc := &http.Client{\n\t\t\tTimeout: time.Second * 10,\n\t\t\tTransport: netTransport,\n\t\t}\n\n\t\t// http client\n\t\tcontext.Set(r, \"client\", c)\n\n\t\tp := post.New(dynamoTablePosts, dynamoEndpoint, nil)\n\t\tcontext.Set(r, \"postService\", p)\n\n\t\tl := like.New(dynamoTableLikes, dynamoEndpoint, nil)\n\t\tcontext.Set(r, \"likeService\", l)\n\n\t\th.ServeHTTP(w, r)\n\t})\n}", "func ContextRequest(ctx context.Context) (request *http.Request) {\n\tif d, ok := ctx.Value(contextHandlerDetailsKey).(*handlerDetails); ok {\n\t\trequest = d.request\n\t}\n\treturn\n}", "func (o *GetHelloOKBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}", "func (_obj *Apipayments) Payments_validateRequestedInfoOneWayWithContext(tarsCtx context.Context, params *TLpayments_validateRequestedInfo, _opt ...map[string]string) (ret Payments_ValidatedRequestedInfo, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = params.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 1, \"payments_validateRequestedInfo\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = 
v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func (_obj *WebApiAuth) LoginLog_GetOneWayWithContext(tarsCtx context.Context, req *LoginLog, res *LoginLog, _opt ...map[string]string) (err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = req.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = (*res).WriteBlock(_os, 2)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 1, \"LoginLog_Get\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn nil\n}", "func (_obj *WebApiAuth) SysUser_GetOneWayWithContext(tarsCtx context.Context, req *SysUser, res *SysUser, _opt ...map[string]string) (err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = req.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = (*res).WriteBlock(_os, 2)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 1, \"SysUser_Get\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn nil\n}", "func ContextDetail(ctx context.Context, key interface{}) (value interface{}) {\n\tif d, ok := ctx.Value(contextHandlerDetailsKey).(*handlerDetails); ok && d.details != nil {\n\t\tvalue = d.details[key]\n\t}\n\treturn\n}", "func (_obj *WebApiAuth) SysConfig_GetOneWayWithContext(tarsCtx context.Context, req *SysConfig, res *SysConfig, _opt ...map[string]string) (err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = req.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = (*res).WriteBlock(_os, 2)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 1, \"SysConfig_Get\", _os.ToBytes(), 
_status, _context, _resp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn nil\n}", "func ContextHandler(ctx context.Context, handler func(context.Context, Request)) Handler {\n\treturn HandlerFunc(func(req Request) {\n\t\thandler(ctx, req)\n\t})\n}", "func FromIncomingContext(ctx context.Context) (Context, bool) {\n\tmd, ok := metadata.FromIncomingContext(ctx)\n\tif !ok {\n\t\treturn Context{}, false\n\t}\n\tvalues := md.Get(ContextHeader)\n\tif len(values) != 1 {\n\t\treturn Context{}, false\n\t}\n\tvar result Context\n\tif err := result.UnmarshalString(values[0]); err != nil {\n\t\treturn Context{}, false\n\t}\n\treturn result, true\n}", "func ContextResponseWriter(ctx context.Context) (rw ResponseWriter) {\n\tif d, ok := ctx.Value(contextHandlerDetailsKey).(*handlerDetails); ok {\n\t\trw, _ = d.rw.(ResponseWriter)\n\t}\n\treturn\n}", "func HasContext(status *statuspb.Status) bool {\n\tcontext := status.GetContext()\n\treturn context != nil\n}", "func (m *EnableTotpAuth) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}", "func (m *CloudInitNetWorkRoute) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}", "func (m DNSServers1) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}", "func GetContext(handler http.Handler) http.HandlerFunc {\n\t// Set the context here\n\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\ttoken, err := validateToken(r.Header.Get(\"Authorization\"))\n\t\tif err != nil {\n\t\t\tlog.Println(\"No valid token found!\")\n\t\t} else {\n\t\t\tctx.Set(r, \"user\", token.Claims[\"userid\"])\n\t\t\tctx.Set(r, \"role\", token.Claims[\"role\"])\n\t\t}\n\t\thandler.ServeHTTP(w, r)\n\t\t// Remove context contents\n\t\tctx.Clear(r)\n\t}\n}", "func (o *OAuth2ConsentRequest) HasContext() bool {\n\tif o != nil && o.Context != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (h *Handler) BindContext(ctx context.Context, server core.Server) {\n\tswitch s := server.(type) {\n\tcase *http.Server:\n\t\ts.Handler = h\n\t\ts.BaseContext = func(l net.Listener) context.Context {\n\t\t\treturn ctx\n\t\t}\n\tcase *fasthttp.Server:\n\t\ts.Handler = h.ServeFastHTTP\n\t}\n}", "func HandlerRoute1(ctx *ugo.RequestCtx) error {\n\treturn ctx.HTTPResponse(\"OK\", 200)\n}", "func BindContext(hndl http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tprint(\"Binding context\\n\")\n\t\tctx := OpenCtx(req)\n\t\tprint(\"BindContext: \", ctx, \"\\n\")\n\n\t\tdefer closeCtx(req)\n\t\thndl.ServeHTTP(w, req)\n\t})\n}", "func (ctx *Context) IsGet() bool {\r\n\treturn ctx.Is(\"GET\")\r\n}", "func (_obj *WebApiAuth) LoginLog_CreateOneWayWithContext(tarsCtx context.Context, req *LoginLog, res *LoginLog, _opt ...map[string]string) (err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = req.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = (*res).WriteBlock(_os, 2)\n\tif err 
!= nil {\n\t\treturn err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 1, \"LoginLog_Create\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn nil\n}", "func (o *TelemetryDruidScanRequestAllOf) GetContextOk() (*TelemetryDruidQueryContext, bool) {\n\tif o == nil || o.Context == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Context, true\n}", "func FromContext(ctx context.Context) (Service, bool) {\n\tif ctx == nil {\n\t\treturn nil, false\n\t}\n\ts, ok := ctx.Value(serviceKey{}).(Service)\n\treturn s, ok\n}", "func (m *AuthRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}", "func NeedAdminFromCtx(ctx context.Context) bool {\n\tif ctx == nil {\n\t\treturn false\n\t}\n\tval := ctx.Value(constants.ContextNeedAdmin)\n\tif needAdmin, ok := val.(bool); ok {\n\t\treturn needAdmin\n\t}\n\treturn false\n}", "func (ctx *Context) Context() context.Context {\n\tif ctx.HTTPReq != nil {\n\t\treturn ctx.HTTPReq.Context()\n\t} else if ctx.WSConn != nil {\n\t\treturn ctx.WSConn.Context()\n\t}\n\treturn context.Background()\n}", "func (r *router) handle(c *Context){\n\tn, params := r.getRoute(c.Method, c.Path)\n\tif n != nil {\n\t\tc.Params = params\n\t\t// connection between Context and Router!\n\t\t// it's important\n\t\tkey := c.Method + \"-\" + n.pattern\n\t\t// 两种函数都放到一起了\n\t\tc.handlers = append(c.handlers, r.handlers[key])\n\t\t//r.handlers[key](c)\n\t}else{\n\t\tc.handlers = append(c.handlers, func(c *Context){\n\t\t\tc.String(http.StatusNotFound, \"404 NOT FOUND%s\\n\", c.Path)\n\t\t})\n\t}\n\t//放在这里一起执行, 中间执行, 其逻辑导致\"并行\"效果\n\tc.Next()\n}", "func getContext() context.Context {\n\treturn context.TODO()\n}", "func (w *ServerInterfaceWrapper) Check(ctx echo.Context) error {\n\tvar err error\n\n\t// Parameter object where we will unmarshal all parameters from the context\n\tvar params CheckParams\n\t// ------------- Optional query parameter \"username\" -------------\n\tif paramValue := ctx.QueryParam(\"username\"); paramValue != \"\" {\n\n\t}\n\n\terr = runtime.BindQueryParameter(\"form\", true, false, \"username\", ctx.QueryParams(), &params.Username)\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf(\"Invalid format for parameter username: %s\", err))\n\t}\n\n\t// Invoke the callback with all the unmarshalled arguments\n\terr = w.Handler.Check(ctx, params)\n\treturn err\n}", "func (r *Route) prepare() {\n\tif r.middlewareHandlers != nil {\n\t\tr.hasMiddleware = true\n\t}\n\tconvertedMiddleware := MiddlewareHandlerFunc(func(ctx *Context, next Handler) {\n\t\tr.handler.Serve(ctx)\n\t\t//except itself\n\t\tif r.middlewareHandlers != nil && len(r.middlewareHandlers) > 1 {\n\t\t\tnext.Serve(ctx)\n\t\t}\n\t})\n\n\tr.Use(convertedMiddleware)\n\n}", "func (m 
*APIPatternV1) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}", "func (NilContext) GetContext() any {\n\treturn nil\n}", "func (attendant *Attendant) Context(key string) (interface{}, bool) {\n\tresult, ok := attendant.context[key]\n\treturn result, ok\n}", "func getUserFromContext(c *gin.Context) (*user_model.UserContextHeader, bool) {\n\t// get user from context\n\tif userContext, ok := c.Get(consts.USER_KEY_FOR_GIN_CONTEXT); ok {\n\t\tif userHeaderContext, ok := userContext.(user_model.UserContextHeader); ok {\n\t\t\treturn &userHeaderContext, true\n\t\t}\n\t}\n\treturn nil, false\n}", "func (_obj *DataService) GetUserInfoOneWayWithContext(tarsCtx context.Context, wx_id string, userInfo *UserInfo, _opt ...map[string]string) (ret int32, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = _os.Write_string(wx_id, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = (*userInfo).WriteBlock(_os, 2)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 1, \"getUserInfo\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func (c *Context) ShouldBind(obj interface{}, binder binding.Binder) error {\n\treturn binder.Bind(c.Req, obj)\n}", "func validCtxFunc(fn interface{}, ctxType reflect.Type) error {\n\tif fn == nil {\n\t\treturn fmt.Errorf(\"rpc: middleware is nil\")\n\t}\n\n\tfnValue := reflect.ValueOf(fn)\n\n\tif fnValue.Type().Kind() != reflect.Func {\n\t\treturn fmt.Errorf(\"rpc: middleware is not func type\")\n\t}\n\n\tif fnValue.Type().NumIn() != 2 {\n\t\treturn fmt.Errorf(\"rpc: middleware ill-fromed\")\n\t}\n\n\tif fnValue.Type().NumOut() != 1 {\n\t\treturn fmt.Errorf(\"rpc: middleware ill-fromed\")\n\t}\n\n\tif inType := fnValue.Type().In(0); inType.Kind() != reflect.Ptr || inType.Elem() != reflect.TypeOf((*http.Request)(nil)).Elem() {\n\t\treturn fmt.Errorf(\"rpc: middleware ill-fromed\")\n\t}\n\n\tif inType := fnValue.Type().In(1); inType.Kind() != reflect.Ptr || inType.Elem() != ctxType {\n\t\treturn fmt.Errorf(\"rpc: middleware ill-fromed\")\n\t}\n\n\tif outType := fnValue.Type().Out(0); outType != reflect.TypeOf((*error)(nil)).Elem() {\n\t\treturn fmt.Errorf(\"rpc: middleware ill-fromed\")\n\t}\n\n\treturn nil\n}", "func Contexter() func(next http.Handler) http.Handler {\n\trnd := templates.HTMLRenderer()\n\tcsrfOpts := CsrfOptions{\n\t\tSecret: setting.SecretKey,\n\t\tCookie: setting.CSRFCookieName,\n\t\tSetCookie: true,\n\t\tSecure: setting.SessionConfig.Secure,\n\t\tCookieHTTPOnly: setting.CSRFCookieHTTPOnly,\n\t\tHeader: \"X-Csrf-Token\",\n\t\tCookieDomain: setting.SessionConfig.Domain,\n\t\tCookiePath: setting.SessionConfig.CookiePath,\n\t\tSameSite: 
setting.SessionConfig.SameSite,\n\t}\n\tif !setting.IsProd {\n\t\tCsrfTokenRegenerationInterval = 5 * time.Second // in dev, re-generate the tokens more aggressively for debug purpose\n\t}\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {\n\t\t\tctx := Context{\n\t\t\t\tResp: NewResponse(resp),\n\t\t\t\tCache: mc.GetCache(),\n\t\t\t\tLocale: middleware.Locale(resp, req),\n\t\t\t\tLink: setting.AppSubURL + strings.TrimSuffix(req.URL.EscapedPath(), \"/\"),\n\t\t\t\tRender: rnd,\n\t\t\t\tSession: session.GetSession(req),\n\t\t\t\tRepo: &Repository{\n\t\t\t\t\tPullRequest: &PullRequest{},\n\t\t\t\t},\n\t\t\t\tOrg: &Organization{},\n\t\t\t\tData: middleware.GetContextData(req.Context()),\n\t\t\t}\n\t\t\tdefer ctx.Close()\n\n\t\t\tctx.Data.MergeFrom(middleware.CommonTemplateContextData())\n\t\t\tctx.Data[\"Context\"] = &ctx\n\t\t\tctx.Data[\"CurrentURL\"] = setting.AppSubURL + req.URL.RequestURI()\n\t\t\tctx.Data[\"Link\"] = ctx.Link\n\t\t\tctx.Data[\"locale\"] = ctx.Locale\n\n\t\t\t// PageData is passed by reference, and it will be rendered to `window.config.pageData` in `head.tmpl` for JavaScript modules\n\t\t\tctx.PageData = map[string]any{}\n\t\t\tctx.Data[\"PageData\"] = ctx.PageData\n\n\t\t\tctx.Req = WithContext(req, &ctx)\n\t\t\tctx.Csrf = PrepareCSRFProtector(csrfOpts, &ctx)\n\n\t\t\t// Get the last flash message from cookie\n\t\t\tlastFlashCookie := middleware.GetSiteCookie(ctx.Req, CookieNameFlash)\n\t\t\tif vals, _ := url.ParseQuery(lastFlashCookie); len(vals) > 0 {\n\t\t\t\t// store last Flash message into the template data, to render it\n\t\t\t\tctx.Data[\"Flash\"] = &middleware.Flash{\n\t\t\t\t\tDataStore: &ctx,\n\t\t\t\t\tValues: vals,\n\t\t\t\t\tErrorMsg: vals.Get(\"error\"),\n\t\t\t\t\tSuccessMsg: vals.Get(\"success\"),\n\t\t\t\t\tInfoMsg: vals.Get(\"info\"),\n\t\t\t\t\tWarningMsg: vals.Get(\"warning\"),\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// prepare an empty Flash message for current request\n\t\t\tctx.Flash = &middleware.Flash{DataStore: &ctx, Values: url.Values{}}\n\t\t\tctx.Resp.Before(func(resp ResponseWriter) {\n\t\t\t\tif val := ctx.Flash.Encode(); val != \"\" {\n\t\t\t\t\tmiddleware.SetSiteCookie(ctx.Resp, CookieNameFlash, val, 0)\n\t\t\t\t} else if lastFlashCookie != \"\" {\n\t\t\t\t\tmiddleware.SetSiteCookie(ctx.Resp, CookieNameFlash, \"\", -1)\n\t\t\t\t}\n\t\t\t})\n\n\t\t\t// If request sends files, parse them here otherwise the Query() can't be parsed and the CsrfToken will be invalid.\n\t\t\tif ctx.Req.Method == \"POST\" && strings.Contains(ctx.Req.Header.Get(\"Content-Type\"), \"multipart/form-data\") {\n\t\t\t\tif err := ctx.Req.ParseMultipartForm(setting.Attachment.MaxSize << 20); err != nil && !strings.Contains(err.Error(), \"EOF\") { // 32MB max size\n\t\t\t\t\tctx.ServerError(\"ParseMultipartForm\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\thttpcache.SetCacheControlInHeader(ctx.Resp.Header(), 0, \"no-transform\")\n\t\t\tctx.Resp.Header().Set(`X-Frame-Options`, setting.CORSConfig.XFrameOptions)\n\n\t\t\tctx.Data[\"CsrfToken\"] = ctx.Csrf.GetToken()\n\t\t\tctx.Data[\"CsrfTokenHtml\"] = template.HTML(`<input type=\"hidden\" name=\"_csrf\" value=\"` + ctx.Data[\"CsrfToken\"].(string) + `\">`)\n\n\t\t\t// FIXME: do we really always need these setting? 
There should be someway to have to avoid having to always set these\n\t\t\tctx.Data[\"DisableMigrations\"] = setting.Repository.DisableMigrations\n\t\t\tctx.Data[\"DisableStars\"] = setting.Repository.DisableStars\n\t\t\tctx.Data[\"EnableActions\"] = setting.Actions.Enabled\n\n\t\t\tctx.Data[\"ManifestData\"] = setting.ManifestData\n\n\t\t\tctx.Data[\"UnitWikiGlobalDisabled\"] = unit.TypeWiki.UnitGlobalDisabled()\n\t\t\tctx.Data[\"UnitIssuesGlobalDisabled\"] = unit.TypeIssues.UnitGlobalDisabled()\n\t\t\tctx.Data[\"UnitPullsGlobalDisabled\"] = unit.TypePullRequests.UnitGlobalDisabled()\n\t\t\tctx.Data[\"UnitProjectsGlobalDisabled\"] = unit.TypeProjects.UnitGlobalDisabled()\n\t\t\tctx.Data[\"UnitActionsGlobalDisabled\"] = unit.TypeActions.UnitGlobalDisabled()\n\n\t\t\tctx.Data[\"AllLangs\"] = translation.AllLangs()\n\n\t\t\tnext.ServeHTTP(ctx.Resp, ctx.Req)\n\t\t})\n\t}\n}", "func (o *CloudTidesAPI) Context() *middleware.Context {\n\tif o.context == nil {\n\t\to.context = middleware.NewRoutableContext(o.spec, o, nil)\n\t}\n\n\treturn o.context\n}", "func (_obj *WebApiAuth) SysConfig_GetPageOneWayWithContext(tarsCtx context.Context, pageSize int32, pageIndex int32, req *SysConfig, res *SysConfig_List, _opt ...map[string]string) (err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = _os.Write_int32(pageSize, 1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = _os.Write_int32(pageIndex, 2)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = req.WriteBlock(_os, 3)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = (*res).WriteBlock(_os, 4)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 1, \"SysConfig_GetPage\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn nil\n}", "func (o *GetSearchbyIDOKBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}", "func (_obj *WebApiAuth) SysUser_GetPageOneWayWithContext(tarsCtx context.Context, pageSize int32, pageIndex int32, req *SysUser, res *SysUser_List, _opt ...map[string]string) (err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = _os.Write_int32(pageSize, 1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = _os.Write_int32(pageIndex, 2)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = req.WriteBlock(_os, 3)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = (*res).WriteBlock(_os, 4)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 1, \"SysUser_GetPage\", 
_os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn nil\n}", "func (o *DataPlaneAPI) Context() *middleware.Context {\n\tif o.context == nil {\n\t\to.context = middleware.NewRoutableContext(o.spec, o, nil)\n\t}\n\n\treturn o.context\n}", "func (ac *AuthContext) TestContext(route web.Controller, chain []web.ChainableContext) error {\n\t//requires AuthorizableController and SessionChain\n\thasSession := false\n\n\t_, ok := route.(AuthorizableController)\n\tif !ok {\n\t\treturn errors.New(fmt.Sprintf(\"The route :: %T :: does not support the AuthContext.\", route))\n\t}\n\n\tfor i := 0; i < len(chain); i++ {\n\t\t_, ok := chain[i].(SessionChainLink)\n\t\tif ok {\n\t\t\thasSession = true\n\t\t}\n\t}\n\n\tif hasSession && ok {\n\t\treturn nil\n\t} else {\n\t\treturn errors.New(fmt.Sprintf(\"The route :: %T :: does not have a SessionAware context in it's context chain.\", route))\n\t}\n}", "func (_obj *WebApiAuth) LoginLog_GetPageOneWayWithContext(tarsCtx context.Context, pageSize int32, pageIndex int32, req *LoginLog, res *LoginLog_List, _opt ...map[string]string) (err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = _os.Write_int32(pageSize, 1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = _os.Write_int32(pageIndex, 2)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = req.WriteBlock(_os, 3)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = (*res).WriteBlock(_os, 4)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 1, \"LoginLog_GetPage\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn nil\n}", "func contextFilter(host service.Host) FilterFunc {\n\treturn func(ctx context.Context, w http.ResponseWriter, r *http.Request, next Handler) {\n\t\tfxctx := fxcontext.New(ctx, host)\n\t\tnext.ServeHTTP(fxctx, w, r)\n\t}\n}", "func (object Object) Context(value interface{}) Object {\n\t// TODO: incomplete\n\treturn object\n}", "func initializeContext(h http.Handler) http.Handler {\n\twrapper := func(w http.ResponseWriter, r *http.Request) {\n\t\tctx := context.WithValue(r.Context(), \"ID\", uuid.NewV4().String())\n\t\th.ServeHTTP(w, r.WithContext(ctx))\n\t}\n\treturn http.HandlerFunc(wrapper)\n}", "func (e *engine) prepareContext(w 
http.ResponseWriter, r *http.Request) *Context {\n\tctx := acquireContext()\n\tctx.Req = ahttp.AcquireRequest(r)\n\tctx.Res = ahttp.AcquireResponseWriter(w)\n\tctx.reply = acquireReply()\n\tctx.subject = security.AcquireSubject()\n\treturn ctx\n}", "func RequestFromContext(ctx context.Context) (*http.Request, bool) {\n\tr, ok := ctx.Value(requestKey).(*http.Request)\n\treturn r, ok\n}", "func NewHandlerContext(inputStore *sql.DB) *HandlerContext {\n\thandlerContext := &HandlerContext{}\n\thandlerContext.Store = NewSQLStore(inputStore)\n\treturn handlerContext\n}", "func bindContext(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(ContextABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func bindContext(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(ContextABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func bindContext(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(ContextABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func bindContext(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(ContextABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func FromContext(ctx context.Context) (*AuthInfo, bool) {\n\tpr, ok := peer.FromContext(ctx)\n\tif !ok {\n\t\treturn nil, false\n\t}\n\tinfo, ok := pr.AuthInfo.(AuthInfo)\n\tif !ok {\n\t\treturn nil, false\n\t}\n\treturn &info, true\n}", "func (ri *RestInvoker) ContextDo(ctx context.Context, req *rest.Request, options ...InvocationOption) (*rest.Response, error) {\n\tif string(req.GetRequest().URL.Scheme) != \"cse\" {\n\t\treturn nil, fmt.Errorf(\"scheme invalid: %s, only support cse://\", req.GetRequest().URL.Scheme)\n\t}\n\n\topts := getOpts(req.GetRequest().Host, options...)\n\topts.Protocol = common.ProtocolRest\n\n\tresp := rest.NewResponse()\n\n\tinv := invocation.New(ctx)\n\twrapInvocationWithOpts(inv, opts)\n\tinv.MicroServiceName = req.GetRequest().Host\n\t// TODO load from openAPI schema\n\t// inv.SchemaID = schemaID\n\t// inv.OperationID = operationID\n\tinv.Args = req\n\tinv.Reply = resp\n\tinv.URLPathFormat = req.Req.URL.Path\n\n\tinv.SetMetadata(common.RestMethod, req.GetMethod())\n\n\terr := ri.invoke(inv)\n\treturn resp, err\n}", "func (o *Comment) HasContext() bool {\n\tif o != nil && o.Context != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func shouldBind(c *gin.Context, obj interface{}) error {\n\tb := getBinding(c.ContentType())\n\terr := c.ShouldBindWith(obj, b)\n\tif errors.Is(err, io.EOF) {\n\t\treturn nil\n\t}\n\treturn err\n}", "func (API) Context(s *api.State, thread uint64) api.Context {\n\treturn nil\n}", "func ToContextHandler(f interface{}) ContextHandler {\n\tswitch t := 
f.(type) {\n\tcase func(Context, http.ResponseWriter, *http.Request, func()):\n\t\treturn ContextHandler(t)\n\tcase ContextHandler:\n\t\treturn t\n\tcase func(Context, http.ResponseWriter, *http.Request):\n\t\treturn func(c Context, w http.ResponseWriter, r *http.Request, q func()) {\n\t\t\tt(c, w, r)\n\t\t}\n\tcase func(http.ResponseWriter, *http.Request, func()):\n\t\treturn func(c Context, w http.ResponseWriter, r *http.Request, q func()) {\n\t\t\tt(w, r, q)\n\t\t}\n\tcase func(http.ResponseWriter, *http.Request):\n\t\treturn func(c Context, w http.ResponseWriter, r *http.Request, q func()) {\n\t\t\tt(w, r)\n\t\t}\n\tcase http.Handler:\n\t\treturn func(c Context, w http.ResponseWriter, r *http.Request, q func()) {\n\t\t\tt.ServeHTTP(w, r)\n\t\t}\n\tdefault:\n\t\tpanic(ErrUnsupportedHandler)\n\t}\n}", "func (_obj *DataService) HasUserWithContext(tarsCtx context.Context, wx_id string, userExist *bool, _opt ...map[string]string) (ret int32, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = _os.Write_string(wx_id, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _os.Write_bool((*userExist), 2)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"hasUser\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = _is.Read_int32(&ret, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _is.Read_bool(&(*userExist), 2, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func CheckAuth(c *gin.Context) {\n\n}", "func (_obj *DataService) GetActivityInfoOneWayWithContext(tarsCtx context.Context, activity_id string, activityInfo *ActivityInfo, _opt ...map[string]string) (ret int32, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = _os.Write_string(activity_id, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = (*activityInfo).WriteBlock(_os, 2)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 1, \"getActivityInfo\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = 
v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func (_obj *WebApiAuth) SysUser_InsertOneWayWithContext(tarsCtx context.Context, req *SysUser, id *int32, _opt ...map[string]string) (err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = req.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = _os.Write_int32((*id), 2)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 1, \"SysUser_Insert\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn nil\n}", "func IsContextErr(err error) bool {\n\tif err == context.Canceled || err == context.DeadlineExceeded {\n\t\treturn true\n\t}\n\t// This happens e.g. on redirect errors, see https://golang.org/src/net/http/client_test.go#L329\n\tif ue, ok := err.(*url.Error); ok {\n\t\tif ue.Temporary() {\n\t\t\treturn true\n\t\t}\n\t\t// Use of an AWS Signing Transport can result in a wrapped url.Error\n\t\treturn IsContextErr(ue.Err)\n\t}\n\treturn false\n}", "func Test_Ctx_Accepts_EmptyAccept(t *testing.T) {\n\tt.Parallel()\n\tapp := New()\n\tctx := app.AcquireCtx(&fasthttp.RequestCtx{})\n\tdefer app.ReleaseCtx(ctx)\n\tutils.AssertEqual(t, \".forwarded\", ctx.Accepts(\".forwarded\"))\n}", "func (m Type) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}", "func (_obj *WebApiAuth) SysConfig_CreateOneWayWithContext(tarsCtx context.Context, req *SysConfig, res *SysConfig, _opt ...map[string]string) (err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = req.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = (*res).WriteBlock(_os, 2)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 1, \"SysConfig_Create\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn 
nil\n}", "func (self *Route) Handle(context *Context) bool {\n\tif matches := self.Match(context.Path); matches != nil {\n\t\tif context_ := context.AppendName(self.Name); context == context_ {\n\t\t\tcontext = context.Copy()\n\t\t} else {\n\t\t\tcontext = context_\n\t\t}\n\n\t\tfor key, value := range matches {\n\t\t\tswitch key {\n\t\t\tcase PathVariable:\n\t\t\t\tcontext.Path = value\n\n\t\t\tdefault:\n\t\t\t\tcontext.Variables[key] = value\n\t\t\t}\n\t\t}\n\n\t\tif self.Handler != nil {\n\t\t\treturn self.Handler(context)\n\t\t}\n\t}\n\n\treturn false\n}", "func (req *ServerHTTPRequest) Context() context.Context {\n\treturn req.httpRequest.Context()\n}", "func (m GatewayName) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}", "func TestContextIsAccessible(t *testing.T) {\n\t// case 1: success\n\tsuccHand := func(w http.ResponseWriter, r *http.Request) {\n\t\ttoken := Token(r)\n\t\tif token == \"\" {\n\t\t\tt.Errorf(\"Token is inaccessible in the success handler\")\n\t\t}\n\t}\n\n\thand := New(http.HandlerFunc(succHand))\n\n\t// we need a request that passes. Let's just use a safe method for that.\n\treq := dummyGet()\n\twriter := httptest.NewRecorder()\n\n\thand.ServeHTTP(writer, req)\n}", "func GetContext(req *http.Request) *Context {\n\tif ctx, ok := req.Context().Value(contextKey).(*Context); ok {\n\t\treturn ctx\n\t}\n\treturn nil\n}", "func TestContextIsAccessible(t *testing.T) {\n\t// case 1: success\n\tsuccHand := func(w http.ResponseWriter, r *http.Request) {\n\t\ttoken := Token(r)\n\t\tif token == \"\" {\n\t\t\tt.Errorf(\"Token is inaccessible in the success handler\")\n\t\t}\n\t}\n\n\thand := New()\n\n\t// we need a request that passes. Let's just use a safe method for that.\n\treq := dummyGet()\n\twriter := httptest.NewRecorder()\n\n\tchain := alice.New(hand.Handler).Then(http.HandlerFunc(succHand))\n\tchain.ServeHTTP(writer, req)\n}", "func (mr *MockWerftService_ListenServerMockRecorder) Context() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Context\", reflect.TypeOf((*MockWerftService_ListenServer)(nil).Context))\n}" ]
[ "0.7213183", "0.5509956", "0.5483808", "0.5468682", "0.5381114", "0.53599775", "0.53556824", "0.5326522", "0.5323669", "0.53084695", "0.5305011", "0.5259829", "0.52473307", "0.52404714", "0.52363706", "0.52258265", "0.5179706", "0.51651525", "0.5151531", "0.5140357", "0.50984246", "0.5097525", "0.5091238", "0.5087568", "0.50840974", "0.50426304", "0.5042026", "0.50404316", "0.50275743", "0.5004554", "0.5000439", "0.49934858", "0.49768698", "0.4974748", "0.4973135", "0.497138", "0.4971138", "0.49621028", "0.49570346", "0.4955459", "0.4955009", "0.49532658", "0.49521178", "0.4947634", "0.49403572", "0.49320734", "0.49174556", "0.4916288", "0.49059448", "0.49056667", "0.49032268", "0.49028027", "0.49002814", "0.48997834", "0.48987183", "0.48937798", "0.48921418", "0.48904592", "0.48863208", "0.48852938", "0.48738313", "0.48617396", "0.4860516", "0.48578733", "0.4846572", "0.4840971", "0.48404494", "0.4838868", "0.48382926", "0.4835343", "0.4826949", "0.48251045", "0.48209253", "0.48117793", "0.4805406", "0.47896054", "0.47896054", "0.47896054", "0.47896054", "0.47892267", "0.47884405", "0.47862947", "0.47845614", "0.47844872", "0.47840726", "0.4780041", "0.47786278", "0.47776216", "0.47775635", "0.47732207", "0.47654602", "0.47602156", "0.47586444", "0.47552463", "0.4753476", "0.47524318", "0.4747996", "0.47397244", "0.47390032", "0.47345155" ]
0.76307327
0
check the first parameter, true if it wants only a Renderer
func hasRendererParam(handlerType reflect.Type) bool {
	//if the handler doesn't take arguments, false
	if handlerType.NumIn() == 0 {
		return false
	}
	//if the first argument is not a pointer, false
	p1 := handlerType.In(0)
	if p1.Kind() != reflect.Ptr {
		return false
	}
	//but if the first argument is a renderer, true
	if p1.Elem() == rendererType {
		return true
	}
	return false
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func hasContextAndRenderer(handlerType reflect.Type) bool {\n\n\t//first check if we have pass 2 arguments\n\tif handlerType.NumIn() < 2 {\n\t\treturn false\n\t}\n\n\tfirstParamIsContext := hasContextParam(handlerType)\n\n\t//the first argument/parameter is always context if exists otherwise it's only Renderer or ResponseWriter,Request.\n\tif firstParamIsContext == false {\n\t\treturn false\n\t}\n\n\tp2 := handlerType.In(1)\n\tif p2.Kind() != reflect.Ptr {\n\t\treturn false\n\t}\n\t//but if the first argument is a context, true\n\tif p2.Elem() == rendererType {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func IsRendered(wid fyne.Widget) bool {\n\trenderersLock.RLock()\n\t_, found := renderers[wid]\n\trenderersLock.RUnlock()\n\treturn found\n}", "func (DrawTexture) IsDrawAction() {}", "func IsRenderbuffer(renderbuffer uint32) bool {\n ret := C.glowIsRenderbuffer(gpIsRenderbuffer, (C.GLuint)(renderbuffer))\n return ret == TRUE\n}", "func Renderer(wid fyne.Widget) fyne.WidgetRenderer {\n\tif wid == nil {\n\t\treturn nil\n\t}\n\n\tif wd, ok := wid.(isBaseWidget); ok {\n\t\tif wd.super() != nil {\n\t\t\twid = wd.super()\n\t\t}\n\t}\n\n\trenderersLock.RLock()\n\trinfo, ok := renderers[wid]\n\trenderersLock.RUnlock()\n\tif !ok {\n\t\trinfo = &rendererInfo{renderer: wid.CreateRenderer()}\n\t\trenderersLock.Lock()\n\t\trenderers[wid] = rinfo\n\t\trenderersLock.Unlock()\n\t}\n\n\tif rinfo == nil {\n\t\treturn nil\n\t}\n\n\trinfo.setAlive()\n\n\treturn rinfo.renderer\n}", "func (renderbuffer Renderbuffer) IsRenderbuffer() bool {\n\treturn gl.IsRenderbuffer(uint32(renderbuffer))\n}", "func (DrawText) IsDrawAction() {}", "func IsRenderbuffer(rb Renderbuffer) bool {\n\treturn gl.IsRenderbuffer(rb.Value)\n}", "func (isRenderable) Filter(e ces.Entity) bool {\n\t_, ok := e.(renderComponent)\n\treturn ok\n}", "func IsRenderbuffer(renderbuffer uint32) bool {\n\tret, _, _ := syscall.Syscall(gpIsRenderbuffer, 1, uintptr(renderbuffer), 0, 0)\n\treturn ret != 0\n}", "func IsRenderbuffer(renderbuffer uint32) bool {\n\tret := C.glowIsRenderbuffer(gpIsRenderbuffer, (C.GLuint)(renderbuffer))\n\treturn ret == TRUE\n}", "func IsRenderbuffer(renderbuffer uint32) bool {\n\tret := C.glowIsRenderbuffer(gpIsRenderbuffer, (C.GLuint)(renderbuffer))\n\treturn ret == TRUE\n}", "func (self *TileSprite) Renderable() bool{\n return self.Object.Get(\"renderable\").Bool()\n}", "func IsRenderbuffer(renderbuffer Uint) Boolean {\n\tcrenderbuffer, _ := (C.GLuint)(renderbuffer), cgoAllocsUnknown\n\t__ret := C.glIsRenderbuffer(crenderbuffer)\n\t__v := (Boolean)(__ret)\n\treturn __v\n}", "func (r *Renderer) RenderFrame() RenderFrame {\n\tif targetA {return frameB}\n\treturn frameA\n}", "func isDraw(s Abs) bool {\n\tif s.err == nil {\n\t\treturn false\n\t}\n\t_, ok := s.err.(checkmateError)\n\treturn !ok\n}", "func (me TxsdFeBlendTypeMode) IsScreen() bool { return me.String() == \"screen\" }", "func (is *MenuPage) Render(mainWindowSurface *sdl.Surface) {\n\n}", "func DetectRendererType(filename string, input io.Reader) string {\n\tbuf, err := io.ReadAll(input)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\tfor _, renderer := range renderers {\n\t\tif detector, ok := renderer.(RendererContentDetector); ok && detector.CanRender(filename, bytes.NewReader(buf)) {\n\t\t\treturn renderer.Name()\n\t\t}\n\t}\n\treturn \"\"\n}", "func (b *Baa) Render() Renderer {\n\treturn b.GetDI(\"render\").(Renderer)\n}", "func (self *Graphics) InCamera() bool{\n return self.Object.Get(\"inCamera\").Bool()\n}", "func (gm *GraphicsManager) Render(compsToSend 
*common.Vector) {\n\thandlerIndex := 0\n\tdefer gm.handleClosedGraphicsHandler(handlerIndex)\n\n\t//common.LogInfo.Println(compsToSend)\n\tfor handlerIndex = range gm.graphicsHandlersLink {\n\t\tif gm.graphicsHandlersLink[handlerIndex] == nil {\n\t\t\tcontinue\n\t\t}\n\t\tgm.graphicsHandlersLink[handlerIndex] <- compsToSend\n\t}\n}", "func (a *Application) Renderer() *renderer.Renderer {\r\n\r\n\treturn a.renderer\r\n}", "func (self *GameObjectCreator) RenderTexture1O(width int) *RenderTexture{\n return &RenderTexture{self.Object.Call(\"renderTexture\", width)}\n}", "func (me TxsdPresentationAttributesGraphicsDisplay) IsRunIn() bool { return me.String() == \"run-in\" }", "func GetRendererByType(tp string) Renderer {\n\treturn renderers[tp]\n}", "func IsGraphic(r rune) bool", "func (self *Graphics) InputEnabled() bool{\n return self.Object.Get(\"inputEnabled\").Bool()\n}", "func (self *Graphics) AutoCull() bool{\n return self.Object.Get(\"autoCull\").Bool()\n}", "func (obj *graphicsObject) renderSelf(self GraphicsObject) {\n\tobj.ifNotRendered(func() {\n\t\tobj.page.render(self)\n\t\tobj.rendered = true\n\t})\n}", "func (r ErrorRenderer) Render(_ io.Writer, _ interface{}) error { return r.Error }", "func (self *Graphics) IgnoreChildInput() bool{\n return self.Object.Get(\"ignoreChildInput\").Bool()\n}", "func (rf RendererFunc) Render(w io.Writer, v interface{}) error { return rf(w, v) }", "func (app *controlsTestApplication) RectangleRenderer() *graphics.RectangleRenderer {\n\treturn app.rectRenderer\n}", "func IsBuffer(buffer uint32) bool {\n ret := C.glowIsBuffer(gpIsBuffer, (C.GLuint)(buffer))\n return ret == TRUE\n}", "func (srv *Server) RenderOnly(items ...screenElement) {\n\tsrv.mu.RLock()\n\tdefer srv.mu.RUnlock()\n\tvar its []ui.Drawable\n\tfor _, it := range items {\n\t\tswitch it {\n\t\tcase InputTextBox:\n\t\t\tits = append(its, srv.inputTextBox)\n\t\tcase StatusBar:\n\t\t\tits = append(its, srv.statusBar)\n\t\tcase MainWindow:\n\t\t\tits = append(its, srv.mainWindow)\n\t\t}\n\t}\n\tui.Render(its...)\n}", "func (me TxsdFeBlendTypeMode) IsNormal() bool { return me.String() == \"normal\" }", "func (self *Graphics) InWorld() bool{\n return self.Object.Get(\"inWorld\").Bool()\n}", "func (self *Graphics) _renderCanvas(renderSession *RenderSession) {\n self.Object.Call(\"_renderCanvas\", renderSession)\n}", "func (o *os) CanDraw() gdnative.Bool {\n\to.ensureSingleton()\n\t//log.Println(\"Calling _OS.CanDraw()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"_OS\", \"can_draw\")\n\n\t// Call the parent method.\n\t// bool\n\tretPtr := gdnative.NewEmptyBool()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n\t// If we have a return type, convert it from a pointer into its actual object.\n\tret := gdnative.NewBoolFromPointer(retPtr)\n\treturn ret\n}", "func (self *Tween) FrameBased() bool{\n return self.Object.Get(\"frameBased\").Bool()\n}", "func (self *Graphics) FixedToCamera() bool{\n return self.Object.Get(\"fixedToCamera\").Bool()\n}", "func (me TxsdPresentationAttributesGraphicsShapeRendering) IsInherit() bool {\n\treturn me.String() == \"inherit\"\n}", "func (self *Graphics) _renderCanvasI(args ...interface{}) {\n self.Object.Call(\"_renderCanvas\", args)\n}", "func loadRendering(item interface{}) Rendering {\n\trendering := Rendering{}\n\tmapstructure.Decode(item, &rendering)\n\tsettings := 
item.(map[string]interface{})[\"render\"]\n\trenders := map[string]Render{}\n\tmapstructure.Decode(settings, &renders)\n\n\t// Default needs to be true\n\tfor key, render := range renders {\n\n\t\t// yes, this is really ugly!\n\t\tusePointer := settings.(map[string]interface{})[key].(map[string]interface{})[\"parameters\"].(map[string]interface{})[\"pointers\"]\n\t\tif usePointer == nil {\n\t\t\trender.Parameters.Pointers = true\n\t\t} else {\n\t\t\trender.Parameters.Pointers = usePointer.(bool)\n\t\t}\n\t\trenders[key] = render\n\t}\n\trendering.Renders = renders\n\treturn rendering\n}", "func RegisterRenderer(renderer Renderer) {\n\trenderers[renderer.Name()] = renderer\n\tfor _, ext := range renderer.Extensions() {\n\t\textRenderers[strings.ToLower(ext)] = renderer\n\t}\n}", "func (me TxsdPresentationAttributesGraphicsDisplay) IsInherit() bool { return me.String() == \"inherit\" }", "func (self *Graphics) Fresh() bool{\n return self.Object.Get(\"fresh\").Bool()\n}", "func (self *Graphics) Debug() bool{\n return self.Object.Get(\"debug\").Bool()\n}", "func DestroyRenderer(wid fyne.Widget) {\n\trenderersLock.RLock()\n\trinfo, ok := renderers[wid]\n\trenderersLock.RUnlock()\n\tif !ok {\n\t\treturn\n\t}\n\tif rinfo != nil {\n\t\trinfo.renderer.Destroy()\n\t}\n\trenderersLock.Lock()\n\tdelete(renderers, wid)\n\trenderersLock.Unlock()\n}", "func (me TxsdPresentationAttributesGraphicsDisplay) IsMarker() bool { return me.String() == \"marker\" }", "func (c *Camera) TestDraw() {\n\tc.renderer.Test()\n}", "func (me TxsdPresentationAttributesGraphicsTextRendering) IsAuto() bool { return me.String() == \"auto\" }", "func (self *Graphics) PendingDestroy() bool{\n return self.Object.Get(\"pendingDestroy\").Bool()\n}", "func (e Keyboard) IsGraphic() bool {\n\treturn !e.Control && !e.Meta && unicode.IsGraphic(e.Rune)\n}", "func (config *Config) IsDrawIO() bool {\n\treturn config.GetOutputFormat() == \"drawio\"\n}", "func (me TxsdMarkerTypeMarkerUnits) IsStrokeWidth() bool { return me.String() == \"strokeWidth\" }", "func (me TxsdPresentationAttributesGraphicsDisplay) IsNone() bool { return me.String() == \"none\" }", "func (self *GameObjectCreator) RenderTexture() *RenderTexture{\n return &RenderTexture{self.Object.Call(\"renderTexture\")}\n}", "func (me TxsdMovementType) IsGr() bool { return me.String() == \"GR\" }", "func (me TxsdPresentationAttributesGraphicsTextRendering) IsInherit() bool {\n\treturn me.String() == \"inherit\"\n}", "func RegisterRenderer(name string, r LayoutRenderer) {\n\tif renderers == nil {\n\t\trenderers = make(map[string]LayoutRenderer)\n\t}\n\trenderers[name] = r\n}", "func (c *SceneController) OnRender() {\n}", "func (self *Graphics) Exists() bool{\n return self.Object.Get(\"exists\").Bool()\n}", "func (r *Renderer) Render() {\n\tgl.DrawArrays(gl.TRIANGLES, 0, int32(len(r.RawRenderer)*4))\n}", "func IsBuffer(buffer uint32) bool {\n\tret := C.glowIsBuffer(gpIsBuffer, (C.GLuint)(buffer))\n\treturn ret == TRUE\n}", "func IsBuffer(buffer uint32) bool {\n\tret := C.glowIsBuffer(gpIsBuffer, (C.GLuint)(buffer))\n\treturn ret == TRUE\n}", "func (self *Graphics) IsMask() bool{\n return self.Object.Get(\"isMask\").Bool()\n}", "func (me TdtypeType) IsFrame() bool { return me.String() == \"frame\" }", "func (me TxsdPresentationAttributesGraphicsDisplay) IsBlock() bool { return me.String() == \"block\" }", "func (self *TileSprite) SetRenderableA(member bool) {\n self.Object.Set(\"renderable\", member)\n}", "func NewRenderer(cfg renderer.Config, hubClient hubclient.HubClient, 
typeInstanceHandler *TypeInstanceHandler) *Renderer {\n\tr := &Renderer{\n\t\ttypeInstanceHandler: typeInstanceHandler,\n\t\tmaxDepth: cfg.MaxDepth,\n\t\trenderTimeout: cfg.RenderTimeout,\n\t\thubClient: hubClient,\n\t}\n\n\treturn r\n}", "func (*HTML) isOutput() {\n}", "func IsSampler(sampler uint32) bool {\n ret := C.glowIsSampler(gpIsSampler, (C.GLuint)(sampler))\n return ret == TRUE\n}", "func (gm *GraphicsManager) ForceRender(compsToSend *common.Vector) {\n\thandlerIndex := 0\n\tdefer gm.handleClosedGraphicsHandler(handlerIndex)\n\n\tgm.Render(compsToSend)\n\tfor handlerIndex = range gm.resizelink {\n\t\tif gm.resizelink[handlerIndex] == nil {\n\t\t\tcontinue\n\t\t}\n\t\tgm.resizelink[handlerIndex] <- true\n\t}\n\n\tgm.justForcedARender = true\n\n}", "func (v *View) Renderer(matcher func(instance.Description) bool) (func(w io.Writer, v interface{}) error, error) {\n\ttagsView, err := template.NewTemplate(template.ValidURL(v.tagsTemplate), v.options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpropertiesView, err := template.NewTemplate(template.ValidURL(v.propertiesTemplate), v.options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn func(w io.Writer, result interface{}) error {\n\n\t\tinstances, is := result.([]instance.Description)\n\t\tif !is {\n\t\t\treturn fmt.Errorf(\"not []instance.Description\")\n\t\t}\n\n\t\tif !v.quiet {\n\t\t\tif v.viewTemplate != \"\" {\n\t\t\t\tfmt.Printf(\"%-30s\\t%-30s\\n\", \"ID\", \"VIEW\")\n\t\t\t} else if v.properties {\n\t\t\t\tfmt.Printf(\"%-30s\\t%-30s\\t%-30s\\t%-s\\n\", \"ID\", \"LOGICAL\", \"TAGS\", \"PROPERTIES\")\n\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"%-30s\\t%-30s\\t%-s\\n\", \"ID\", \"LOGICAL\", \"TAGS\")\n\t\t\t}\n\t\t}\n\t\tfor _, d := range instances {\n\n\t\t\t// TODO - filter on the client side by tags\n\t\t\tif len(v.TagFilter()) > 0 {\n\t\t\t\tif hasDifferentTag(v.TagFilter(), d.Tags) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tif matcher != nil {\n\t\t\t\tif !matcher(d) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tlogical := \" - \"\n\t\t\tif d.LogicalID != nil {\n\t\t\t\tlogical = string(*d.LogicalID)\n\t\t\t}\n\n\t\t\tif v.viewTemplate != \"\" {\n\n\t\t\t\tcolumn := \"-\"\n\t\t\t\tif view, err := d.View(v.viewTemplate); err == nil {\n\t\t\t\t\tcolumn = view\n\t\t\t\t} else {\n\t\t\t\t\tcolumn = err.Error()\n\t\t\t\t}\n\t\t\t\tfmt.Printf(\"%-30s\\t%-30s\\n\", d.ID, column)\n\n\t\t\t} else {\n\n\t\t\t\ttagViewBuff := \"\"\n\t\t\t\tif v.tagsTemplate == \"*\" {\n\t\t\t\t\t// default -- this is a hack\n\t\t\t\t\tprintTags := []string{}\n\t\t\t\t\tfor k, v := range d.Tags {\n\t\t\t\t\t\tprintTags = append(printTags, fmt.Sprintf(\"%s=%s\", k, v))\n\t\t\t\t\t}\n\t\t\t\t\tsort.Strings(printTags)\n\t\t\t\t\ttagViewBuff = strings.Join(printTags, \",\")\n\t\t\t\t} else {\n\t\t\t\t\ttagViewBuff = renderTags(d.Tags, tagsView)\n\t\t\t\t}\n\n\t\t\t\tif v.properties {\n\n\t\t\t\t\tif v.quiet {\n\t\t\t\t\t\t// special render only the properties\n\t\t\t\t\t\tfmt.Printf(\"%s\", renderProperties(d.Properties, propertiesView))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfmt.Printf(\"%-30s\\t%-30s\\t%-30s\\t%-s\\n\", d.ID, logical, tagViewBuff,\n\t\t\t\t\t\t\trenderProperties(d.Properties, propertiesView))\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Printf(\"%-30s\\t%-30s\\t%-s\\n\", d.ID, logical, tagViewBuff)\n\t\t\t\t}\n\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}, nil\n}", "func (base fontCommon) isCIDFont() bool {\n\tif base.subtype == \"\" {\n\t\tcommon.Log.Debug(\"ERROR: isCIDFont. context is nil. 
font=%s\", base)\n\t}\n\tisCID := false\n\tswitch base.subtype {\n\tcase \"Type0\", \"CIDFontType0\", \"CIDFontType2\":\n\t\tisCID = true\n\t}\n\tcommon.Log.Trace(\"isCIDFont: isCID=%t font=%s\", isCID, base)\n\treturn isCID\n}", "func (self *Graphics) CheckWorldBounds() bool{\n return self.Object.Get(\"checkWorldBounds\").Bool()\n}", "func (self *Graphics) DestroyPhase() bool{\n return self.Object.Get(\"destroyPhase\").Bool()\n}", "func (self *Graphics) _renderWebGLI(args ...interface{}) {\n self.Object.Call(\"_renderWebGL\", args)\n}", "func (self *Graphics) OutOfCameraBoundsKill() bool{\n return self.Object.Get(\"outOfCameraBoundsKill\").Bool()\n}", "func (me TxsdPresentationAttributesGraphicsPointerEvents) IsFill() bool { return me.String() == \"fill\" }", "func (vm *C8VM) IsDrawFlagSet() bool {\n\treturn vm.drawFlag\n}", "func (sp *ServiceProcessor) RegisterRenderer(renderer renderer.ServiceRendererAPI) error {\n\tsp.renderers = append(sp.renderers, renderer)\n\treturn nil\n}", "func GetDebugRenderable(rName string) (Renderable, bool) {\n\tr, ok := debugMap.Load(rName)\n\tif r == nil {\n\t\treturn nil, false\n\t}\n\treturn r.(Renderable), ok\n}", "func (T triangle) shouldrender() (float64,float64,float64,float64){\n\treturn (T.p0.x*T.p0.x+T.p0.y*T.p0.y-T.size)/(T.p0.z*T.p0.z),T.p0.z,T.p0dotn/T.p0.Abs()/T.n.Abs(),T.size\n}", "func IsBuffer(buffer Uint) Boolean {\n\tcbuffer, _ := (C.GLuint)(buffer), cgoAllocsUnknown\n\t__ret := C.glIsBuffer(cbuffer)\n\t__v := (Boolean)(__ret)\n\treturn __v\n}", "func New(set *jet.Set) *Renderer {\n\treturn &Renderer{\n\t\tSet: set,\n\t}\n}", "func (self *GameObjectCreator) Graphics() *Graphics{\n return &Graphics{self.Object.Call(\"graphics\")}\n}", "func (self *GameObjectCreator) Graphics1O(x int) *Graphics{\n return &Graphics{self.Object.Call(\"graphics\", x)}\n}", "func (me TxsdTextPathTypeMethod) IsStretch() bool { return me.String() == \"stretch\" }", "func (s *Site) RendererManager() renderers.Renderers {\n\tif s.renderer == nil {\n\t\tpanic(fmt.Errorf(\"uninitialized rendering manager\"))\n\t}\n\treturn s.renderer\n}", "func (self *TileSprite) _renderCanvas(renderSession *RenderSession) {\n self.Object.Call(\"_renderCanvas\", renderSession)\n}", "func (me TxsdShow) IsEmbed() bool { return me == \"embed\" }", "func (ob *Object) Render() {\n\tob.image.Draw(ob.x, ob.y, allegro.FLIP_NONE)\n}", "func (me TAttlistLocationLabelType) IsFigure() bool { return me.String() == \"figure\" }", "func Renderer() func(string) {\n\tvar prev string\n\n\treturn func(curr string) {\n\t\t// clear lines\n\t\tif prev != \"\" {\n\t\t\tfor range lines(prev) {\n\t\t\t\tMoveUp(1)\n\t\t\t\tClearLine()\n\t\t\t}\n\t\t}\n\n\t\t// print lines\n\t\tif curr != \"\" {\n\t\t\tfor _, s := range lines(curr) {\n\t\t\t\tfmt.Printf(\"%s\\n\", s)\n\t\t\t}\n\t\t}\n\n\t\tprev = curr\n\t}\n}", "func (me TxsdViewTypeZoomAndPan) IsZoom() bool { return me.String() == \"zoom\" }", "func (me TxsdPresentationAttributesColorColorRendering) IsAuto() bool { return me.String() == \"auto\" }", "func isNativeInline(o fyne.CanvasObject) bool {\n\tswitch o.(type) {\n\tcase *canvas.Image, *canvas.Text, *widget.Label:\n\t\treturn true\n\t}\n\treturn false\n}" ]
[ "0.64195585", "0.6211272", "0.5952793", "0.5785611", "0.57511944", "0.5662539", "0.56310743", "0.56270677", "0.5563061", "0.5553207", "0.55048263", "0.55048263", "0.55043006", "0.5455274", "0.5440663", "0.54219353", "0.5395095", "0.5364567", "0.5228344", "0.52223676", "0.519283", "0.5154841", "0.5075897", "0.5032395", "0.50191545", "0.5008844", "0.49966112", "0.4985985", "0.49469015", "0.49297443", "0.49257928", "0.49252596", "0.49220333", "0.4914986", "0.4910191", "0.49060613", "0.48835608", "0.48747176", "0.48715156", "0.48681337", "0.48639357", "0.4863874", "0.4857052", "0.48548833", "0.48250088", "0.481801", "0.48018235", "0.47967777", "0.47958437", "0.47792184", "0.47780138", "0.47651187", "0.4764392", "0.47623077", "0.47578657", "0.4756494", "0.47516525", "0.47478902", "0.4740155", "0.4738263", "0.4722567", "0.47222298", "0.47200754", "0.47179383", "0.47074568", "0.46994096", "0.46994096", "0.46908468", "0.46783292", "0.46715888", "0.46578854", "0.46433708", "0.4642997", "0.46421987", "0.46317333", "0.462847", "0.46262547", "0.46262196", "0.4626072", "0.46246272", "0.46243945", "0.4618955", "0.46048516", "0.4596852", "0.4595965", "0.4588563", "0.45885193", "0.4583967", "0.45780075", "0.45727643", "0.456959", "0.4568916", "0.4562199", "0.45560375", "0.45514348", "0.45442173", "0.45309442", "0.45290267", "0.45258054", "0.4520331" ]
0.72854406
0
check if two parameters, true if it wants Context following by a Renderer
func hasContextAndRenderer(handlerType reflect.Type) bool { //first check if we have pass 2 arguments if handlerType.NumIn() < 2 { return false } firstParamIsContext := hasContextParam(handlerType) //the first argument/parameter is always context if exists otherwise it's only Renderer or ResponseWriter,Request. if firstParamIsContext == false { return false } p2 := handlerType.In(1) if p2.Kind() != reflect.Ptr { return false } //but if the first argument is a context, true if p2.Elem() == rendererType { return true } return false }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func hasRendererParam(handlerType reflect.Type) bool {\n\t//if the handler doesn't take arguments, false\n\tif handlerType.NumIn() == 0 {\n\t\treturn false\n\t}\n\n\t//if the first argument is not a pointer, false\n\tp1 := handlerType.In(0)\n\tif p1.Kind() != reflect.Ptr {\n\t\treturn false\n\t}\n\t//but if the first argument is a renderer, true\n\tif p1.Elem() == rendererType {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (DrawTexture) IsDrawAction() {}", "func (DrawText) IsDrawAction() {}", "func hasContextParam(handlerType reflect.Type) bool {\n\t//if the handler doesn't take arguments, false\n\tif handlerType.NumIn() == 0 {\n\t\treturn false\n\t}\n\n\t//if the first argument is not a pointer, false\n\tp1 := handlerType.In(0)\n\tif p1.Kind() != reflect.Ptr {\n\t\treturn false\n\t}\n\t//but if the first argument is a context, true\n\tif p1.Elem() == contextType {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (me TxsdFeBlendTypeMode) IsScreen() bool { return me.String() == \"screen\" }", "func (r *subprocess) isContext(n ast.Node, ctx *gosec.Context) bool {\n\tselector, indent, err := gosec.GetCallInfo(n, ctx)\n\tif err != nil {\n\t\treturn false\n\t}\n\tif selector == \"exec\" && indent == \"CommandContext\" {\n\t\treturn true\n\t}\n\treturn false\n}", "func (me TxsdColorProfileTypeRenderingIntent) IsPerceptual() bool { return me.String() == \"perceptual\" }", "func (self *Graphics) InWorld() bool{\n return self.Object.Get(\"inWorld\").Bool()\n}", "func (r *Renderer) RenderFrame() RenderFrame {\n\tif targetA {return frameB}\n\treturn frameA\n}", "func (me TxsdFeBlendTypeMode) IsMultiply() bool { return me.String() == \"multiply\" }", "func Context(c gfx.Context) gfx.Context {\n\treturn Checker(c)\n}", "func (self *Graphics) InCamera() bool{\n return self.Object.Get(\"inCamera\").Bool()\n}", "func (me TxsdFeBlendTypeMode) IsNormal() bool { return me.String() == \"normal\" }", "func (pos *Pos) IsPassed(x, y int) bool {\n\treturn (*pos.maps)[x*pos.Len+y] == 10 //has passd\n}", "func (self *Graphics) AutoCull() bool{\n return self.Object.Get(\"autoCull\").Bool()\n}", "func (me TxsdPresentationAttributesGraphicsPointerEvents) IsFill() bool { return me.String() == \"fill\" }", "func (T triangle) shouldrender() (float64,float64,float64,float64){\n\treturn (T.p0.x*T.p0.x+T.p0.y*T.p0.y-T.size)/(T.p0.z*T.p0.z),T.p0.z,T.p0dotn/T.p0.Abs()/T.n.Abs(),T.size\n}", "func (self *Graphics) BlendMode() int{\n return self.Object.Get(\"blendMode\").Int()\n}", "func isDraw(s Abs) bool {\n\tif s.err == nil {\n\t\treturn false\n\t}\n\t_, ok := s.err.(checkmateError)\n\treturn !ok\n}", "func (me TxsdFeBlendTypeMode) IsLighten() bool { return me.String() == \"lighten\" }", "func (base fontCommon) isCIDFont() bool {\n\tif base.subtype == \"\" {\n\t\tcommon.Log.Debug(\"ERROR: isCIDFont. context is nil. 
font=%s\", base)\n\t}\n\tisCID := false\n\tswitch base.subtype {\n\tcase \"Type0\", \"CIDFontType0\", \"CIDFontType2\":\n\t\tisCID = true\n\t}\n\tcommon.Log.Trace(\"isCIDFont: isCID=%t font=%s\", isCID, base)\n\treturn isCID\n}", "func (this *inlineBody) SwitchContext() value.Tristate {\n\treturn value.FALSE\n}", "func IsRendered(wid fyne.Widget) bool {\n\trenderersLock.RLock()\n\t_, found := renderers[wid]\n\trenderersLock.RUnlock()\n\treturn found\n}", "func (is *MenuPage) Render(mainWindowSurface *sdl.Surface) {\n\n}", "func (me TxsdPresentationAttributesGraphicsDisplay) IsInherit() bool { return me.String() == \"inherit\" }", "func (self *Tween) FrameBased() bool{\n return self.Object.Get(\"frameBased\").Bool()\n}", "func (b *Bool) Context() lexer.Context {\n\treturn b.Token.Pos\n}", "func (context Context) IsUsingMouse() bool {\n\treturn imgui.CurrentIO().WantCaptureMouse()\n}", "func (self *Graphics) InputEnabled() bool{\n return self.Object.Get(\"inputEnabled\").Bool()\n}", "func (me TxsdPresentationAttributesGraphicsTextRendering) IsInherit() bool {\n\treturn me.String() == \"inherit\"\n}", "func (r *Renderer)TargetFrame() RenderFrame{\n\tif targetA {return frameA}\n\treturn frameB\n}", "func (self *Graphics) IgnoreChildInput() bool{\n return self.Object.Get(\"ignoreChildInput\").Bool()\n}", "func (me TxsdPresentationAttributesGraphicsDisplay) IsRunIn() bool { return me.String() == \"run-in\" }", "func (self *Graphics) FixedToCamera() bool{\n return self.Object.Get(\"fixedToCamera\").Bool()\n}", "func (self *TileSprite) Renderable() bool{\n return self.Object.Get(\"renderable\").Bool()\n}", "func (self *Graphics) CheckWorldBounds() bool{\n return self.Object.Get(\"checkWorldBounds\").Bool()\n}", "func (rs *RenderSystem) prepare() {\n\tgl.Enable(gl.CULL_FACE)\n\tgl.CullFace(gl.BACK)\n\tgl.Enable(gl.DEPTH_TEST)\n\tgl.Clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT)\n\tgl.ClearColor(rs.BaseColour.R, rs.BaseColour.G, rs.BaseColour.B, rs.BaseColour.A)\n\tif rs.drawPolygon {\n\t\tgl.PolygonMode(gl.FRONT_AND_BACK, gl.LINE)\n\t} else {\n\t\tgl.PolygonMode(gl.FRONT_AND_BACK, gl.FILL)\n\t}\n}", "func (me TxsdPresentationAttributesGraphicsShapeRendering) IsInherit() bool {\n\treturn me.String() == \"inherit\"\n}", "func (o *os) CanDraw() gdnative.Bool {\n\to.ensureSingleton()\n\t//log.Println(\"Calling _OS.CanDraw()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"_OS\", \"can_draw\")\n\n\t// Call the parent method.\n\t// bool\n\tretPtr := gdnative.NewEmptyBool()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n\t// If we have a return type, convert it from a pointer into its actual object.\n\tret := gdnative.NewBoolFromPointer(retPtr)\n\treturn ret\n}", "func (self *Graphics) DestroyPhase() bool{\n return self.Object.Get(\"destroyPhase\").Bool()\n}", "func IsRenderbuffer(renderbuffer uint32) bool {\n ret := C.glowIsRenderbuffer(gpIsRenderbuffer, (C.GLuint)(renderbuffer))\n return ret == TRUE\n}", "func (self *Graphics) PendingDestroy() bool{\n return self.Object.Get(\"pendingDestroy\").Bool()\n}", "func (attendant *Attendant) Context(key string) (interface{}, bool) {\n\tresult, ok := attendant.context[key]\n\treturn result, ok\n}", "func (o *TelemetryDruidScanRequestAllOf) HasContext() bool {\n\tif o != nil && o.Context != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (renderbuffer Renderbuffer) IsRenderbuffer() bool {\n\treturn 
gl.IsRenderbuffer(uint32(renderbuffer))\n}", "func (ButtonPressedAction) isEngineAction() {}", "func (native *OpenGL) BeginConditionalRender(id uint32, mode uint32) {\n\tgl.BeginConditionalRender(id, mode)\n}", "func (isRenderable) Filter(e ces.Entity) bool {\n\t_, ok := e.(renderComponent)\n\treturn ok\n}", "func IsBuffer(buffer uint32) bool {\n ret := C.glowIsBuffer(gpIsBuffer, (C.GLuint)(buffer))\n return ret == TRUE\n}", "func HasContext(status *statuspb.Status) bool {\n\tcontext := status.GetContext()\n\treturn context != nil\n}", "func (self *Graphics) Debug() bool{\n return self.Object.Get(\"debug\").Bool()\n}", "func (me TxsdPresentationAttributesGraphicsDisplay) IsBlock() bool { return me.String() == \"block\" }", "func (o *Comment) HasContext() bool {\n\tif o != nil && o.Context != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (this *javascriptBody) SwitchContext() value.Tristate {\n\treturn value.FALSE\n}", "func (me TxsdPresentationAttributesColorColorRendering) IsInherit() bool {\n\treturn me.String() == \"inherit\"\n}", "func (self *Graphics) _renderCanvas(renderSession *RenderSession) {\n self.Object.Call(\"_renderCanvas\", renderSession)\n}", "func IsBuffer(buffer Uint) Boolean {\n\tcbuffer, _ := (C.GLuint)(buffer), cgoAllocsUnknown\n\t__ret := C.glIsBuffer(cbuffer)\n\t__v := (Boolean)(__ret)\n\treturn __v\n}", "func IsRenderbuffer(renderbuffer Uint) Boolean {\n\tcrenderbuffer, _ := (C.GLuint)(renderbuffer), cgoAllocsUnknown\n\t__ret := C.glIsRenderbuffer(crenderbuffer)\n\t__v := (Boolean)(__ret)\n\treturn __v\n}", "func (me TxsdPresentationAttributesGraphicsPointerEvents) IsPainted() bool {\n\treturn me.String() == \"painted\"\n}", "func IsBuffer(buffer uint32) bool {\n\tret := C.glowIsBuffer(gpIsBuffer, (C.GLuint)(buffer))\n\treturn ret == TRUE\n}", "func IsBuffer(buffer uint32) bool {\n\tret := C.glowIsBuffer(gpIsBuffer, (C.GLuint)(buffer))\n\treturn ret == TRUE\n}", "func (me TxsdPresentationAttributesGraphicsDisplay) IsMarker() bool { return me.String() == \"marker\" }", "func (s *BaseCGListener) EnterCr(ctx *CrContext) {}", "func (me TxsdPresentationAttributesGraphicsPointerEvents) IsStroke() bool {\n\treturn me.String() == \"stroke\"\n}", "func (route *Route) DoesMatchContext(c *Context) bool {\n\n\t// by default, we match\n\tvar match bool = true\n\tif len(route.MatcherFuncs) > 0 {\n\n\t\t// there are some matcher functions, so don't automatically\n\t\t// match by default - let the matchers decide\n\t\tmatch = false\n\n\t\t// loop through the matcher functions\n\t\tfor _, f := range route.MatcherFuncs {\n\t\t\t// modify 'match' based on the result of the matcher function\n\t\t\tswitch f(c) {\n\t\t\tcase NoMatch:\n\t\t\t\tmatch = false\n\t\t\tcase Match:\n\t\t\t\tmatch = true\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\t// return the result\n\treturn match\n\n}", "func (c *Camera) TestDraw() {\n\tc.renderer.Test()\n}", "func (o *OAuth2ConsentRequest) HasContext() bool {\n\tif o != nil && o.Context != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (*CapturedStacktrace) Truth() starlark.Bool { return starlark.True }", "func (s *BasePCREListener) EnterConditional(ctx *ConditionalContext) {}", "func (me TxsdPresentationAttributesFillStrokeStrokeLinejoin) IsBevel() bool {\n\treturn me.String() == \"bevel\"\n}", "func (self *Graphics) Fresh() bool{\n return self.Object.Get(\"fresh\").Bool()\n}", "func (self *Graphics) Contains(child *DisplayObject) bool{\n return self.Object.Call(\"contains\", child).Bool()\n}", "func (ps Presubmit) ContextRequired() bool {\n\treturn 
!ps.Optional && !ps.SkipReport\n}", "func IsRenderbuffer(renderbuffer uint32) bool {\n\tret := C.glowIsRenderbuffer(gpIsRenderbuffer, (C.GLuint)(renderbuffer))\n\treturn ret == TRUE\n}", "func IsRenderbuffer(renderbuffer uint32) bool {\n\tret := C.glowIsRenderbuffer(gpIsRenderbuffer, (C.GLuint)(renderbuffer))\n\treturn ret == TRUE\n}", "func IsTransformFeedback(id uint32) bool {\n ret := C.glowIsTransformFeedback(gpIsTransformFeedback, (C.GLuint)(id))\n return ret == TRUE\n}", "func (me TxsdPresentationAttributesGraphicsPointerEvents) IsVisibleStroke() bool {\n\treturn me.String() == \"visibleStroke\"\n}", "func (context Context) IsUsingKeyboard() bool {\n\treturn imgui.CurrentIO().WantTextInput()\n}", "func (s *BasejossListener) EnterBoolComp(ctx *BoolCompContext) {}", "func IsRenderbuffer(rb Renderbuffer) bool {\n\treturn gl.IsRenderbuffer(rb.Value)\n}", "func VBLENDPD(i, mxy, xy, xy1 operand.Op) { ctx.VBLENDPD(i, mxy, xy, xy1) }", "func (native *OpenGL) EndConditionalRender() {\n\tgl.EndConditionalRender()\n}", "func (me TxsdPresentationAttributesGraphicsShapeRendering) IsCrispEdges() bool {\n\treturn me.String() == \"crispEdges\"\n}", "func _EvtRender(\n\tcontext EvtHandle,\n\tfragment EvtHandle,\n\tflags EvtRenderFlag,\n\tbufferSize uint32,\n\tbuffer *byte,\n\tbufferUsed *uint32,\n\tpropertyCount *uint32,\n) error {\n\tr1, _, e1 := syscall.SyscallN(\n\t\tprocEvtRender.Addr(),\n\t\tuintptr(context),\n\t\tuintptr(fragment),\n\t\tuintptr(flags),\n\t\tuintptr(bufferSize),\n\t\tuintptr(unsafe.Pointer(buffer)), //nolint:gosec // G103: Valid use of unsafe call to pass buffer\n\t\tuintptr(unsafe.Pointer(bufferUsed)), //nolint:gosec // G103: Valid use of unsafe call to pass bufferUsed\n\t\tuintptr(unsafe.Pointer(propertyCount)), //nolint:gosec // G103: Valid use of unsafe call to pass propertyCount\n\t)\n\n\tvar err error\n\tif r1 == 0 {\n\t\tif e1 != 0 {\n\t\t\terr = errnoErr(e1)\n\t\t} else {\n\t\t\terr = syscall.EINVAL\n\t\t}\n\t}\n\treturn err\n}", "func Trace(ray *Ray, samp *TraceSample) bool {\n\n\t// This is the only time that ShaderContext should be created manually, note we set task here.\n\tsg := &ShaderContext{\n\t\tRo: ray.P,\n\t\tRd: ray.D,\n\t\tX: ray.X,\n\t\tY: ray.Y,\n\t\tSx: ray.Sx,\n\t\tSy: ray.Sy,\n\t\tLevel: ray.Level,\n\t\tLambda: ray.Lambda,\n\t\tI: ray.I,\n\t\tTime: ray.Time,\n\t\ttask: ray.Task,\n\t\tImage: image,\n\t\tScramble: ray.Scramble,\n\t\tTransform: m.Matrix4Identity,\n\t\tInvTransform: m.Matrix4Identity,\n\t}\n\n\tif TraceProbe(ray, sg) {\n\n\t\tif sg.Shader == nil { // can't do much with no material\n\t\t\treturn false\n\t\t}\n\n\t\tray.DifferentialTransfer(sg)\n\n\t\tsg.ApplyTransform()\n\n\t\tsg.Shader.Eval(sg)\n\n\t\tif samp != nil {\n\t\t\tsamp.Colour = sg.OutRGB\n\t\t\tsamp.Point = sg.P\n\t\t\tsamp.ElemID = sg.ElemID\n\t\t\tsamp.Geom = sg.Geom\n\t\t}\n\n\t\treturn true\n\t}\n\treturn false\n}", "func (s *BaselimboListener) EnterReal_(ctx *Real_Context) {}", "func XGETBV() { ctx.XGETBV() }", "func (me TxsdPresentationAttributesColorColorRendering) IsOptimizeSpeed() bool {\n\treturn me.String() == \"optimizeSpeed\"\n}", "func (me TxsdFeBlendTypeMode) IsDarken() bool { return me.String() == \"darken\" }", "func (c spanContext) IsValid() bool {\n\treturn c.TraceID != 0 && c.SpanID != 0\n}", "func (me TxsdPresentationAttributesGraphicsShapeRendering) IsOptimizeSpeed() bool {\n\treturn me.String() == \"optimizeSpeed\"\n}", "func (d *drawImageHistoryItem) canMerge(image *Image, colorm *affine.ColorM, mode opengl.CompositeMode, filter graphics.Filter) bool 
{\n\tif d.image != image {\n\t\treturn false\n\t}\n\tif !d.colorm.Equals(colorm) {\n\t\treturn false\n\t}\n\tif d.mode != mode {\n\t\treturn false\n\t}\n\tif d.filter != filter {\n\t\treturn false\n\t}\n\treturn true\n}", "func (self *Rectangle) SameDimensions(a interface{}, b interface{}) bool{\n return self.Object.Call(\"sameDimensions\", a, b).Bool()\n}", "func VPBLENDVB(xy, mxy, xy1, xy2 operand.Op) { ctx.VPBLENDVB(xy, mxy, xy1, xy2) }", "func (c *SceneController) OnRender() {\n}", "func (object Object) Context(value interface{}) Object {\n\t// TODO: incomplete\n\treturn object\n}", "func (g *G1) IsOnG1() bool { return g.isValidProjective() && g.isOnCurve() && g.isRTorsion() }", "func (self *TileSprite) Overlap(displayObject interface{}) bool{\n return self.Object.Call(\"overlap\", displayObject).Bool()\n}", "func (t *CircularTimes) IsPass() bool {\n return t.Wtp != nil\n}", "func (o *CanvasItem) X_EditUseRect() gdnative.Bool {\n\t//log.Println(\"Calling CanvasItem.X_EditUseRect()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"CanvasItem\", \"_edit_use_rect\")\n\n\t// Call the parent method.\n\t// bool\n\tretPtr := gdnative.NewEmptyBool()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n\t// If we have a return type, convert it from a pointer into its actual object.\n\tret := gdnative.NewBoolFromPointer(retPtr)\n\treturn ret\n}" ]
[ "0.5999429", "0.5965332", "0.57624316", "0.54819465", "0.53604686", "0.5321732", "0.52336234", "0.51454157", "0.50726795", "0.5054565", "0.50247794", "0.4973077", "0.49111223", "0.4891894", "0.48881614", "0.4885618", "0.48833802", "0.48631704", "0.48545897", "0.48403186", "0.48180294", "0.48024005", "0.47663236", "0.47626126", "0.47479114", "0.4743843", "0.47419524", "0.4735529", "0.4719309", "0.4708723", "0.4699725", "0.46942705", "0.46900603", "0.46756592", "0.46706998", "0.46695188", "0.4668976", "0.46664485", "0.46591303", "0.46396002", "0.46240938", "0.46226794", "0.46192104", "0.46136862", "0.46082202", "0.46011212", "0.45906326", "0.45876905", "0.45723054", "0.45690072", "0.45470187", "0.4528079", "0.45255142", "0.4501942", "0.44747368", "0.4469587", "0.44516802", "0.4451001", "0.44477272", "0.44409496", "0.44409496", "0.44386405", "0.4437297", "0.44097704", "0.4407775", "0.4406124", "0.43911427", "0.43837795", "0.4381154", "0.43805158", "0.43800864", "0.43713307", "0.43668824", "0.43543565", "0.43543565", "0.434966", "0.43492693", "0.43447238", "0.43415254", "0.43369803", "0.43318015", "0.4331701", "0.4329835", "0.4316532", "0.4315635", "0.43008155", "0.42954126", "0.42878607", "0.4286972", "0.4280394", "0.42791197", "0.42774704", "0.4274004", "0.4272745", "0.42702296", "0.42675778", "0.4264097", "0.42564738", "0.42408207", "0.42385986" ]
0.7145373
0
GetNerCustomizedSeaEcom invokes the alinlp.GetNerCustomizedSeaEcom API synchronously
func (client *Client) GetNerCustomizedSeaEcom(request *GetNerCustomizedSeaEcomRequest) (response *GetNerCustomizedSeaEcomResponse, err error) { response = CreateGetNerCustomizedSeaEcomResponse() err = client.DoAction(request, response) return }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (client *Client) GetNerCustomizedSeaEcomWithCallback(request *GetNerCustomizedSeaEcomRequest, callback func(response *GetNerCustomizedSeaEcomResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetNerCustomizedSeaEcomResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetNerCustomizedSeaEcom(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) GetNerCustomizedSeaEcomWithChan(request *GetNerCustomizedSeaEcomRequest) (<-chan *GetNerCustomizedSeaEcomResponse, <-chan error) {\n\tresponseChan := make(chan *GetNerCustomizedSeaEcomResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetNerCustomizedSeaEcom(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func CreateGetNerCustomizedSeaEcomResponse() (response *GetNerCustomizedSeaEcomResponse) {\n\tresponse = &GetNerCustomizedSeaEcomResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateGetNerCustomizedSeaEcomRequest() (request *GetNerCustomizedSeaEcomRequest) {\n\trequest = &GetNerCustomizedSeaEcomRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"alinlp\", \"2020-06-29\", \"GetNerCustomizedSeaEcom\", \"alinlp\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *Client) GetWsCustomizedChEcomContentWithCallback(request *GetWsCustomizedChEcomContentRequest, callback func(response *GetWsCustomizedChEcomContentResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetWsCustomizedChEcomContentResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetWsCustomizedChEcomContent(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) GetWsCustomizedChEcomContent(request *GetWsCustomizedChEcomContentRequest) (response *GetWsCustomizedChEcomContentResponse, err error) {\n\tresponse = CreateGetWsCustomizedChEcomContentResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}", "func (client *Client) GetWsCustomizedChEcomContentWithChan(request *GetWsCustomizedChEcomContentRequest) (<-chan *GetWsCustomizedChEcomContentResponse, <-chan error) {\n\tresponseChan := make(chan *GetWsCustomizedChEcomContentResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetWsCustomizedChEcomContent(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func (client ModelClient) GetCustomPrebuiltEntityRolesSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tautorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, 
autorest.StatusCodesForRetry...))\n}", "func getProductsSameEAN(c chan []ZunkaSiteProductRx, ean string) {\n\tproducts := []ZunkaSiteProductRx{}\n\n\t// Request product add.\n\tclient := &http.Client{}\n\t// title = \"GABINETE COOLER MASTER MASTERBOX LITE 3.1 TG LATERAL EM VIDRO TEMPERADO ATX/E-ATX/MINI-ITX/MICRO-AT\"\n\treq, err := http.NewRequest(\"GET\", zunkaSiteHost()+\"/setup/products-same-ean\", nil)\n\tif err != nil {\n\t\tError.Print(err)\n\t\tc <- products\n\t\treturn\n\t}\n\t// Query params\n\tq := req.URL.Query()\n\tq.Add(\"ean\", ean)\n\treq.URL.RawQuery = q.Encode()\n\t// Head.\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.SetBasicAuth(zunkaSiteUser(), zunkaSitePass())\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\tError.Print(err)\n\t\tc <- products\n\t\treturn\n\t}\n\t// res, err := http.Post(\"http://localhost:3080/setup/product/add\", \"application/json\", bytes.NewBuffer(reqBody))\n\tdefer res.Body.Close()\n\n\t// Result.\n\tresBody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tError.Print(err)\n\t\tc <- products\n\t\treturn\n\t}\n\t// No 200 status.\n\tif res.StatusCode != 200 {\n\t\tError.Print(errors.New(fmt.Sprintf(\"Getting products same Ean from zunkasite.\\nstatus: %v\\nbody: %v\", res.StatusCode, string(resBody))))\n\t\tc <- products\n\t\treturn\n\t}\n\terr = json.Unmarshal(resBody, &products)\n\tif err != nil {\n\t\tError.Print(err)\n\t}\n\t// Debug.Printf(\"Product[0]: %v\", products[0])\n\tc <- products\n\treturn\n}", "func (a *AllApiService) EnterpriseGetEnterpriseNetworkSegments(ctx _context.Context, body EnterpriseGetEnterpriseNetworkSegments) ([]EnterpriseGetEnterpriseNetworkSegmentsResultItem, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue []EnterpriseGetEnterpriseNetworkSegmentsResultItem\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/enterprise/getEnterpriseNetworkSegments\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &body\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, 
err\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 200 {\n\t\t\tvar v []EnterpriseGetEnterpriseNetworkSegmentsResultItem\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 400 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 500 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}", "func (client ModelClient) ListCustomPrebuiltEntitiesSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tautorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n}", "func (client RosettaNetProcessConfigurationsClient) GetResponder(resp *http.Response) (result IntegrationAccountRosettaNetProcessConfiguration, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client AppsClient) ListAvailableCustomPrebuiltDomainsForCultureSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tautorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n}", "func (client ModelClient) ListCustomPrebuiltEntitiesResponder(resp *http.Response) (result ListEntityExtractor, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result.Value),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client ModelClient) GetCustomPrebuiltEntityRolesResponder(resp *http.Response) (result ListEntityRole, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result.Value),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: 
resp}\n\treturn\n}", "func (client AppsClient) ListAvailableCustomPrebuiltDomainsSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tautorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n}", "func (fM *FeslManager) NuGetPersonas(event GameSpy.EventClientTLSCommand) {\n\tif !event.Client.IsActive {\n\t\tlog.Noteln(\"Client left\")\n\t\treturn\n\t}\n\n\tif event.Client.RedisState.Get(\"clientType\") == \"server\" {\n\t\tfM.NuGetPersonasServer(event)\n\t\treturn\n\t}\n\n\trows, err := fM.stmtGetHeroesByUserID.Query(event.Client.RedisState.Get(\"uID\"))\n\tif err != nil {\n\t\treturn\n\t}\n\n\tpersonaPacket := make(map[string]string)\n\tpersonaPacket[\"TXN\"] = \"NuGetPersonas\"\n\n\tvar i = 0\n\tfor rows.Next() {\n\t\tvar id, userID, heroName, online string\n\t\terr := rows.Scan(&id, &userID, &heroName, &online)\n\t\tif err != nil {\n\t\t\tlog.Errorln(err)\n\t\t\treturn\n\t\t}\n\t\tpersonaPacket[\"personas.\"+strconv.Itoa(i)] = heroName\n\t\tevent.Client.RedisState.Set(\"ownerId.\"+strconv.Itoa(i+1), id)\n\t\ti++\n\t}\n\n\tevent.Client.RedisState.Set(\"numOfHeroes\", strconv.Itoa(i))\n\n\tpersonaPacket[\"personas.[]\"] = strconv.Itoa(i)\n\n\tevent.Client.WriteFESL(event.Command.Query, personaPacket, event.Command.PayloadID)\n\tfM.logAnswer(event.Command.Query, personaPacket, event.Command.PayloadID)\n}", "func (client AppsClient) ListAvailableCustomPrebuiltDomainsForCultureResponder(resp *http.Response) (result ListPrebuiltDomain, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result.Value),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client ModelClient) GetRegexEntityInfosSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tautorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n}", "func CreateGetWsCustomizedChEcomContentRequest() (request *GetWsCustomizedChEcomContentRequest) {\n\trequest = &GetWsCustomizedChEcomContentRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"alinlp\", \"2020-06-29\", \"GetWsCustomizedChEcomContent\", \"alinlp\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (i *ICommunityService) GetUserPartnerEventNews() (*geyser.Request, error) {\n\tsm, err := i.Interface.Methods.Get(schema.MethodKey{\n\t\tName: \"GetUserPartnerEventNews\",\n\t\tVersion: 1,\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq := geyser.NewRequest(i.Interface, sm)\n\n\treturn req, nil\n}", "func (client ModelClient) AddCustomPrebuiltEntityResponder(resp *http.Response) (result UUID, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),\n\t\tautorest.ByUnmarshallingJSON(&result.Value),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client ModelClient) GetRegexEntityEntityInfoSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tautorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n}", "func GetInsertClientForApp(e *entities.Entity) (c *client.InsertClient) {\n\n\tguid := 
e.AttributeByName(config.Get().AttributeName(config.EnvAppID)).Value()\n\tcfapp := cfapps.GetInstance().GetApp(guid.(string))\n\tim := insights.New()\n\n\tcfapp.Lock.RLock()\n\tvcap := cfapp.VcapServices\n\tcfapp.Lock.RUnlock()\n\n\tif vcap == nil {\n\t\tdefaultClient := im.Get(app.Get().Config.GetNewRelicConfig())\n\t\treturn defaultClient\n\t}\n\n\t//Can do this if newrelic isn't found, but also need to check for rpmAccountId and insightsInsertKey values\n\tif _, found := vcap[\"newrelic\"]; !found {\n\t\tdefaultClient := im.Get(app.Get().Config.GetNewRelicConfig())\n\t\treturn defaultClient\n\t}\n\n\tnewrelicSlice := vcap[\"newrelic\"].([]interface{})\n\tnewrelic := newrelicSlice[0].(map[string]interface{})\n\n\t// Get the credentials map from inside of the newrelic map, if it exists.\n\tif _, found := newrelic[\"credentials\"].(map[string]interface{}); !found {\n\t\tdefaultClient := im.Get(app.Get().Config.GetNewRelicConfig())\n\t\treturn defaultClient\n\t}\n\tcredentials := newrelic[\"credentials\"].(map[string]interface{})\n\n\t// Call GetInsertKey\n\tinsertKey, found := GetInsertKey(credentials)\n\tif !found {\n\t\tdefaultClient := im.Get(app.Get().Config.GetNewRelicConfig())\n\t\treturn defaultClient\n\t}\n\t// Call GetRpmId\n\trpmId, found := GetRpmId(credentials)\n\tif !found {\n\t\tdefaultClient := im.Get(app.Get().Config.GetNewRelicConfig())\n\t\treturn defaultClient\n\t}\n\n\t// Call GetLicenseKey\n\tlicenseKey, found := GetLicenseKey(credentials)\n\tif !found {\n\t\tdefaultClient := im.Get(app.Get().Config.GetNewRelicConfig())\n\t\treturn defaultClient\n\t}\n\n\tisEU := strings.HasPrefix(licenseKey, \"eu01x\")\n\tvar accountRegion string\n\tif isEU {\n\t\taccountRegion = \"EU\"\n\t} else {\n\t\taccountRegion = \"US\"\n\t}\n\n\t// Call Get from Insights manager to get a client with this configuration.\n\tc = im.Get(insertKey, rpmId, accountRegion)\n\n\treturn c\n\n}", "func (a *AllApiService) EnterpriseGetEnterpriseNetworkAllocations(ctx _context.Context, body EnterpriseGetEnterpriseNetworkAllocations) ([]EnterpriseGetEnterpriseNetworkAllocationsResultItem, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue []EnterpriseGetEnterpriseNetworkAllocationsResultItem\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/enterprise/getEnterpriseNetworkAllocations\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &body\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, 
localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 200 {\n\t\t\tvar v []EnterpriseGetEnterpriseNetworkAllocationsResultItem\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 400 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 500 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}", "func GuestsGetNics(endpoint string) (int, []byte) {\n\tbuffer := getEndpointwithGuests(endpoint)\n\tbuffer.WriteString(\"/nics\")\n\n\tstatus, data := hq.Get(buffer.String())\n\n\treturn status, data\n}", "func (r *EpNodeAccelRiser) discoverRemotePhase1() {\n\n\t//Since the parent Assembly obj already has the array of NodeAccelRiser objects\n\t//the NodeAccelRiserRF field for this NodeAccelRiser just needs to be pulled from there\n\t//instead of retrieving it using an HTTP call\n\tif r.assemblyRF.AssemblyRF.Assemblies == nil {\n\t\t//this is a lookup error\n\t\terrlog.Printf(\"%s: No Assemblies array found in Parent.\\n\", r.OdataID)\n\t\tr.LastStatus = HTTPsGetFailed\n\t\treturn\n\t}\n\t//If we got this far, then the EpAssembly call to discoverRemotePhase1 was successful\n\tr.LastStatus = HTTPsGetOk\n\n\tif rfDebug > 0 {\n\t\terrlog.Printf(\"%s: %s\\n\", r.ParentOID, r.assemblyRF.AssemblyRaw)\n\t}\n\n\t//use r.RawOrdinal as the index to retrieve the NodeAccelRiser entry from the parent Assembly.Assemblies array,\n\t//and assign it to r.NodeAccelRiserRF\n\tif (len(r.assemblyRF.AssemblyRF.Assemblies) > r.RawOrdinal) && (r.assemblyRF.AssemblyRF.Assemblies[r.RawOrdinal] != nil) {\n\t\tr.NodeAccelRiserRF = r.assemblyRF.AssemblyRF.Assemblies[r.RawOrdinal]\n\t} 
else {\n\t\t//this is a lookup error\n\t\terrlog.Printf(\"%s: failure retrieving NodeAccelRiser from Assembly.Assemblies[%d].\\n\", r.OdataID, r.RawOrdinal)\n\t\tr.LastStatus = HTTPsGetFailed\n\t\treturn\n\t}\n\tr.RedfishSubtype = NodeAccelRiserType\n\n\tif rfVerbose > 0 {\n\t\tjout, _ := json.MarshalIndent(r, \"\", \" \")\n\t\terrlog.Printf(\"%s: %s\\n\", r.NodeAccelRiserURL, jout)\n\t}\n\n\tr.LastStatus = VerifyingData\n}", "func (client *Client) GetWsCustomizedChO2OWithCallback(request *GetWsCustomizedChO2ORequest, callback func(response *GetWsCustomizedChO2OResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetWsCustomizedChO2OResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetWsCustomizedChO2O(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (a *AllApiService) EnterpriseProxyGetEnterpriseProxyEnterprises(ctx _context.Context, body EnterpriseProxyGetEnterpriseProxyEnterprises) ([]EnterpriseProxyGetEnterpriseProxyEnterprisesResultItem, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue []EnterpriseProxyGetEnterpriseProxyEnterprisesResultItem\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/enterpriseProxy/getEnterpriseProxyEnterprises\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &body\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 200 {\n\t\t\tvar v []EnterpriseProxyGetEnterpriseProxyEnterprisesResultItem\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, 
localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 400 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 500 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}", "func (client ModelClient) AddCustomPrebuiltEntitySender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tautorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n}", "func (client RecommendedElasticPoolsClient) GetResponder(resp *http.Response) (result RecommendedElasticPool, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (a *AllApiService) EnterpriseGetEnterpriseMaximumSegments(ctx _context.Context, body EnterpriseGetEnterpriseMaximumSegments) (EnterpriseGetEnterprisePropertyResult, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue EnterpriseGetEnterprisePropertyResult\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/enterprise/getEnterpriseMaximumSegments\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &body\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, 
localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 200 {\n\t\t\tvar v EnterpriseGetEnterprisePropertyResult\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 400 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 500 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}", "func (a *AllApiService) EnterpriseGetEnterpriseNetworkAllocation(ctx _context.Context, body EnterpriseGetEnterpriseNetworkAllocation) (EnterpriseGetEnterpriseNetworkAllocationResult, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue EnterpriseGetEnterpriseNetworkAllocationResult\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/enterprise/getEnterpriseNetworkAllocation\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := 
selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &body\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 200 {\n\t\t\tvar v EnterpriseGetEnterpriseNetworkAllocationResult\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 400 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 500 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}", "func CreateGetWsCustomizedChEcomContentResponse() (response *GetWsCustomizedChEcomContentResponse) {\n\tresponse = &GetWsCustomizedChEcomContentResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func LeninaCrowne(wg *sync.WaitGroup, updateSite bool, knownScenes []string, out chan<- models.ScrapedScene) error {\n\treturn SexLikeReal(wg, updateSite, knownScenes, out, \"leninacrowne\", \"LeninaCrowne\", \"Terrible\")\n}", "func (client ModelClient) GetPrebuiltEntityRolesSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tautorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n}", "func (client ModelClient) AddCustomPrebuiltDomainResponder(resp *http.Response) (result ListUUID, err error) {\n\terr = 
autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),\n\t\tautorest.ByUnmarshallingJSON(&result.Value),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (o *IscsiInitiatorGetIterRequest) executeWithoutIteration(zr *ZapiRunner) (*IscsiInitiatorGetIterResponse, error) {\n\tresult, err := zr.ExecuteUsing(o, \"IscsiInitiatorGetIterRequest\", NewIscsiInitiatorGetIterResponse())\n\tif result == nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*IscsiInitiatorGetIterResponse), err\n}", "func (client AppsClient) ListAvailableCustomPrebuiltDomainsResponder(resp *http.Response) (result ListPrebuiltDomain, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result.Value),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (P *Pension) ConsultarNetos(cedula string, vive bool, familiar string, estatus string) (jSon []byte, err error) {\n\tvar lst []WNetos\n\n\ts := `SELECT pg.cedu, pg.calc, pg.fech, pg.banc, pg.tipo, pg.nume, pg.situ, pg.esta,\n\tpg.neto, sn.obse, sn.desd, sn.hast, sn.mes, CAST(rtp.monto_prima AS character varying), rtp.descripcion,\n\t`\n\n\tif vive == true {\n\t\ts += `pg.situ, pg.base, SISA.monto\n\t\t\t\tFROM space.pagos pg\n\t\t\t\tJOIN space.nomina AS sn ON pg.nomi=sn.oid\n\t\t\t\tLEFT JOIN restaurarprima rtp ON rtp.cedula=pg.cedu AND rtp.nomina=pg.nomi\n\t\t\t\tLEFT JOIN \n\t\t\t\t(SELECT cedu, llav, SUM(mont) AS monto FROM space.nomina_sisa GROUP BY cedu, llav) AS SISA\n\t\t\t\tON pg.cedu=SISA.cedu AND sn.mes=SISA.llav AND sn.obse='NOMINA MENSUAL' AND sn.desd>'2021-12-31'\n\t\tWHERE pg.cedu='` + cedula + `' AND sn.llav != '' ` + estatus + ` ORDER BY fech DESC`\n\t} else {\n\t\ts += `\n\t\t pg.cfam, fami.porcentaje, 0.00\n\t\tFROM space.pagos pg\n\t\tJOIN space.nomina AS sn ON pg.nomi=sn.oid\n\t\tLEFT JOIN restaurarprima rtp ON rtp.cedula=pg.cedu AND rtp.nomina=pg.nomi\n\t\tJOIN familiar fami ON pg.cedu=fami.titular AND pg.cfam=fami.cedula\n\t\tWHERE pg.cedu='` + cedula + `' AND pg.cfam='` + familiar + `' AND sn.llav != '' ` + estatus + ` ORDER BY fech DESC`\n\t}\n\t//fmt.Println(s)\n\tsq, err := sys.PostgreSQLPENSION.Query(s)\n\tutil.Error(err)\n\n\tfor sq.Next() {\n\t\tvar cedu, calc, fech, banc, tipo, nume, situ, esta, nomina, desde, hasta, mes, pmonto, descrip, fam, porc sql.NullString\n\t\tvar neto, montosisa sql.NullFloat64\n\t\tvar netos WNetos\n\t\terr = sq.Scan(&cedu, &calc, &fech, &banc, &tipo, &nume, &situ, &esta, &neto, &nomina, &desde, &hasta, &mes, &pmonto, &descrip, &fam, &porc, &montosisa)\n\t\tutil.Error(err)\n\t\t//\t\tfmt.Println(desde, hasta)\n\t\tnetos.Cedula = util.ValidarNullString(cedu)\n\t\tnetos.Calculos = util.ValidarNullString(calc)\n\t\tnetos.Fecha = util.ValidarNullString(fech)[:10]\n\t\tnetos.Banco = util.ValidarNullString(banc)\n\t\tnetos.Tipo = util.ValidarNullString(tipo)\n\t\tnetos.Numero = util.ValidarNullString(nume)\n\t\tnetos.Situacion = util.ValidarNullString(cedu)\n\t\tnetos.Estatus = util.ValidarNullString(cedu)\n\t\tnetos.Neto = util.ValidarNullFloat64(neto)\n\t\tnetos.MontoSisa = util.ValidarNullFloat64(montosisa)\n\t\tnetos.Nomina = util.ValidarNullString(nomina)\n\t\tnetos.Mes = util.ValidarNullString(mes)\n\t\tnetos.Desde = util.ValidarNullString(desde)[:10]\n\t\tnetos.Hasta = util.ValidarNullString(hasta)[:10]\n\t\tnetos.Porcentaje = 
util.ValidarNullString(porc)\n\t\tnetos.Otros = util.ValidarNullString(descrip) + \"|\" + util.ValidarNullString(pmonto)\n\n\t\tlst = append(lst, netos)\n\t}\n\tjSon, err = json.Marshal(lst)\n\treturn\n}", "func (client ModelClient) ListCustomPrebuiltModelsSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tautorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n}", "func NitricFunction(trigger *faas.NitricTrigger) (*faas.NitricResponse, error) {\n\t// Do something interesting...\n\tresponse := trigger.DefaultResponse()\n\tresponse.SetData([]byte(\"Hello Nitric\"))\n\n\treturn response, nil\n}", "func (tM *TheaterManager) ECNL(event gs.EventClientFESLCommand) {\n\tlog.Noteln(\"Hero RQ\")\n\tanswer := make(map[string]string)\n\tanswer[\"TID\"] = event.Process.Msg[\"TID\"]\n\tanswer[\"GID\"] = event.Process.Msg[\"GID\"]\n\tanswer[\"LID\"] = event.Process.Msg[\"LID\"]\n\tevent.Client.Answer(\"ECNL\", answer, 0x0)\n}", "func (a *EnterpriseProxyApiService) EnterpriseProxyGetEnterpriseProxyEnterprises(ctx _context.Context, body EnterpriseProxyGetEnterpriseProxyEnterprises) ([]EnterpriseProxyGetEnterpriseProxyEnterprisesResultItem, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue []EnterpriseProxyGetEnterpriseProxyEnterprisesResultItem\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/enterpriseProxy/getEnterpriseProxyEnterprises\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &body\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 200 {\n\t\t\tvar v []EnterpriseProxyGetEnterpriseProxyEnterprisesResultItem\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error 
= err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 400 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 500 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}", "func (a *AllApiService) EnterpriseGetEnterprise(ctx _context.Context, body EnterpriseGetEnterprise) (EnterpriseGetEnterpriseResult, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue EnterpriseGetEnterpriseResult\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/enterprise/getEnterprise\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &body\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif 
localVarHttpResponse.StatusCode == 200 {\n\t\t\tvar v EnterpriseGetEnterpriseResult\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 400 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 500 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}", "func (a *AllApiService) NetworkGetNetworkEnterprises(ctx _context.Context, body NetworkGetNetworkEnterprises) ([]NetworkGetNetworkEnterprisesResultItem, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue []NetworkGetNetworkEnterprisesResultItem\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/network/getNetworkEnterprises\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &body\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := 
_ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 200 {\n\t\t\tvar v []NetworkGetNetworkEnterprisesResultItem\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 400 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 500 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}", "func (client ModelClient) UpdateCustomPrebuiltEntityRoleSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tautorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n}", "func (a *Client) GetNiaapiDcnmReleaseRecommends(params *GetNiaapiDcnmReleaseRecommendsParams) (*GetNiaapiDcnmReleaseRecommendsOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetNiaapiDcnmReleaseRecommendsParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"GetNiaapiDcnmReleaseRecommends\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/niaapi/DcnmReleaseRecommends\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetNiaapiDcnmReleaseRecommendsReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*GetNiaapiDcnmReleaseRecommendsOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\tunexpectedSuccess := result.(*GetNiaapiDcnmReleaseRecommendsDefault)\n\treturn nil, runtime.NewAPIError(\"unexpected success response: content available as default response in error\", unexpectedSuccess, unexpectedSuccess.Code())\n}", "func dcrm_genPubKey(msgprex string,account string,cointype string,ch chan interface{}, mode string,nonce 
string) {\n\n fmt.Println(\"========dcrm_genPubKey============\")\n\n wk,err := FindWorker(msgprex)\n if err != nil || wk == nil {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:no find worker\",Err:err}\n\tch <- res\n\treturn\n }\n id := wk.id\n \n GetEnodesInfo(wk.groupid)\n\n if int32(Enode_cnts) != int32(NodeCnt) {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:group is not ready\",Err:GetRetErr(ErrGroupNotReady)}\n\tch <- res\n\treturn\n }\n\n if types.IsDefaultED25519(cointype) {\n\tok2 := KeyGenerate_ed(msgprex,ch,id,cointype)\n\tif ok2 == false {\n\t return\n\t}\n\n\titertmp := workers[id].edpk.Front()\n\tif itertmp == nil {\n\t logs.Debug(\"get workers[id].edpk fail.\")\n\t res := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get workers[id].edpk fail\",Err:GetRetErr(ErrGetGenPubkeyFail)}\n\t ch <- res\n\t return\n\t}\n\tsedpk := []byte(itertmp.Value.(string))\n\n\titertmp = workers[id].edsave.Front()\n\tif itertmp == nil {\n\t logs.Debug(\"get workers[id].edsave fail.\")\n\t res := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get workers[id].edsave fail\",Err:GetRetErr(ErrGetGenSaveDataFail)}\n\t ch <- res\n\t return\n\t}\n\t\n\tsedsave := itertmp.Value.(string)\n\tpubs := &PubKeyData{Pub:string(sedpk),Save:sedsave,Nonce:\"0\",GroupId:wk.groupid,LimitNum:wk.limitnum,Mode:mode}\n\tepubs,err := Encode2(pubs)\n\tif err != nil {\n\t res := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:encode PubKeyData fail in req ed pubkey\",Err:err}\n\t ch <- res\n\t return\n\t}\n\t\n\tss,err := Compress([]byte(epubs))\n\tif err != nil {\n\t res := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:compress PubKeyData fail in req ed pubkey\",Err:err}\n\t ch <- res\n\t return\n\t}\n\n\t////TODO\n\tAllAccounts = append(AllAccounts,pubs)\n\t////////\n\n\tpubkeyhex := hex.EncodeToString(sedpk)\n\tfmt.Println(\"===============dcrm_genPubKey,pubkey = %s,nonce =%s ==================\",pubkeyhex,nonce)\n\t////save to db\n\t////add for req addr\n\t/*reqnonce,_,err := GetReqAddrNonce(account)\n\tif err != nil {\n\t reqnonce = \"0\"\n\t}\n\tSetReqAddrNonce(account,reqnonce)*/\n\tkey2 := Keccak256Hash([]byte(strings.ToLower(account))).Hex()\n\tkd := KeyData{Key:[]byte(key2),Data:nonce}\n\tPubKeyDataChan <-kd\n\n\t/////\n\tLdbPubKeyData[key2] = []byte(nonce)\n\t//key2 = Keccak256Hash([]byte(strings.ToLower(account+\":\"+\"LOCKOUT\"))).Hex()\n\t//LdbPubKeyData[key2] = []byte(\"0\")\n\t////\n\n\ttip,reply := AcceptReqAddr(account,cointype,wk.groupid,nonce,wk.limitnum,mode,true,\"true\",\"Success\",pubkeyhex,\"\",\"\",\"\")\n\tif reply != nil {\n\t res := RpcDcrmRes{Ret:\"\",Tip:tip,Err:fmt.Errorf(\"update req addr status error.\")}\n\t ch <- res\n\t return\n\t}\n \n\tif !strings.EqualFold(cointype, \"ALL\") {\n\t h := cryptocoins.NewCryptocoinHandler(cointype)\n\t if h == nil {\n\t\tres := RpcDcrmRes{Ret:\"\",Tip:\"cointype is not supported\",Err:fmt.Errorf(\"req addr fail,cointype is not supported.\")}\n\t\tch <- res\n\t\treturn\n\t }\n\n\t ctaddr, err := h.PublicKeyToAddress(pubkeyhex)\n\t if err != nil {\n\t\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get dcrm addr fail from pubkey:\"+pubkeyhex,Err:err}\n\t\tch <- res\n\t\treturn\n\t }\n\n\t //add for lockout\n\t kd = KeyData{Key:sedpk[:],Data:ss}\n\t PubKeyDataChan <-kd\n\t /////\n\t LdbPubKeyData[string(sedpk[:])] = []byte(ss)\n\t ////\n\n\t key := Keccak256Hash([]byte(strings.ToLower(account + \":\" + cointype))).Hex()\n\t kd = KeyData{Key:[]byte(key),Data:ss}\n\t 
PubKeyDataChan <-kd\n\t /////\n\t LdbPubKeyData[key] = []byte(ss)\n\t ////\n\n\t key = Keccak256Hash([]byte(strings.ToLower(ctaddr))).Hex()\n\t kd = KeyData{Key:[]byte(key),Data:ss}\n\t PubKeyDataChan <-kd\n\t /////\n\t LdbPubKeyData[key] = []byte(ss)\n\t ////\n\n\t /*lock.Lock()\n\t dir := GetDbDir()\n\t db,err := ethdb.NewLDBDatabase(dir, 0, 0)\n\t if err != nil { \n\t\tlock.Unlock()\n\t\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:open level db fail\",Err:err}\n\t\tch <- res\n\t\treturn\n\t }\n\n\t h := cryptocoins.NewCryptocoinHandler(cointype)\n\t if h == nil {\n\t\tdb.Close()\n\t\tlock.Unlock()\n\t\tres := RpcDcrmRes{Ret:\"\",Tip:\"cointype is not supported\",Err:fmt.Errorf(\"req addr fail,cointype is not supported.\")}\n\t\tch <- res\n\t\treturn\n\t }\n\n\t ctaddr, err := h.PublicKeyToAddress(pubkeyhex)\n\t if err != nil {\n\t\tdb.Close()\n\t\tlock.Unlock()\n\t\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get dcrm addr fail from pubkey:\"+pubkeyhex,Err:err}\n\t\tch <- res\n\t\treturn\n\t }\n\n\t //add for lockout\n\t db.Put(sedpk[:],[]byte(ss))\n\t key := Keccak256Hash([]byte(strings.ToLower(account + \":\" + cointype))).Hex()\n\t db.Put([]byte(key),[]byte(ss))\n\t key = Keccak256Hash([]byte(strings.ToLower(ctaddr))).Hex()\n\t db.Put([]byte(key),[]byte(ss))\n\t db.Close()\n\t lock.Unlock()*/\n\t} else {\n\t kd = KeyData{Key:sedpk[:],Data:ss}\n\t PubKeyDataChan <-kd\n\t /////\n\t LdbPubKeyData[string(sedpk[:])] = []byte(ss)\n\t ////\n\n\t key := Keccak256Hash([]byte(strings.ToLower(account + \":\" + cointype))).Hex()\n\t kd = KeyData{Key:[]byte(key),Data:ss}\n\t PubKeyDataChan <-kd\n\t /////\n\t LdbPubKeyData[key] = []byte(ss)\n\t ////\n\n\t for _, ct := range cryptocoins.Cointypes {\n\t\tif strings.EqualFold(ct, \"ALL\") {\n\t\t continue\n\t\t}\n\n\t\th := cryptocoins.NewCryptocoinHandler(ct)\n\t\tif h == nil {\n\t\t continue\n\t\t}\n\t\tctaddr, err := h.PublicKeyToAddress(pubkeyhex)\n\t\tif err != nil {\n\t\t continue\n\t\t}\n\t\t\n\t\tkey = Keccak256Hash([]byte(strings.ToLower(ctaddr))).Hex()\n\t\tkd = KeyData{Key:[]byte(key),Data:ss}\n\t\tPubKeyDataChan <-kd\n\t\t/////\n\t\tLdbPubKeyData[key] = []byte(ss)\n\t\t////\n\t }\n\t \n\t /*lock.Lock()\n\t dir := GetDbDir()\n\t db,err := ethdb.NewLDBDatabase(dir, 0, 0)\n\t if err != nil { \n\t\tlock.Unlock()\n\t\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:open level db fail\",Err:err}\n\t\tch <- res\n\t\treturn\n\t }\n\n\t //add for lockout\n\t db.Put(sedpk[:],[]byte(ss))\n\t key := Keccak256Hash([]byte(strings.ToLower(account + \":\" + cointype))).Hex()\n\t db.Put([]byte(key),[]byte(ss))\n\t \n\t for _, ct := range cryptocoins.Cointypes {\n\t\tif strings.EqualFold(ct, \"ALL\") {\n\t\t continue\n\t\t}\n\n\t\th := cryptocoins.NewCryptocoinHandler(ct)\n\t\tif h == nil {\n\t\t continue\n\t\t}\n\t\tctaddr, err := h.PublicKeyToAddress(pubkeyhex)\n\t\tif err != nil {\n\t\t continue\n\t\t}\n\t\t\n\t\tkey = Keccak256Hash([]byte(strings.ToLower(ctaddr))).Hex()\n\t\tdb.Put([]byte(key),[]byte(ss))\n\t }\n\n\t db.Close()\n\t lock.Unlock()\n\t */\n\t}\n\n\tres := RpcDcrmRes{Ret:pubkeyhex,Tip:\"\",Err:nil}\n\tch <- res\n\treturn\n }\n \n ok := KeyGenerate_ec2(msgprex,ch,id,cointype)\n if ok == false {\n\treturn\n }\n\n iter := workers[id].pkx.Front()\n if iter == nil {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get pkx fail in req ec2 pubkey\",Err:GetRetErr(ErrGetGenPubkeyFail)}\n\tch <- res\n\treturn\n }\n spkx := iter.Value.(string)\n pkx := 
new(big.Int).SetBytes([]byte(spkx))\n iter = workers[id].pky.Front()\n if iter == nil {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get pky fail in req ec2 pubkey\",Err:GetRetErr(ErrGetGenPubkeyFail)}\n\tch <- res\n\treturn\n }\n spky := iter.Value.(string)\n pky := new(big.Int).SetBytes([]byte(spky))\n ys := secp256k1.S256().Marshal(pkx,pky)\n\n iter = workers[id].save.Front()\n if iter == nil {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get save data fail in req ec2 pubkey\",Err:GetRetErr(ErrGetGenSaveDataFail)}\n\tch <- res\n\treturn\n }\n save := iter.Value.(string)\n pubs := &PubKeyData{Pub:string(ys),Save:save,Nonce:\"0\",GroupId:wk.groupid,LimitNum:wk.limitnum,Mode:mode}\n epubs,err := Encode2(pubs)\n if err != nil {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:encode PubKeyData fail in req ec2 pubkey\",Err:err}\n\tch <- res\n\treturn\n }\n \n ss,err := Compress([]byte(epubs))\n if err != nil {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:compress PubKeyData fail in req ec2 pubkey\",Err:err}\n\tch <- res\n\treturn\n }\n \n ////TODO\n AllAccounts = append(AllAccounts,pubs)\n ////////\n\n pubkeyhex := hex.EncodeToString(ys)\n fmt.Println(\"===============dcrm_genPubKey,pubkey = %s,nonce =%s ==================\",pubkeyhex,nonce)\n //tip, err := StorePubAccount(wk.groupid, pubkeyhex, mode)\n //fmt.Printf(\"==== dcrm_genPubKey() ====, StorePubAccount tip: %v, err: %v\\n\", tip, err)\n ////save to db\n \n ////add for req addr\n /*reqnonce,_,err := GetReqAddrNonce(account)\n if err != nil {\n\treqnonce = \"0\"\n }\n SetReqAddrNonce(account,reqnonce)*/\n key2 := Keccak256Hash([]byte(strings.ToLower(account))).Hex()\n kd := KeyData{Key:[]byte(key2),Data:nonce}\n PubKeyDataChan <-kd\n /////\n LdbPubKeyData[key2] = []byte(nonce)\n //key2 = Keccak256Hash([]byte(strings.ToLower(account+\":\"+\"LOCKOUT\"))).Hex()\n //LdbPubKeyData[key2] = []byte(\"0\")\n ////\n\n tip,reply := AcceptReqAddr(account,cointype,wk.groupid,nonce,wk.limitnum,mode,true,\"true\",\"Success\",pubkeyhex,\"\",\"\",\"\")\n if reply != nil {\n\tres := RpcDcrmRes{Ret:\"\",Tip:tip,Err:fmt.Errorf(\"update req addr status error.\")}\n\tch <- res\n\treturn\n }\n\n if !strings.EqualFold(cointype, \"ALL\") {\n\th := cryptocoins.NewCryptocoinHandler(cointype)\n\tif h == nil {\n\t res := RpcDcrmRes{Ret:\"\",Tip:\"cointype is not supported\",Err:fmt.Errorf(\"req addr fail,cointype is not supported.\")}\n\t ch <- res\n\t return\n\t}\n\n\tctaddr, err := h.PublicKeyToAddress(pubkeyhex)\n\tif err != nil {\n\t res := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get dcrm addr fail from pubkey:\"+pubkeyhex,Err:err}\n\t ch <- res\n\t return\n\t}\n\t\n\tkd = KeyData{Key:ys,Data:ss}\n\tPubKeyDataChan <-kd\n\t/////\n\tLdbPubKeyData[string(ys)] = []byte(ss)\n\t////\n\n\tkey := Keccak256Hash([]byte(strings.ToLower(account + \":\" + cointype))).Hex()\n\tkd = KeyData{Key:[]byte(key),Data:ss}\n\tPubKeyDataChan <-kd\n\t/////\n\tLdbPubKeyData[key] = []byte(ss)\n\t////\n\n\tkey = Keccak256Hash([]byte(strings.ToLower(ctaddr))).Hex()\n\tkd = KeyData{Key:[]byte(key),Data:ss}\n\tPubKeyDataChan <-kd\n\t/////\n\tLdbPubKeyData[key] = []byte(ss)\n\t////\n\n\t/*lock.Lock()\n\tdir := GetDbDir()\n\tdb,err := ethdb.NewLDBDatabase(dir, 0, 0)\n\tif err != nil { \n\t lock.Unlock()\n\t res := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:open level db fail\",Err:err}\n\t ch <- res\n\t return\n\t}\n\n\th := cryptocoins.NewCryptocoinHandler(cointype)\n\tif h == nil {\n\t 
db.Close()\n\t lock.Unlock()\n\t res := RpcDcrmRes{Ret:\"\",Tip:\"cointype is not supported\",Err:fmt.Errorf(\"req addr fail,cointype is not supported.\")}\n\t ch <- res\n\t return\n\t}\n\n\tctaddr, err := h.PublicKeyToAddress(pubkeyhex)\n\tif err != nil {\n\t db.Close()\n\t lock.Unlock()\n\t res := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get dcrm addr fail from pubkey:\"+pubkeyhex,Err:err}\n\t ch <- res\n\t return\n\t}\n\n\t//add for lockout\n\tdb.Put(ys,[]byte(ss))\n\tkey := Keccak256Hash([]byte(strings.ToLower(account + \":\" + cointype))).Hex()\n\tdb.Put([]byte(key),[]byte(ss))\n\tkey = Keccak256Hash([]byte(strings.ToLower(ctaddr))).Hex()\n\tdb.Put([]byte(key),[]byte(ss))\n\tdb.Close()\n\tlock.Unlock()*/\n } else {\n\tkd = KeyData{Key:ys,Data:ss}\n\tPubKeyDataChan <-kd\n\t/////\n\tLdbPubKeyData[string(ys)] = []byte(ss)\n\t////\n\n\tkey := Keccak256Hash([]byte(strings.ToLower(account + \":\" + cointype))).Hex()\n\tkd = KeyData{Key:[]byte(key),Data:ss}\n\tPubKeyDataChan <-kd\n\t/////\n\tLdbPubKeyData[key] = []byte(ss)\n\t////\n\n\tfor _, ct := range cryptocoins.Cointypes {\n\t if strings.EqualFold(ct, \"ALL\") {\n\t\tcontinue\n\t }\n\n\t h := cryptocoins.NewCryptocoinHandler(ct)\n\t if h == nil {\n\t\tcontinue\n\t }\n\t ctaddr, err := h.PublicKeyToAddress(pubkeyhex)\n\t if err != nil {\n\t\tcontinue\n\t }\n\t \n\t key = Keccak256Hash([]byte(strings.ToLower(ctaddr))).Hex()\n\t kd = KeyData{Key:[]byte(key),Data:ss}\n\t PubKeyDataChan <-kd\n\t /////\n\t LdbPubKeyData[key] = []byte(ss)\n\t ////\n\t}\n\t/*lock.Lock()\n\tdir := GetDbDir()\n\tdb,err := ethdb.NewLDBDatabase(dir, 0, 0)\n\tif err != nil { \n\t lock.Unlock()\n\t res := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:open level db fail\",Err:err}\n\t ch <- res\n\t return\n\t}\n\n\t//add for lockout\n\tdb.Put(ys,[]byte(ss))\n\tkey := Keccak256Hash([]byte(strings.ToLower(account + \":\" + cointype))).Hex()\n\tdb.Put([]byte(key),[]byte(ss))\n key = Keccak256Hash([]byte(strings.ToLower(pubkeyhex))).Hex()\n db.Put([]byte(key),[]byte(ss))\n\t\n\tfor _, ct := range cryptocoins.Cointypes {\n\t if strings.EqualFold(ct, \"ALL\") {\n\t\tcontinue\n\t }\n\n\t h := cryptocoins.NewCryptocoinHandler(ct)\n\t if h == nil {\n\t\tcontinue\n\t }\n\t ctaddr, err := h.PublicKeyToAddress(pubkeyhex)\n\t if err != nil {\n\t\tcontinue\n\t }\n\t \n\t key = Keccak256Hash([]byte(strings.ToLower(ctaddr))).Hex()\n\t db.Put([]byte(key),[]byte(ss))\n\t}\n\n\tdb.Close()\n\tlock.Unlock()\n\t*/\n }\n \n res := RpcDcrmRes{Ret:pubkeyhex,Tip:\"\",Err:nil}\n ch <- res\n}", "func (client *Client) GetKeywordChEcomWithCallback(request *GetKeywordChEcomRequest, callback func(response *GetKeywordChEcomResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetKeywordChEcomResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetKeywordChEcom(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (m *TelecomExpenseManagementPartner) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {\n res := m.Entity.GetFieldDeserializers()\n res[\"appAuthorized\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetBoolValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetAppAuthorized(val)\n }\n return nil\n 
}\n res[\"displayName\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetDisplayName(val)\n }\n return nil\n }\n res[\"enabled\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetBoolValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetEnabled(val)\n }\n return nil\n }\n res[\"lastConnectionDateTime\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetTimeValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetLastConnectionDateTime(val)\n }\n return nil\n }\n res[\"url\"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {\n val, err := n.GetStringValue()\n if err != nil {\n return err\n }\n if val != nil {\n m.SetUrl(val)\n }\n return nil\n }\n return res\n}", "func (page VirtualMachineListResultPageClient) NotDone() bool {\n\treturn page.vmlrp.NotDone()\n}", "func getCRDClient(resController *ClusterWatcher, gvr schema.GroupVersionResource, resInfo *autoCreateKAMInfo) dynamic.ResourceInterface {\n\tvar intfNoNS = resController.plugin.dynamicClient.Resource(gvr)\n\tvar intf dynamic.ResourceInterface\n\tif resInfo.namespace != \"\" {\n\t\tintf = intfNoNS.Namespace(resInfo.namespace)\n\t} else {\n\t\tintf = intfNoNS\n\t}\n\treturn intf\n}", "func (client ModelClient) GetCustomEntityRoleSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tautorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n}", "func (t *Colorado) getBrokerSSIsByPartialCompositeKey(stub shim.ChaincodeStubInterface, args []string) peer.Response {\n\t if len(args) != 6 {\n\t\t return shim.Error(\"Incorrect number of arguments. Expecting 6.\")\n\t }\n \n\t if len(args[0]) == 0 {\n\t\t return shim.Error(\"1st argument (Executing Broker) must be a non-empty string\")\n\t }\n \n\t eb := strings.ToUpper(args[0])\n\t im := strings.ToUpper(args[1])\n\t product := strings.ToUpper(args[2])\n\t subProduct := strings.ToUpper(args[3])\n\t currency := strings.ToUpper(args[4])\n\t settleLocation := strings.ToUpper(args[5])\n\t fmt.Println(\"- start getting Broker SSIs by partial composite key: \" + eb + \", \" + im + \", \" + product + \", \" + subProduct + \", \" + currency + \", \" + settleLocation)\n \n\t // buffer is a JSON array containing query results\n\t var buffer bytes.Buffer\n\t buffer.WriteString(\"[\")\n \n\t // Query the brokerSSICompositeKey index by partial fields (e.g. 
EB1 only)\n\t // This will execute a key range query on all keys starting with 'EB1'\n\t allIMs := [1]string{\"IM1\"}\n\t allEBs := [2]string{\"EB1\", \"EB2\"}\n\t allABs := [2]string{\"AB1\", \"AB2\"}\n\t indexName := \"brokerSSICompositeKey\"\n\t for i := 0; i < len(allIMs); i++ {\n\t\t for j := 0; j < len(allEBs); j++ {\n\t\t\t for k := 0; k < len(allABs); k++ {\n\t\t\t\t brokerSSIResultsIterator, err := stub.GetPrivateDataByPartialCompositeKey(\"privateBrokerSSIFor\" + allIMs[i] + allEBs[j] + allABs[k], indexName, []string{eb, im, product, subProduct, currency, settleLocation})\n\t\t\t\t if err != nil {\n\t\t\t\t\t // not return error for non-entitled privateBrokerSSIForIMxEByABz\n\t\t\t\t\t // return shim.Error(err.Error())\n\t\t\t\t } else {\n\t\t\t\t\t defer brokerSSIResultsIterator.Close()\n\t\t\t\t\t for brokerSSIResultsIterator.HasNext() {\n\t\t\t\t\t\t brokerSSIResult, err := brokerSSIResultsIterator.Next()\n\t\t\t\t\t\t if err != nil {\n\t\t\t\t\t\t\t return shim.Error(err.Error())\n\t\t\t\t\t\t }\n\t\t\t\t\t\t if buffer.Len() > 1 {\n\t\t\t\t\t\t\t buffer.WriteString(\",\")\n\t\t\t\t\t\t }\n\t\t\t\t\t\t buffer.WriteString(\"{\\\"compositeKey\\\":\")\n\t\t\t\t\t\t buffer.WriteString(\"\\\"\")\n\t\t\t\t\t\t buffer.WriteString(brokerSSIResult.Key)\n\t\t\t\t\t\t buffer.WriteString(\"\\\"\")\n \n\t\t\t\t\t\t buffer.WriteString(\", \\\"brokerSSI\\\":\")\n\t\t\t\t\t\t // record is a JSON object, so write as-is\n\t\t\t\t\t\t buffer.WriteString(string(brokerSSIResult.Value))\n\t\t\t\t\t\t buffer.WriteString(\"}\")\n\t\t\t\t\t }\n\t\t\t\t }\n\t\t\t }\n\t\t }\n\t }\n\t buffer.WriteString(\"]\")\n \n\t fmt.Println(\"- end getting Broker SSIs with query result:\\n\" + buffer.String())\n \n\t return shim.Success(buffer.Bytes())\n }", "func (client ModelClient) GetPrebuiltEntityRolesResponder(resp *http.Response) (result ListEntityRole, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result.Value),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func superNetperfRR(s *helpers.SSHMeta, client string, server string, num int) *helpers.CmdRes {\n\treturn superNetperfRRIPv4(s, client, server, num)\n}", "func (client BaseClient) EnableConsoleResponder(resp *http.Response) (result SetObject, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound),\n\t\tautorest.ByUnmarshallingJSON(&result.Value),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (e *Epic) GetEpic() {\n\n}", "func (o *NvmeServiceCollectionGetDefault) Code() int {\n\treturn o._statusCode\n}", "func (client ModelClient) AddCustomPrebuiltDomainSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tautorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n}", "func HDRnetE2E(ctx context.Context, s *testing.State) {\n\thdrnetEnablePath := \"/run/camera/force_enable_hdrnet\"\n\tif err := createFile(hdrnetEnablePath); err != nil {\n\t\ts.Fatalf(\"Failed to create HDRnet force enable file %s: %s\", hdrnetEnablePath, err)\n\t}\n\tdefer func() {\n\t\tif err := os.Remove(hdrnetEnablePath); err != nil {\n\t\t\ts.Errorf(\"Failed to remove HDRnet force enable file %s: %s\", hdrnetEnablePath, err)\n\t\t}\n\t}()\n\n\trunSubTest := 
s.FixtValue().(cca.FixtureData).RunTestWithApp\n\tcr := s.FixtValue().(cca.FixtureData).Chrome\n\ttconn, err := cr.TestAPIConn(ctx)\n\tif err != nil {\n\t\ts.Fatal(\"Failed to establish connection to the test API extension\")\n\t}\n\tsubTestTimeout := 120 * time.Second\n\n\tfor _, t := range []struct {\n\t\tname string\n\t\ttestFunc func(context.Context, *cca.App, *chrome.TestConn) error\n\t}{\n\t\t{\"testPhotoTaking\", testPhotoTaking},\n\t\t{\"testVideoRecording\", testVideoRecording},\n\t} {\n\t\tsubTestCtx, cancel := context.WithTimeout(ctx, subTestTimeout)\n\t\ts.Run(subTestCtx, t.name, func(ctx context.Context, s *testing.State) {\n\t\t\tif err := runSubTest(ctx, func(ctx context.Context, app *cca.App) error {\n\t\t\t\treturn t.testFunc(ctx, app, tconn)\n\t\t\t}, cca.TestWithAppParams{StopAppOnlyIfExist: true}); err != nil {\n\t\t\t\ts.Errorf(\"Failed to pass %v subtest: %v\", t.name, err)\n\t\t\t}\n\t\t})\n\t\tcancel()\n\t}\n}", "func (client ModelClient) GetRegexEntityEntityInfoResponder(resp *http.Response) (result RegexEntityExtractor, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (a *AllApiService) EnterpriseGetEnterpriseConfigurations(ctx _context.Context, body EnterpriseGetEnterpriseConfigurations) ([]EnterpriseGetEnterpriseConfigurationsResultItem, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue []EnterpriseGetEnterpriseConfigurationsResultItem\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/enterprise/getEnterpriseConfigurations\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &body\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif 
localVarHttpResponse.StatusCode == 200 {\n\t\t\tvar v []EnterpriseGetEnterpriseConfigurationsResultItem\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 400 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 500 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}", "func (client ModelClient) ListCustomPrebuiltIntentsResponder(resp *http.Response) (result ListIntentClassifier, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result.Value),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func name_node(message nn_proto.Propuesta){\n\tvar conn *grpc.ClientConn\n\tconn, err := grpc.Dial(\"dist13:9000\", grpc.WithInsecure())\n\tif err != nil {\n\t\tlog.Fatalf(\"could not connect: %s\", err)\n\t}\n\tdefer conn.Close()\n\n\tc := nn_proto.NewHelloworldServiceClient(conn)\n\n\tresponse, err := c.EnviarPropuesta(context.Background(), &message)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error when calling EnviarPropuesta: %s\", err)\n\t}\n\tmsgenviados = msgenviados + 1\n\tmessagedn := dn_proto.PropRequest{\n\t\tCantidadn1: response.Cantidadn1,\n\t\tCantidadn2: response.Cantidadn2,\n\t\tCantidadn3: response.Cantidadn3,\n\t\tNombrel: response.Nombrel,\n\t\tCantidadtotal: response.Cantidadtotal,\n\t}\n\tif response.Nombrel == \"Propuesta aceptada\" {\n\t\tmessagedn = dn_proto.PropRequest{\n\t\t\tCantidadn1: message.Cantidadn1,\n\t\t\tCantidadn2: message.Cantidadn2,\n\t\t\tCantidadn3: message.Cantidadn3,\n\t\t\tNombrel: message.Nombrel,\n\t\t\tCantidadtotal: message.Cantidadtotal,\n\t\t}\n\t}\n\tvar maquina string = \"\"\n\tif message.Cantidadn1 != \"0\"{\n\t\tmaquina = \"dist14:9001\"\n\t\tconectardn(maquina, messagedn)\n\t}\n\tif message.Cantidadn3 != \"0\"{\n\t\tmaquina = \"dist16:9003\"\n\t\tconectardn(maquina, messagedn)\n\t}\n\tdescargarlocal(messagedn)\n}", "func (calr ClassicAdministratorListResult) classicAdministratorListResultPreparer(ctx context.Context) (*http.Request, error) {\n\tif !calr.hasNextLink() {\n\t\treturn nil, nil\n\t}\n\treturn 
autorest.Prepare((&http.Request{}).WithContext(ctx),\n\t\tautorest.AsJSON(),\n\t\tautorest.AsGet(),\n\t\tautorest.WithBaseURL(to.String(calr.NextLink)))\n}", "func InitServidorVistas(me comun.HostPort) {\n\t// Init\n\te := new(typeSV)\n\te.tick = time.Tick(timeHeartbeat * time.Millisecond)\n\te.requests = make(chan msgsys.Message)\n\te.nodeList = make(map[comun.HostPort]*int)\n\te.actualView.NumView = 0\n\te.attemptView.NumView = 0\n\te.ms = msgsys.Make(me)\n\te.state = s_init\n\t// Start the mail box for request\n\tgo e.mailBox()\n\tfmt.Println(\"GV operative -> \", e.me)\n\t// Loop while all ok\n\tfor e.state != s_fatal {\n\t\te.tratarEventos()\n\t}\n\tlog.Fatal(\"ERROR: The content has been losed\")\n}", "func (client ModelClient) GetPrebuiltEntityRoleSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tautorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n}", "func (client ModelClient) ListPrebuiltEntitiesResponder(resp *http.Response) (result ListAvailablePrebuiltEntityModel, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result.Value),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (l *Libvirt) NodeGetSevInfo(Nparams int32, Flags uint32) (rParams []TypedParam, rNparams int32, err error) {\n\tvar buf []byte\n\n\targs := NodeGetSevInfoArgs {\n\t\tNparams: Nparams,\n\t\tFlags: Flags,\n\t}\n\n\tbuf, err = encode(&args)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar r response\n\tr, err = l.requestStream(395, constants.Program, buf, nil, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Return value unmarshaling\n\ttpd := typedParamDecoder{}\n\tct := map[string]xdr.TypeDecoder{\"libvirt.TypedParam\": tpd}\n\trdr := bytes.NewReader(r.Payload)\n\tdec := xdr.NewDecoderCustomTypes(rdr, 0, ct)\n\t// Params: []TypedParam\n\t_, err = dec.Decode(&rParams)\n\tif err != nil {\n\t\treturn\n\t}\n\t// Nparams: int32\n\t_, err = dec.Decode(&rNparams)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}", "func (gs *GRPCClient) AfterInit() {}", "func (client MultipleResponsesClient) Get202None204NoneDefaultNone202InvalidSender(req *http.Request) (*http.Response, error) {\n return autorest.SendWithSender(client, req)\n}", "func (client ModelClient) GetRegexEntityRolesSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tautorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n}", "func (client ModelClient) GetRegexEntityInfosResponder(resp *http.Response) (result ListRegexEntityExtractor, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result.Value),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func RegAdaptiveNetwork(nameSpaceID string, mcisID string, networkReq *tbmcis.NetworkReq) (*tbmcis.AgentInstallContentWrapper, model.WebStatus) {\n\tvar originalUrl = \"/ns/{nsId}/network/mcis/{mcisId}\"\n\n\tvar paramMapper = make(map[string]string)\n\tparamMapper[\"{nsId}\"] = nameSpaceID\n\tparamMapper[\"{mcisId}\"] = mcisID\n\turlParam := util.MappingUrlParameter(originalUrl, paramMapper)\n\n\turl := util.TUMBLEBUG + urlParam\n\n\tpbytes, _ := 
json.Marshal(networkReq)\n\tresp, err := util.CommonHttp(url, pbytes, http.MethodPost)\n\n\tagentInstallContentWrapper := tbmcis.AgentInstallContentWrapper{}\n\treturnStatus := model.WebStatus{}\n\n\trespBody := resp.Body\n\trespStatus := resp.StatusCode\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn &agentInstallContentWrapper, model.WebStatus{StatusCode: 500, Message: err.Error()}\n\t}\n\n\tif respStatus != 200 && respStatus != 201 { // 호출은 정상이나, 가져온 결과값이 200, 201아닌 경우 message에 담겨있는 것을 WebStatus에 set\n\t\terrorInfo := model.ErrorInfo{}\n\t\tjson.NewDecoder(respBody).Decode(&errorInfo)\n\t\tfmt.Println(\"respStatus != 200 reason \", errorInfo)\n\t\treturnStatus.Message = errorInfo.Message\n\t} else {\n\t\tjson.NewDecoder(respBody).Decode(&agentInstallContentWrapper)\n\t\tfmt.Println(agentInstallContentWrapper)\n\t}\n\treturnStatus.StatusCode = respStatus\n\n\treturn &agentInstallContentWrapper, returnStatus\n}", "func (cli *OpsGenieAlertV2Client) Snooze(req alertsv2.SnoozeRequest) (*AsyncRequestResponse, error) {\n\treturn cli.sendAsyncPostRequest(&req)\n}", "func (client ModelClient) ListCustomPrebuiltIntentsSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tautorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n}", "func TraerClientes(ctx *iris.Context) {\n\tvar Send CotizacionModel.SDataCliente\n\n\t//Aqui! --> Validacion de permisos AJAx\n\n\tCliente := ctx.FormValue(\"Cliente\")\n\tfmt.Println(Cliente)\n\n\tdocs := PersonaModel.BuscarEnElastic(Cliente + \" +CLIENTE\")\n\tif docs.Hits.TotalHits > 0 {\n\t\tarrIDElastic = []bson.ObjectId{}\n\n\t\tfor _, item := range docs.Hits.Hits {\n\t\t\tIDElastic = bson.ObjectIdHex(item.Id)\n\t\t\tarrIDElastic = append(arrIDElastic, IDElastic)\n\t\t}\n\n\t\tnumeroRegistros = len(arrIDElastic)\n\n\t\tarrToMongo = []bson.ObjectId{}\n\t\tif numeroRegistros <= limitePorPagina {\n\t\t\tfor _, v := range arrIDElastic[0:numeroRegistros] {\n\t\t\t\tarrToMongo = append(arrToMongo, v)\n\t\t\t}\n\t\t} else if numeroRegistros >= limitePorPagina {\n\t\t\tfor _, v := range arrIDElastic[0:limitePorPagina] {\n\t\t\t\tarrToMongo = append(arrToMongo, v)\n\t\t\t}\n\t\t}\n\n\t\tMoConexion.FlushElastic()\n\t\tbusqueda := CotizacionModel.GeneraTemplateBusquedaClientes(PersonaModel.GetEspecifics(arrToMongo))\n\t\tif busqueda == \"\" {\n\t\t\tSend.SEstado = false\n\t\t\tSend.SMsj = \"No se encontraron clientes\"\n\t\t} else {\n\t\t\tSend.SIhtml = template.HTML(busqueda)\n\t\t\tSend.SEstado = true\n\t\t\tSend.SMsj = \"\"\n\t\t}\n\n\t\tjData, _ := json.Marshal(Send)\n\t\tctx.Header().Set(\"Content-Type\", \"application/json\")\n\t\tctx.Write(jData)\n\t\treturn\n\n\t}\n\n\tSend.SEstado = false\n\tSend.SMsj = \"El Cliente No Existe en la base de Datos.\"\n\tjData, _ := json.Marshal(Send)\n\tctx.Header().Set(\"Content-Type\", \"application/json\")\n\tctx.Write(jData)\n\treturn\n}", "func (client ModelClient) ListCustomPrebuiltModelsResponder(resp *http.Response) (result ListCustomPrebuiltModel, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result.Value),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func runWhenEnterprise(t *testing.T, semverRange string, rbacRequired bool) {\n\tclient, err := NewTestClient(nil, nil)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tres, err := 
client.Root(defaultCtx)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tv := res[\"version\"].(string)\n\n\tif !strings.Contains(v, \"enterprise-edition\") {\n\t\tt.Skip()\n\t}\n\n\tr := res[\"configuration\"].(map[string]interface{})[\"rbac\"].(string)\n\n\tif rbacRequired && r != \"on\" {\n\t\tt.Skip()\n\t}\n\n\trunWhenKong(t, semverRange)\n\n}", "func (instance *NDiscovery) getNodeList(message *lygo_n_commons.Command) interface{} {\n\tresponse := make(map[string]interface{})\n\tif nil != instance.storage {\n\t\tnetworkId := message.GetParamAsString(\"network_id\")\n\t\tresponse[\"publishers\"] = instance.storage.QueryPublishersAll()\n\t\tresponse[\"nodes\"] = instance.storage.QueryNodes(networkId)\n\t}\n\treturn response\n}", "func (crdRC *crdRequestController) updateNodeNetConfig(cntxt context.Context, nodeNetworkConfig *nnc.NodeNetworkConfig) error {\n\tif err := crdRC.KubeClient.Update(cntxt, nodeNetworkConfig); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (a *AllApiService) EnterpriseGetEnterpriseServices(ctx _context.Context, body EnterpriseGetEnterpriseServices) ([]EnterpriseGetEnterpriseServicesResultItem, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue []EnterpriseGetEnterpriseServicesResultItem\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/enterprise/getEnterpriseServices\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &body\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 200 {\n\t\t\tvar v []EnterpriseGetEnterpriseServicesResultItem\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn 
localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 400 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 500 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}", "func ActualizeSerie(log logging.Logger, serie *dao.Serie) (err error) {\n\t// Find the right parser for the serie.URL\n\thostParser, err := host.GetParser(serie.URL)\n\tif err != nil {\n\t\treturn\n\t}\n\tserie.Host = hostParser.Host()\n\tlog.Println(\"Host is\", hostParser.Name())\n\n\t// Get the content of the serie.URL\n\tpageContent, err := extdep.CommonClient.Get(serie.URL)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Find the season\n\tseason, err := hostParser.Season(pageContent)\n\tif err != nil {\n\t\treturn\n\t}\n\tserie.Season = season\n\tlog.Println(fmt.Sprintf(\"Season is %v\", season))\n\n\t// Find the last episode out\n\tepisode, err := hostParser.LastEpisode(pageContent)\n\tif err != nil {\n\t\treturn\n\t}\n\tserie.LastEpisode = episode\n\tlog.Println(fmt.Sprintf(\"Last episode is %v\", episode))\n\n\treturn\n}", "func (client MultipleResponsesClient) GetDefaultModelA200NoneSender(req *http.Request) (*http.Response, error) {\n return autorest.SendWithSender(client, req)\n}", "func (s *Service) getRecommend(c context.Context, mid int64, build int, plat int8, buvid, network, mobiApp, device, ipaddr string) (sis []*show.Item) {\n\tcnt := s.itemNum(plat)\n\t// first get recommend data.\n\tsis = s.userRecommend(c, mid, build, plat, buvid, network, mobiApp, device, ipaddr, cnt)\n\treturn\n}", "func (page *VirtualMachineListResultPageClient) Next() error {\n\treturn page.vmlrp.Next()\n}", "func (m *IntentsDeviceManagementIntentItemRequestBuilder) GetCustomizedSettings()(*IntentsItemGetCustomizedSettingsRequestBuilder) {\n return NewIntentsItemGetCustomizedSettingsRequestBuilderInternal(m.BaseRequestBuilder.PathParameters, m.BaseRequestBuilder.RequestAdapter)\n}", "func (m *Mosn) inheritHandler() error {\n\tvar err error\n\tm.Upgrade.InheritListeners, m.Upgrade.InheritPacketConn, m.Upgrade.ListenSockConn, err = server.GetInheritListeners()\n\tif err != nil {\n\t\tlog.StartLogger.Errorf(\"[mosn] [NewMosn] getInheritListeners failed, exit\")\n\t\treturn err\n\t}\n\tlog.StartLogger.Infof(\"[mosn] [NewMosn] active reconfiguring\")\n\t// parse MOSNConfig again\n\tc := configmanager.Load(configmanager.GetConfigPath())\n\tif c.InheritOldMosnconfig {\n\t\t// inherit old mosn config\n\t\toldMosnConfig, err := 
server.GetInheritConfig()\n\t\tif err != nil {\n\t\t\tm.Upgrade.ListenSockConn.Close()\n\t\t\tlog.StartLogger.Errorf(\"[mosn] [NewMosn] GetInheritConfig failed, exit\")\n\t\t\treturn err\n\t\t}\n\t\tlog.StartLogger.Debugf(\"[mosn] [NewMosn] old mosn config: %v\", oldMosnConfig)\n\t\tc.Servers = oldMosnConfig.Servers\n\t\tc.ClusterManager = oldMosnConfig.ClusterManager\n\t\tc.Extends = oldMosnConfig.Extends\n\t}\n\tif c.CloseGraceful {\n\t\tc.DisableUpgrade = true\n\t}\n\tm.Config = c\n\treturn nil\n}", "func (a *AllApiService) EnterpriseSetEnterpriseMaximumSegments(ctx _context.Context, body EnterpriseSetEnterpriseMaximumSegments) (EnterpriseSetEnterpriseMaximumSegmentsResult, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue EnterpriseSetEnterpriseMaximumSegmentsResult\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/enterprise/setEnterpriseMaximumSegments\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &body\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 200 {\n\t\t\tvar v EnterpriseSetEnterpriseMaximumSegmentsResult\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 400 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif 
localVarHttpResponse.StatusCode == 500 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}", "func (eh *GetSLIEventHandler) sendEvent(factory adapter.CloudEventFactoryInterface) error {\n\terr := eh.kClient.SendCloudEvent(factory)\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"Could not send get sli cloud event\")\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (a *Client) GetNiaapiDcnmReleaseRecommendsMoid(params *GetNiaapiDcnmReleaseRecommendsMoidParams) (*GetNiaapiDcnmReleaseRecommendsMoidOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetNiaapiDcnmReleaseRecommendsMoidParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"GetNiaapiDcnmReleaseRecommendsMoid\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/niaapi/DcnmReleaseRecommends/{moid}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &GetNiaapiDcnmReleaseRecommendsMoidReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*GetNiaapiDcnmReleaseRecommendsMoidOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\tunexpectedSuccess := result.(*GetNiaapiDcnmReleaseRecommendsMoidDefault)\n\treturn nil, runtime.NewAPIError(\"unexpected success response: content available as default response in error\", unexpectedSuccess, unexpectedSuccess.Code())\n}", "func (a *AllApiService) EnterpriseGetEnterpriseEdges(ctx _context.Context, body EnterpriseGetEnterpriseEdges) ([]EnterpriseGetEnterpriseEdgesResultItem, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue []EnterpriseGetEnterpriseEdgesResultItem\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/enterprise/getEnterpriseEdges\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != 
\"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &body\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 200 {\n\t\t\tvar v []EnterpriseGetEnterpriseEdgesResultItem\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 400 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 500 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}", "func (t *Procure2Pay) UpdatePoECustoms(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\tvar obj_uipurchaseOrder purchaseOrder\n\tvar obj_bcpurchaseOrder purchaseOrder\n\tvar err error\n\n\tfmt.Println(\"Entering UpdatePoECustoms\")\n\n\tif (len(args) < 1) {\n\t\tfmt.Println(\"Invalid number of args\")\n\t\treturn shim.Error(err.Error())\n\t\t//return nil,nil\n\t}\n\n\terr = json.Unmarshal([]byte(args[0]), &obj_uipurchaseOrder)\n if err != nil {\n\t\tfmt.Printf(\"Unable to marshal createTransaction input UpdateTransaction : %s\\n\", err)\n\t\treturn shim.Error(err.Error())\n\t\t//return nil, nil\n\t}\n\n\tfmt.Println(\"\\n refno variable value is : \",obj_uipurchaseOrder.POID); \n\n\t// code to get data from blockchain using dynamic key starts here\n\tvar bytesread []byte\t\n\tbytesread, err = stub.GetState(obj_uipurchaseOrder.POID)\n\terr = json.Unmarshal(bytesread, &obj_bcpurchaseOrder)\n\t// code to get data from blockchain using dynamic 
key ends here\n\n\tfmt.Printf(\"\\nobj_bcpurchaseOrder : %s \", obj_bcpurchaseOrder)\n\n\tobj_bcpurchaseOrder.POID=obj_uipurchaseOrder.POID\n\tobj_bcpurchaseOrder.PoECustoms=obj_uipurchaseOrder.PoECustoms\n\n\t// Data insertion for Couch DB starts here \n\ttransJSONasBytes, err := json.Marshal(obj_bcpurchaseOrder)\n\terr = stub.PutState(obj_uipurchaseOrder.POID, transJSONasBytes)\n\t// Data insertion for Couch DB ends here \n\t\t\n\tfmt.Println(\"POE Customs Successfully updated.\")\t\n\n\tif err != nil {\n\t\t\t\tfmt.Printf(\"\\nUnable to make transevent inputs : %v \", err)\n\t\t\t\treturn shim.Error(err.Error())\n\t\t\t\t//return nil,nil\n\t\t\t}\n\treturn shim.Success(nil)\n\t//return nil, nil\n}", "func (client ModelClient) GetPrebuiltEntityRoleResponder(resp *http.Response) (result EntityRole, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (client MultipleResponsesClient) GetDefaultModelA400NoneSender(req *http.Request) (*http.Response, error) {\n return autorest.SendWithSender(client, req)\n}", "func (client ModelClient) UpdateCustomPrebuiltEntityRoleResponder(resp *http.Response) (result OperationStatus, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func rpcClientConnectLoop(legacyRPCServer *legacyrpc.Server, loader *wallet.Loader) {\n\tvar certs []byte\n\t// if !cfg.UseSPV {\n\tcerts = readCAFile()\n\t// }\n\tfor {\n\t\tvar (\n\t\t\tchainClient chain.Interface\n\t\t\terr error\n\t\t)\n\t\t// if cfg.UseSPV {\n\t\t// \tvar (\n\t\t// \t\tchainService *neutrino.ChainService\n\t\t// \t\tspvdb walletdb.DB\n\t\t// \t)\n\t\t// \tnetDir := networkDir(cfg.AppDataDir.Value, ActiveNet.Params)\n\t\t// \tspvdb, err = walletdb.Create(\"bdb\",\n\t\t// \t\tfilepath.Join(netDir, \"neutrino.db\"))\n\t\t// \tdefer spvdb.Close()\n\t\t// \tif err != nil {\n\t\t// \t\tlog<-cl.Errorf{\"unable to create Neutrino DB: %s\", err)\n\t\t// \t\tcontinue\n\t\t// \t}\n\t\t// \tchainService, err = neutrino.NewChainService(\n\t\t// \t\tneutrino.Config{\n\t\t// \t\t\tDataDir: netDir,\n\t\t// \t\t\tDatabase: spvdb,\n\t\t// \t\t\tChainParams: *ActiveNet.Params,\n\t\t// \t\t\tConnectPeers: cfg.ConnectPeers,\n\t\t// \t\t\tAddPeers: cfg.AddPeers,\n\t\t// \t\t})\n\t\t// \tif err != nil {\n\t\t// \t\tlog<-cl.Errorf{\"couldn't create Neutrino ChainService: %s\", err)\n\t\t// \t\tcontinue\n\t\t// \t}\n\t\t// \tchainClient = chain.NewNeutrinoClient(ActiveNet.Params, chainService)\n\t\t// \terr = chainClient.Start()\n\t\t// \tif err != nil {\n\t\t// \t\tlog<-cl.Errorf{\"couldn't start Neutrino client: %s\", err)\n\t\t// \t}\n\t\t// } else {\n\t\tchainClient, err = startChainRPC(certs)\n\t\tif err != nil {\n\t\t\tlog <- cl.Error{\n\t\t\t\t\"unable to open connection to consensus RPC server:\", err}\n\t\t\tcontinue\n\t\t}\n\t\t// }\n\t\t// Rather than inlining this logic directly into the loader\n\t\t// callback, a function variable is used to avoid running any of\n\t\t// this after the client disconnects by setting it to nil. This\n\t\t// prevents the callback from associating a wallet loaded at a\n\t\t// later time with a client that has already disconnected. 
A\n\t\t// mutex is used to make this concurrent safe.\n\t\tassociateRPCClient := func(w *wallet.Wallet) {\n\t\t\tw.SynchronizeRPC(chainClient)\n\t\t\tif legacyRPCServer != nil {\n\t\t\t\tlegacyRPCServer.SetChainServer(chainClient)\n\t\t\t}\n\t\t}\n\t\tmu := new(sync.Mutex)\n\t\tloader.RunAfterLoad(func(w *wallet.Wallet) {\n\t\t\tmu.Lock()\n\t\t\tassociate := associateRPCClient\n\t\t\tmu.Unlock()\n\t\t\tif associate != nil {\n\t\t\t\tassociate(w)\n\t\t\t}\n\t\t})\n\t\tchainClient.WaitForShutdown()\n\t\tmu.Lock()\n\t\tassociateRPCClient = nil\n\t\tmu.Unlock()\n\t\tloadedWallet, ok := loader.LoadedWallet()\n\t\tif ok {\n\t\t\t// Do not attempt a reconnect when the wallet was explicitly stopped.\n\t\t\tif loadedWallet.ShuttingDown() {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tloadedWallet.SetChainSynced(false)\n\t\t\t// TODO: Rework the wallet so changing the RPC client does not require stopping and restarting everything.\n\t\t\tloadedWallet.Stop()\n\t\t\tloadedWallet.WaitForShutdown()\n\t\t\tloadedWallet.Start()\n\t\t}\n\t}\n}", "func (page ClassicAdministratorListResultPage) Response() ClassicAdministratorListResult {\n\treturn page.calr\n}" ]
[ "0.7576879", "0.69949114", "0.6187827", "0.6112665", "0.50135446", "0.4806385", "0.46168134", "0.45960653", "0.45363542", "0.45080954", "0.44634172", "0.43717235", "0.43654612", "0.43571216", "0.43244952", "0.42791805", "0.42694405", "0.42490053", "0.42486668", "0.42294818", "0.42274", "0.42109695", "0.41909933", "0.4177224", "0.41693124", "0.4157282", "0.41570595", "0.4117975", "0.41035843", "0.407697", "0.40736964", "0.407198", "0.40673882", "0.40622452", "0.40595293", "0.40487742", "0.40470037", "0.40435567", "0.40413743", "0.4038432", "0.4038247", "0.40179437", "0.40154907", "0.4000815", "0.39980868", "0.3993271", "0.39910424", "0.39589828", "0.39484444", "0.3948031", "0.39471754", "0.39376906", "0.39320233", "0.39315188", "0.39271036", "0.3926596", "0.3924712", "0.39222416", "0.39207417", "0.39177388", "0.39159063", "0.39099625", "0.3903053", "0.39028952", "0.3902083", "0.38951066", "0.3892034", "0.38877767", "0.3884049", "0.38840312", "0.388268", "0.38813812", "0.38783517", "0.38774437", "0.38764343", "0.38753268", "0.38722858", "0.38698256", "0.38674212", "0.38647595", "0.3863468", "0.3861367", "0.38605446", "0.38589427", "0.3856989", "0.3853618", "0.38487917", "0.38395724", "0.38372132", "0.38353813", "0.38347176", "0.38331613", "0.38280746", "0.38254198", "0.38200262", "0.38177848", "0.38160768", "0.3815792", "0.38139397", "0.38110504" ]
0.7616816
0
GetNerCustomizedSeaEcomWithChan invokes the alinlp.GetNerCustomizedSeaEcom API asynchronously
func (client *Client) GetNerCustomizedSeaEcomWithChan(request *GetNerCustomizedSeaEcomRequest) (<-chan *GetNerCustomizedSeaEcomResponse, <-chan error) {
	responseChan := make(chan *GetNerCustomizedSeaEcomResponse, 1)
	errChan := make(chan error, 1)
	err := client.AddAsyncTask(func() {
		defer close(responseChan)
		defer close(errChan)
		response, err := client.GetNerCustomizedSeaEcom(request)
		if err != nil {
			errChan <- err
		} else {
			responseChan <- response
		}
	})
	if err != nil {
		errChan <- err
		close(responseChan)
		close(errChan)
	}
	return responseChan, errChan
}
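Illustrative usage sketch (not part of the stored record): one way a caller might consume the two channels returned by the function above. It assumes an already-initialised *Client, the CreateGetNerCustomizedSeaEcomRequest constructor that appears among the negatives below, and an fmt import at file level; request fields are left unset for brevity and the select-based consumption is an assumption, not taken from the snippet itself.
// callGetNerCustomizedSeaEcom is an illustrative sketch showing one way to
// consume the response and error channels returned by
// GetNerCustomizedSeaEcomWithChan. Both channels are closed when the async
// task finishes, so nil/zero values from a closed channel are guarded against.
func callGetNerCustomizedSeaEcom(client *Client) {
	request := CreateGetNerCustomizedSeaEcomRequest() // constructor shown among the negatives below
	responseChan, errChan := client.GetNerCustomizedSeaEcomWithChan(request)
	select {
	case response := <-responseChan:
		if response != nil {
			fmt.Printf("response: %+v\n", response)
		}
	case err := <-errChan:
		if err != nil {
			fmt.Printf("request failed: %v\n", err)
		}
	}
}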
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (client *Client) GetNerCustomizedSeaEcomWithCallback(request *GetNerCustomizedSeaEcomRequest, callback func(response *GetNerCustomizedSeaEcomResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetNerCustomizedSeaEcomResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetNerCustomizedSeaEcom(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) GetNerCustomizedSeaEcom(request *GetNerCustomizedSeaEcomRequest) (response *GetNerCustomizedSeaEcomResponse, err error) {\n\tresponse = CreateGetNerCustomizedSeaEcomResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}", "func (client *Client) GetWsCustomizedChEcomContentWithChan(request *GetWsCustomizedChEcomContentRequest) (<-chan *GetWsCustomizedChEcomContentResponse, <-chan error) {\n\tresponseChan := make(chan *GetWsCustomizedChEcomContentResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetWsCustomizedChEcomContent(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func (client *Client) GetWsCustomizedChEcomContentWithCallback(request *GetWsCustomizedChEcomContentRequest, callback func(response *GetWsCustomizedChEcomContentResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetWsCustomizedChEcomContentResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetWsCustomizedChEcomContent(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) OemSitingSelctionWithChan(request *OemSitingSelctionRequest) (<-chan *OemSitingSelctionResponse, <-chan error) {\n\tresponseChan := make(chan *OemSitingSelctionResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.OemSitingSelction(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func (client *Client) GetWsCustomizedChO2OWithChan(request *GetWsCustomizedChO2ORequest) (<-chan *GetWsCustomizedChO2OResponse, <-chan error) {\n\tresponseChan := make(chan *GetWsCustomizedChO2OResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetWsCustomizedChO2O(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func CreateGetNerCustomizedSeaEcomResponse() (response *GetNerCustomizedSeaEcomResponse) {\n\tresponse = &GetNerCustomizedSeaEcomResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func 
CreateGetNerCustomizedSeaEcomRequest() (request *GetNerCustomizedSeaEcomRequest) {\n\trequest = &GetNerCustomizedSeaEcomRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"alinlp\", \"2020-06-29\", \"GetNerCustomizedSeaEcom\", \"alinlp\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *Client) CoreEngineWithChan(request *CoreEngineRequest) (<-chan *CoreEngineResponse, <-chan error) {\n\tresponseChan := make(chan *CoreEngineResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.CoreEngine(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func (client *Client) OemSitingSelctionWithCallback(request *OemSitingSelctionRequest, callback func(response *OemSitingSelctionResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *OemSitingSelctionResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.OemSitingSelction(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) GetKeywordChEcomWithChan(request *GetKeywordChEcomRequest) (<-chan *GetKeywordChEcomResponse, <-chan error) {\n\tresponseChan := make(chan *GetKeywordChEcomResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetKeywordChEcom(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func (client *Client) GetKeywordChEcomWithCallback(request *GetKeywordChEcomRequest, callback func(response *GetKeywordChEcomResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetKeywordChEcomResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetKeywordChEcom(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func ProcessInChannel(wg *sync.WaitGroup, scConfig *common.SCConfiguration) {\n\tdefer wg.Done()\n\tfor { //nolint:gosimple\n\t\tselect {\n\t\tcase d := <-scConfig.EventInCh:\n\t\t\tif d.Type == channel.LISTENER {\n\t\t\t\tlog.Warnf(\"amqp disabled,no action taken: request to create listener address %s was called,but transport is not enabled\", d.Address)\n\t\t\t} else if d.Type == channel.SENDER {\n\t\t\t\tlog.Warnf(\"no action taken: request to create sender for address %s was called,but transport is not enabled\", d.Address)\n\t\t\t} else if d.Type == channel.EVENT && d.Status == channel.NEW {\n\t\t\t\tif e, err := v1event.GetCloudNativeEvents(*d.Data); err != nil {\n\t\t\t\t\tlog.Warnf(\"error marshalling event data\")\n\t\t\t\t} else {\n\t\t\t\t\tlog.Warnf(\"amqp disabled,no action taken(can't send to a desitination): logging new event %s\\n\", e.String())\n\t\t\t\t}\n\t\t\t\tout := channel.DataChan{\n\t\t\t\t\tAddress: 
d.Address,\n\t\t\t\t\tData: d.Data,\n\t\t\t\t\tStatus: channel.SUCCESS,\n\t\t\t\t\tType: channel.EVENT,\n\t\t\t\t\tProcessEventFn: d.ProcessEventFn,\n\t\t\t\t}\n\t\t\t\tif d.OnReceiveOverrideFn != nil {\n\t\t\t\t\tif err := d.OnReceiveOverrideFn(*d.Data, &out); err != nil {\n\t\t\t\t\t\tlog.Errorf(\"error onReceiveOverrideFn %s\", err)\n\t\t\t\t\t\tout.Status = channel.FAILED\n\t\t\t\t\t} else {\n\t\t\t\t\t\tout.Status = channel.SUCCESS\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tscConfig.EventOutCh <- &out\n\t\t\t} else if d.Type == channel.STATUS && d.Status == channel.NEW {\n\t\t\t\tlog.Warnf(\"amqp disabled,no action taken(can't send to a destination): logging new status check %v\\n\", d)\n\t\t\t\tout := channel.DataChan{\n\t\t\t\t\tAddress: d.Address,\n\t\t\t\t\tData: d.Data,\n\t\t\t\t\tStatus: channel.SUCCESS,\n\t\t\t\t\tType: channel.EVENT,\n\t\t\t\t\tProcessEventFn: d.ProcessEventFn,\n\t\t\t\t}\n\t\t\t\tif d.OnReceiveOverrideFn != nil {\n\t\t\t\t\tif err := d.OnReceiveOverrideFn(*d.Data, &out); err != nil {\n\t\t\t\t\t\tlog.Errorf(\"error onReceiveOverrideFn %s\", err)\n\t\t\t\t\t\tout.Status = channel.FAILED\n\t\t\t\t\t} else {\n\t\t\t\t\t\tout.Status = channel.SUCCESS\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-scConfig.CloseCh:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (client *Client) BeginVnDialogueWithChan(request *BeginVnDialogueRequest) (<-chan *BeginVnDialogueResponse, <-chan error) {\n\tresponseChan := make(chan *BeginVnDialogueResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.BeginVnDialogue(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func (client *Client) GetWsCustomizedChO2OWithCallback(request *GetWsCustomizedChO2ORequest, callback func(response *GetWsCustomizedChO2OResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetWsCustomizedChO2OResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetWsCustomizedChO2O(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) ModifyClusterServiceConfigForAdminWithChan(request *ModifyClusterServiceConfigForAdminRequest) (<-chan *ModifyClusterServiceConfigForAdminResponse, <-chan error) {\n\tresponseChan := make(chan *ModifyClusterServiceConfigForAdminResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.ModifyClusterServiceConfigForAdmin(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func (client *Client) GetIndustryCommerceInfoWithChan(request *GetIndustryCommerceInfoRequest) (<-chan *GetIndustryCommerceInfoResponse, <-chan error) {\n\tresponseChan := make(chan *GetIndustryCommerceInfoResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetIndustryCommerceInfo(request)\n\t\tif 
err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func (client *Client) GetEMapWithChan(request *GetEMapRequest) (<-chan *GetEMapResponse, <-chan error) {\n\tresponseChan := make(chan *GetEMapResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetEMap(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func (f *Fetcher) processChan(outChanCommands <-chan Command, routineIndex int) {\n var (\n agent *robotstxt.Group\n ttl <-chan time.Time\n // add some random seconds for each channel\n delay = f.CrawlDelay\n httpClient = f.NewClient(true, true)\n httpClientCommands = 0\n restartClient = false\n )\n\n // tata tata\n logs.Info(\"Channel %d is starting..\", routineIndex)\n if routineIndex < 10 {\n time.Sleep(time.Duration(routineIndex) + 2*time.Second)\n }\n\nloop:\n for {\n select {\n case <-f.queue.cancelled:\n break loop\n case command, ok := <-outChanCommands:\n if !ok {\n logs.Info(\"Channel %d was closed, terminating..\", routineIndex)\n // Terminate this goroutine, channel is closed\n break loop\n }\n\n // was it stop during the wait? check again\n select {\n case <-f.queue.cancelled:\n logs.Info(\"Channel %d was cancelled, terminating..\", routineIndex)\n break loop\n default:\n // go on\n }\n\n restartClient = false\n\n logs.Debug(\"Channel %d received new command %s\", routineIndex, command.Url())\n\n if !f.DisablePoliteness {\n agent = f.getRobotAgent(command.Url())\n // Initialize the crawl delay\n if agent != nil && agent.CrawlDelay > 0 {\n delay = agent.CrawlDelay\n }\n }\n\n if command.HttpClient() == nil {\n command.SetHttpClient(httpClient)\n }\n\n if f.DisablePoliteness || agent == nil || agent.Test(command.Url().Path) {\n\n // path allowed, process the request\n res, err, isCached := f.Request(command, delay)\n\n if !isCached {\n httpClientCommands++\n\n var statusCode int\n if res != nil {\n statusCode = res.StatusCode\n }\n logs.Info(\"[%d][%d] %s %s\", routineIndex, statusCode, command.Method(), command.Url())\n }\n\n restartClient = f.visit(command, res, err, isCached)\n\n } else {\n // path disallowed by robots.txt\n f.visit(command, nil, ErrDisallowed, false)\n }\n\n f.snapshot.removeCommandInQueue(f.uniqueId(command.Url(), command.Method()))\n\n // Every time a command is received, reset the ttl channel\n ttl = time.After(f.WorkerIdleTTL)\n\n if restartClient || httpClientCommands > f.MaxCommandsPerClient {\n if f.LogLevel < logs.LevelNotice {\n fmt.Print(\"👅\")\n }\n logs.Info(\"Channel %d needs restart after %d commands..\", routineIndex, httpClientCommands)\n go f.processChan(outChanCommands, routineIndex)\n return\n }\n\n case <-ttl:\n if f.snapshot.queueLength() != 0 {\n logs.Debug(\"Channel %d was trying to timeout while queue length is %d\", routineIndex, f.snapshot.queueLength())\n ttl = time.After(f.WorkerIdleTTL)\n continue\n }\n logs.Alert(\"Channel %d with %d unique urls\", routineIndex, f.snapshot.uniqueUrlsLength())\n go f.Shutdown()\n break loop\n }\n }\n for range outChanCommands {\n }\n\n f.workersWaitGroup.Done()\n}", "func LeninaCrowne(wg *sync.WaitGroup, updateSite bool, 
knownScenes []string, out chan<- models.ScrapedScene) error {\n\treturn SexLikeReal(wg, updateSite, knownScenes, out, \"leninacrowne\", \"LeninaCrowne\", \"Terrible\")\n}", "func (dv DomVisit) SeedURLChanWork(promiscuous bool, srcURLChan, seedURLChan chan URL) {\n\ts := Spinner{}\n\tcnt := 0\n\tfor itm := range seedURLChan {\n\t\tif false {\n\t\t\tfmt.Println(\"SeedURL:\", itm)\n\t\t}\n\t\tif itm.Initialise() {\n\t\t\tlog.Fatal(\"URL needed initialising in nd\", itm)\n\t\t}\n\t\tdomainI := itm.Base()\n\t\tif domainI != \"\" {\n\t\t\t// Mark this as a domain we can Fetch from\n\t\t\t_ = dv.VisitedA(domainI)\n\t\t\t// send this URL for grabbing\n\t\t\tif promiscuous {\n\t\t\t\titm.SetPromiscuous()\n\t\t\t}\n\t\t\titm.SetShallow()\n\t\t\ts.PrintSpin(cnt)\n\t\t\tsrcURLChan <- itm\n\t\t\tcnt++\n\t\t\t//fmt.Println(itm, \"Sent\")\n\t\t} else {\n\t\t\tlog.Fatalln(\"ERROR: a URL in the seed file we can't get domain of:\", itm)\n\t\t}\n\t}\n\tfmt.Println(\"seed_url_chan seen closed\")\n\tclose(srcURLChan)\n}", "func (client *Client) QueryPublicModelEngineWithChan(request *QueryPublicModelEngineRequest) (<-chan *QueryPublicModelEngineResponse, <-chan error) {\n\tresponseChan := make(chan *QueryPublicModelEngineResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.QueryPublicModelEngine(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func (client *Client) DescribeUserVvTopByDayWithChan(request *DescribeUserVvTopByDayRequest) (<-chan *DescribeUserVvTopByDayResponse, <-chan error) {\n\tresponseChan := make(chan *DescribeUserVvTopByDayResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.DescribeUserVvTopByDay(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func (client *Client) ModifyVnRepeatingConfigWithChan(request *ModifyVnRepeatingConfigRequest) (<-chan *ModifyVnRepeatingConfigResponse, <-chan error) {\n\tresponseChan := make(chan *ModifyVnRepeatingConfigResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.ModifyVnRepeatingConfig(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func ListenChannel(task ...Task) {\n\t// To trigger channel at first time\n\tSendToChannel(\"\", t4)\n\n\tfor {\n\t\tselect {\n\t\tcase msgChan := <-Ch:\n\t\t\tif len(task) > 0 {\n\t\t\t\tfor _, t := range task {\n\t\t\t\t\tif msg, ok := msgChan[t.Code]; ok {\n\t\t\t\t\t\tt.Job(msg)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tvar msg interface{}\n\t\t\t\tfor _, msgByte := range msgChan {\n\t\t\t\t\terr := json.Unmarshal(msgByte, &msg)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func excuse(srvChan chan string, channel, nick, hostname string, args 
[]string) {\n\tmessage := \"NOTICE \" + channel + \" :\"\n\tres, err := http.Get(\"http://programmingexcuses.com/\")\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn\n\t}\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn\n\t}\n\tres.Body.Close()\n\tlinkRegexp := regexp.MustCompile(`<a href=\"/\" rel=\"nofollow\" .*?>(.*?)</a>`)\n\tif match := linkRegexp.FindStringSubmatch(string(body)); match != nil {\n\t\tmessage += match[1]\n\t} else {\n\t\tlog.Println(\"ERROR: No match\")\n\t\treturn\n\t}\n\tsrvChan <- message\n\tlog.Println(message)\n}", "func (client *Client) QueryCustomerAddressListWithChan(request *QueryCustomerAddressListRequest) (<-chan *QueryCustomerAddressListResponse, <-chan error) {\nresponseChan := make(chan *QueryCustomerAddressListResponse, 1)\nerrChan := make(chan error, 1)\nerr := client.AddAsyncTask(func() {\ndefer close(responseChan)\ndefer close(errChan)\nresponse, err := client.QueryCustomerAddressList(request)\nif err != nil {\nerrChan <- err\n} else {\nresponseChan <- response\n}\n})\nif err != nil {\nerrChan <- err\nclose(responseChan)\nclose(errChan)\n}\nreturn responseChan, errChan\n}", "func (client *Client) GetOfficePreviewURLWithChan(request *GetOfficePreviewURLRequest) (<-chan *GetOfficePreviewURLResponse, <-chan error) {\n\tresponseChan := make(chan *GetOfficePreviewURLResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetOfficePreviewURL(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func (client *Client) CoreEngineWithCallback(request *CoreEngineRequest, callback func(response *CoreEngineResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *CoreEngineResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.CoreEngine(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func ProcessOutChannel(wg *sync.WaitGroup, scConfig *common.SCConfiguration) {\n\t//qdr throws out the data on this channel ,listen to data coming out of qdrEventOutCh\n\t//Send back the acknowledgement to publisher\n\tpostProcessFn := func(address string, status channel.Status) {\n\t\tif pub, ok := scConfig.PubSubAPI.HasPublisher(address); ok {\n\t\t\tif status == channel.SUCCESS {\n\t\t\t\tlocalmetrics.UpdateEventAckCount(address, localmetrics.SUCCESS)\n\t\t\t} else {\n\t\t\t\tlocalmetrics.UpdateEventAckCount(address, localmetrics.FAILED)\n\t\t\t}\n\t\t\tif pub.EndPointURI != nil {\n\t\t\t\tlog.Debugf(\"posting event status %s to publisher %s\", channel.Status(status), pub.Resource)\n\t\t\t\trestClient := restclient.New()\n\t\t\t\t_ = restClient.Post(pub.EndPointURI,\n\t\t\t\t\t[]byte(fmt.Sprintf(`{eventId:\"%s\",status:\"%s\"}`, pub.ID, status)))\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Warnf(\"could not send ack to publisher ,`publisher` for address %s not found\", address)\n\t\t\tlocalmetrics.UpdateEventAckCount(address, localmetrics.FAILED)\n\t\t}\n\t}\n\tpostHandler := func(err error, endPointURI *types.URI, address string) {\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"error posting request at %s : %s\", 
endPointURI, err)\n\t\t\tlocalmetrics.UpdateEventReceivedCount(address, localmetrics.FAILED)\n\t\t} else {\n\t\t\tlocalmetrics.UpdateEventReceivedCount(address, localmetrics.SUCCESS)\n\t\t}\n\t}\n\n\tfor { //nolint:gosimple\n\t\tselect { //nolint:gosimple\n\t\tcase d := <-scConfig.EventOutCh: // do something that is put out by QDR\n\t\t\tswitch d.Data.Type() {\n\t\t\tcase channel.HWEvent:\n\t\t\t\tevent, err := v1hwevent.GetCloudNativeEvents(*d.Data)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Errorf(\"error marshalling event data when reading from amqp %v\\n %#v\", err, d)\n\t\t\t\t\tlog.Infof(\"data %#v\", d.Data)\n\t\t\t\t} else if d.Type == channel.EVENT {\n\t\t\t\t\tif d.Status == channel.NEW {\n\t\t\t\t\t\tif d.ProcessEventFn != nil { // always leave event to handle by default method for events\n\t\t\t\t\t\t\tif err := d.ProcessEventFn(event); err != nil {\n\t\t\t\t\t\t\t\tlog.Errorf(\"error processing data %v\", err)\n\t\t\t\t\t\t\t\tlocalmetrics.UpdateEventReceivedCount(d.Address, localmetrics.FAILED)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else if sub, ok := scConfig.PubSubAPI.HasSubscription(d.Address); ok {\n\t\t\t\t\t\t\tif sub.EndPointURI != nil {\n\t\t\t\t\t\t\t\trestClient := restclient.New()\n\t\t\t\t\t\t\t\tevent.ID = sub.ID // set ID to the subscriptionID\n\t\t\t\t\t\t\t\terr := restClient.PostHwEvent(sub.EndPointURI, event)\n\t\t\t\t\t\t\t\tpostHandler(err, sub.EndPointURI, d.Address)\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tlog.Warnf(\"endpoint uri not given, posting event to log %#v for address %s\\n\", event, d.Address)\n\t\t\t\t\t\t\t\tlocalmetrics.UpdateEventReceivedCount(d.Address, localmetrics.SUCCESS)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tlog.Warnf(\"subscription not found, posting event %#v to log for address %s\\n\", event, d.Address)\n\t\t\t\t\t\t\tlocalmetrics.UpdateEventReceivedCount(d.Address, localmetrics.FAILED)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else if d.Status == channel.SUCCESS || d.Status == channel.FAILED { // event sent ,ack back to publisher\n\t\t\t\t\t\tpostProcessFn(d.Address, d.Status)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tevent, err := v1event.GetCloudNativeEvents(*d.Data)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Errorf(\"error marshalling event data when reading from amqp %v\\n %#v\", err, d)\n\t\t\t\t\tlog.Infof(\"data %#v\", d.Data)\n\t\t\t\t} else if d.Type == channel.EVENT {\n\t\t\t\t\tif d.Status == channel.NEW {\n\t\t\t\t\t\tif d.ProcessEventFn != nil { // always leave event to handle by default method for events\n\t\t\t\t\t\t\tif err := d.ProcessEventFn(event); err != nil {\n\t\t\t\t\t\t\t\tlog.Errorf(\"error processing data %v\", err)\n\t\t\t\t\t\t\t\tlocalmetrics.UpdateEventReceivedCount(d.Address, localmetrics.FAILED)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else if sub, ok := scConfig.PubSubAPI.HasSubscription(d.Address); ok {\n\t\t\t\t\t\t\tif sub.EndPointURI != nil {\n\t\t\t\t\t\t\t\trestClient := restclient.New()\n\t\t\t\t\t\t\t\tevent.ID = sub.ID // set ID to the subscriptionID\n\t\t\t\t\t\t\t\terr := restClient.PostEvent(sub.EndPointURI, event)\n\t\t\t\t\t\t\t\tpostHandler(err, sub.EndPointURI, d.Address)\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tlog.Warnf(\"endpoint uri not given, posting event to log %#v for address %s\\n\", event, d.Address)\n\t\t\t\t\t\t\t\tlocalmetrics.UpdateEventReceivedCount(d.Address, localmetrics.SUCCESS)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tlog.Warnf(\"subscription not found, posting event %#v to log for address %s\\n\", event, 
d.Address)\n\t\t\t\t\t\t\tlocalmetrics.UpdateEventReceivedCount(d.Address, localmetrics.FAILED)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else if d.Status == channel.SUCCESS || d.Status == channel.FAILED { // event sent ,ack back to publisher\n\t\t\t\t\t\tpostProcessFn(d.Address, d.Status)\n\t\t\t\t\t}\n\t\t\t\t} else if d.Type == channel.STATUS {\n\t\t\t\t\tif d.Status == channel.SUCCESS {\n\t\t\t\t\t\tlocalmetrics.UpdateStatusAckCount(d.Address, localmetrics.SUCCESS)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tlog.Errorf(\"failed to receive status request to address %s\", d.Address)\n\t\t\t\t\t\tlocalmetrics.UpdateStatusAckCount(d.Address, localmetrics.FAILED)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} // end switch\n\t\tcase <-scConfig.CloseCh:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (client *Client) ListCityMapAoisWithChan(request *ListCityMapAoisRequest) (<-chan *ListCityMapAoisResponse, <-chan error) {\n\tresponseChan := make(chan *ListCityMapAoisResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.ListCityMapAois(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func getProductsSameEAN(c chan []ZunkaSiteProductRx, ean string) {\n\tproducts := []ZunkaSiteProductRx{}\n\n\t// Request product add.\n\tclient := &http.Client{}\n\t// title = \"GABINETE COOLER MASTER MASTERBOX LITE 3.1 TG LATERAL EM VIDRO TEMPERADO ATX/E-ATX/MINI-ITX/MICRO-AT\"\n\treq, err := http.NewRequest(\"GET\", zunkaSiteHost()+\"/setup/products-same-ean\", nil)\n\tif err != nil {\n\t\tError.Print(err)\n\t\tc <- products\n\t\treturn\n\t}\n\t// Query params\n\tq := req.URL.Query()\n\tq.Add(\"ean\", ean)\n\treq.URL.RawQuery = q.Encode()\n\t// Head.\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.SetBasicAuth(zunkaSiteUser(), zunkaSitePass())\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\tError.Print(err)\n\t\tc <- products\n\t\treturn\n\t}\n\t// res, err := http.Post(\"http://localhost:3080/setup/product/add\", \"application/json\", bytes.NewBuffer(reqBody))\n\tdefer res.Body.Close()\n\n\t// Result.\n\tresBody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tError.Print(err)\n\t\tc <- products\n\t\treturn\n\t}\n\t// No 200 status.\n\tif res.StatusCode != 200 {\n\t\tError.Print(errors.New(fmt.Sprintf(\"Getting products same Ean from zunkasite.\\nstatus: %v\\nbody: %v\", res.StatusCode, string(resBody))))\n\t\tc <- products\n\t\treturn\n\t}\n\terr = json.Unmarshal(resBody, &products)\n\tif err != nil {\n\t\tError.Print(err)\n\t}\n\t// Debug.Printf(\"Product[0]: %v\", products[0])\n\tc <- products\n\treturn\n}", "func (w *FabricSDKWrapper) AsyncInvoke(channelID string, userName string, orgName string, chaincodeID string, ccFunctionName string, args []string) (channel.Response, error) {\n\n\t// TODO implement callbackURL and remaining todos for normal invoke\n\n\t// Create channel client\n\tchannelClient, err := w.createChannelClient(channelID, userName, orgName)\n\n\t// Create invoke request\n\trequest := channel.Request{\n\t\tChaincodeID: chaincodeID,\n\t\tFcn: ccFunctionName,\n\t\tArgs: utils.AsBytes(args),\n\t}\n\n\t// Create a request (proposal) and send it\n\tresponse, err := channelClient.Execute(request)\n\tif err != nil {\n\t\treturn response, invokeerror.Errorf(invokeerror.TransientError, \"SendTransactionProposal 
return error: %v\", err)\n\t}\n\n\treturn response, nil\n}", "func main() {\n //\n //\n fmt.Printf(\"\\n-- main has been started --\\n\")\n //\n //\n //go func() {\n // fmt.Println(http.ListenAndServe(\"0.0.0.0:6060\", nil))\n //}()\n path1 := \"/tmp/test\"\n //path2 := \"/etc/group\"\n //path3 := \"/proc/net\"\n messages := make(chan majesta.CompNotes, 100)\n fmt.Printf(\"\\n:: creating worker-pool::\\n\")\n wp := chase.WPCreate()\n fmt.Printf(\"\\n:: worker pool has been created::\\n\")\n //\n fmt.Printf(\"\\n::start chasing ..\\n\")\n _ = chase.Listen(path1, messages, wp)\n fmt.Printf(\"\\n::chase has been started ..\\n\")\n //\n //_ = chase.Listen(path2, messages, wp)\n //_ = chase.Listen(path3, messages, wp)\n //go func(){\n // time.Sleep( 10000 * time.Millisecond)\n // wp.RemoveTarget(\"/tmp/test/test2/toremove.txt\")\n //}()\n evebridge.Handle(messages)\n //\n}", "func (client *Client) GetWsCustomizedChEcomContent(request *GetWsCustomizedChEcomContentRequest) (response *GetWsCustomizedChEcomContentResponse, err error) {\n\tresponse = CreateGetWsCustomizedChEcomContentResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}", "func (c *InitRainbondCluster) GetChan() chan apiv1.Message {\n\treturn c.result\n}", "func (n *OpenBazaarNode) GetModeratorsAsync(ctx context.Context) <-chan peer.ID {\n\tch := make(chan peer.ID)\n\n\tgo func() {\n\t\tc, err := cid.Decode(moderatorCid)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Error decoding moderator cid: %s\", err)\n\t\t\tclose(ch)\n\t\t\treturn\n\t\t}\n\t\tprovCh := n.ipfsNode.Routing.FindProvidersAsync(ctx, c, maxModerators)\n\n\t\tfor prov := range provCh {\n\t\t\tch <- prov.ID\n\t\t}\n\t\tclose(ch)\n\t}()\n\n\treturn ch\n}", "func runNaiveGoRoutine(gr g.Graph, poolSize int, debug int, c chan g.Graph) {\n\tc <- RunNaive(gr, poolSize, debug)\n}", "func web(srvChan chan string, channel, nick, hostname string, args []string) {\n\tmessage := \"NOTICE \" + channel + \" :https://anex.us/\"\n\tlog.Println(message)\n\tsrvChan <- message\n}", "func finder(mineChan <-chan string) <-chan string {\n\tfoundOreChan := make(chan string)\n\tgo func() {\n\t\tfor item := range mineChan {\n\t\t\tif item == \"ore\" {\n\t\t\t\tfoundOreChan <- item\n\t\t\t\tfmt.Println(\"Finder found ore\")\n\t\t\t}\n\t\t}\n\t\tclose(foundOreChan)\n\t}()\n\treturn foundOreChan\n}", "func (client *Client) GetOcJusticeTerminalCaseWithChan(request *GetOcJusticeTerminalCaseRequest) (<-chan *GetOcJusticeTerminalCaseResponse, <-chan error) {\n\tresponseChan := make(chan *GetOcJusticeTerminalCaseResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetOcJusticeTerminalCase(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func (client *Client) GetOpenNLUWithChan(request *GetOpenNLURequest) (<-chan *GetOpenNLUResponse, <-chan error) {\n\tresponseChan := make(chan *GetOpenNLUResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetOpenNLU(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, 
errChan\n}", "func (c *consulClient) Nodes(ctx context.Context, query *Query, resultChan chan *NodeInfo, errorChan chan error) {\n\t//todo optimize: reduce the no of calls\n\t//todo health checks passing\n\t//resultChan := make(chan NodeInfo)\n\t//errorChan := make(chan error)\n\tfor _, dc := range query.QString.Datacenters {\n\t\tqo := query.QueryOptions.WithContext(ctx) // returns a new obj with ctx\n\t\tqo.Datacenter = dc\n\t\tqo.Namespace = query.QString.Namespace\n\t\tfor _, tag := range query.QString.Tags {\n\t\t\tgo func(datacenter string, serviceName string, tag string, qo *api.QueryOptions) {\n\t\t\t\tfmt.Println(\"Go routine started\", serviceName, tag, datacenter)\n\t\t\t\tdefer fmt.Println(\"Go routine exited\", serviceName, tag, datacenter)\n\t\t\t\tres, _, err := c.api.Service(query.QString.ServiceName, tag, true, qo)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrorChan <- err\n\t\t\t\t}\n\t\t\t\tfor _, r := range res {\n\t\t\t\t\tnodeInfo := NodeInfo{\n\t\t\t\t\t\tServiceName: r.Service.Service,\n\t\t\t\t\t\tDataCenter: r.Node.Datacenter,\n\t\t\t\t\t\tAddress: r.Node.Address,\n\t\t\t\t\t\tServicePort: r.Service.Port,\n\t\t\t\t\t\t//ServiceTags: r.Node.TaggedAddresses,\n\t\t\t\t\t\tModifyIndex: r.Node.ModifyIndex,\n\t\t\t\t\t}\n\t\t\t\t\tresultChan <- &nodeInfo\n\t\t\t\t}\n\t\t\t}(dc, query.QString.ServiceName, tag, qo)\n\t\t}\n\t}\n}", "func (client *Client) ReleaseAnycastEipAddressWithChan(request *ReleaseAnycastEipAddressRequest) (<-chan *ReleaseAnycastEipAddressResponse, <-chan error) {\n\tresponseChan := make(chan *ReleaseAnycastEipAddressResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.ReleaseAnycastEipAddress(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func (client *Client) ConvertInvoiceWithChan(request *ConvertInvoiceRequest) (<-chan *ConvertInvoiceResponse, <-chan error) {\n\tresponseChan := make(chan *ConvertInvoiceResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.ConvertInvoice(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func (client *Client) BeginVnDialogueWithCallback(request *BeginVnDialogueRequest, callback func(response *BeginVnDialogueResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *BeginVnDialogueResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.BeginVnDialogue(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func consume(newschannel chan *InfoChanel) {\n\tfor {\n\t\tval, ok := <-newschannel\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\t\trenderToSTDOUT(val)\n\t\ttime.Sleep(1)\n\t}\n}", "func (client *Client) DescribeClusterServiceConfigForAdminWithChan(request *DescribeClusterServiceConfigForAdminRequest) (<-chan *DescribeClusterServiceConfigForAdminResponse, <-chan error) {\n\tresponseChan := make(chan 
*DescribeClusterServiceConfigForAdminResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.DescribeClusterServiceConfigForAdmin(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func (f *frontier) Eligible(ctx context.Context) <-chan string {\n\turi := make(chan string)\n\tgo func() {\n\t\tdefer close(uri)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t}\n\t\t\tif len(f.nbs) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\turi <- f.nbs[0].uri.String()\n\t\t}\n\t}()\n\treturn uri\n}", "func (m *Monitor) RunAsync() chan error {\n\tklog.V(5).Info(\"starting leader elect bit\")\n\tgo func() {\n\t\tdefer close(m.c)\n\t\tif err := m.runLeaderElect(); err != nil {\n\t\t\t// Return the error to the calling thread\n\t\t\tm.c <- err\n\t\t}\n\t}()\n\tklog.V(5).Info(\"starting leader elect bit started\")\n\treturn m.c\n}", "func (client *Client) CreateBoxCodeWithChan(request *CreateBoxCodeRequest) (<-chan *CreateBoxCodeResponse, <-chan error) {\n\tresponseChan := make(chan *CreateBoxCodeResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.CreateBoxCode(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func (client *Client) VerifyCenWithChan(request *VerifyCenRequest) (<-chan *VerifyCenResponse, <-chan error) {\n\tresponseChan := make(chan *VerifyCenResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.VerifyCen(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func (z *ZkPlus) EventChan() <-chan zk.Event {\n\treturn z.exposedChan\n}", "func (p *HostedProgramInfo) WaitChan() <-chan bool {\n\treturn p.Done\n}", "func newCommune(n int) *commune {\n\treturn &commune{ch: make(chan string, n)}\n}", "func (client *Client) CreateCustomCallTaggingWithChan(request *CreateCustomCallTaggingRequest) (<-chan *CreateCustomCallTaggingResponse, <-chan error) {\n\tresponseChan := make(chan *CreateCustomCallTaggingResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.CreateCustomCallTagging(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func (client *Client) ReleaseEipSegmentAddressWithChan(request *ReleaseEipSegmentAddressRequest) (<-chan *ReleaseEipSegmentAddressResponse, <-chan error) {\n\tresponseChan := make(chan *ReleaseEipSegmentAddressResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer 
close(errChan)\n\t\tresponse, err := client.ReleaseEipSegmentAddress(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func (client *Client) GetArmsConsoleUrlWithChan(request *GetArmsConsoleUrlRequest) (<-chan *GetArmsConsoleUrlResponse, <-chan error) {\n\tresponseChan := make(chan *GetArmsConsoleUrlResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetArmsConsoleUrl(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func (dht *FullRT) FindProvidersAsync(ctx context.Context, key cid.Cid, count int) <-chan peer.AddrInfo {\n\tif !dht.enableProviders || !key.Defined() {\n\t\tpeerOut := make(chan peer.AddrInfo)\n\t\tclose(peerOut)\n\t\treturn peerOut\n\t}\n\n\tchSize := count\n\tif count == 0 {\n\t\tchSize = 1\n\t}\n\tpeerOut := make(chan peer.AddrInfo, chSize)\n\n\tkeyMH := key.Hash()\n\n\tlogger.Debugw(\"finding providers\", \"cid\", key, \"mh\", internal.LoggableProviderRecordBytes(keyMH))\n\tgo dht.findProvidersAsyncRoutine(ctx, keyMH, count, peerOut)\n\treturn peerOut\n}", "func dcrm_sign_ed(msgprex string,txhash string,save string,pk string,cointype string,ch chan interface{}) string {\n\n txhashs := []rune(txhash)\n if string(txhashs[0:2]) == \"0x\" {\n\ttxhash = string(txhashs[2:])\n }\n\n w,err := FindWorker(msgprex)\n if w == nil || err != nil {\n\tlogs.Debug(\"===========get worker fail.=============\")\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:no find worker\",Err:GetRetErr(ErrNoFindWorker)}\n\tch <- res\n\treturn \"\"\n }\n id := w.id\n\n GetEnodesInfo(w.groupid) \n \n if int32(Enode_cnts) != int32(NodeCnt) {\n\tlogs.Debug(\"============the net group is not ready.please try again.================\")\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:the group is not ready\",Err:GetRetErr(ErrGroupNotReady)}\n\tch <- res\n\treturn \"\"\n }\n\n logs.Debug(\"===================!!!Start!!!====================\")\n\n bak_sig := Sign_ed(msgprex,save,txhash,cointype,pk,ch,id)\n return bak_sig\n}", "func (client *Client) ServiceStatusWithChan(request *ServiceStatusRequest) (<-chan *ServiceStatusResponse, <-chan error) {\nresponseChan := make(chan *ServiceStatusResponse, 1)\nerrChan := make(chan error, 1)\nerr := client.AddAsyncTask(func() {\ndefer close(responseChan)\ndefer close(errChan)\nresponse, err := client.ServiceStatus(request)\nif err != nil {\nerrChan <- err\n} else {\nresponseChan <- response\n}\n})\nif err != nil {\nerrChan <- err\nclose(responseChan)\nclose(errChan)\n}\nreturn responseChan, errChan\n}", "func (client *Client) NormalRpcHsfApiWithChan(request *NormalRpcHsfApiRequest) (<-chan *NormalRpcHsfApiResponse, <-chan error) {\n\tresponseChan := make(chan *NormalRpcHsfApiResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.NormalRpcHsfApi(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- 
err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func (client *Client) GetEMapWithCallback(request *GetEMapRequest, callback func(response *GetEMapResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetEMapResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetEMap(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func InitServidorVistas(me comun.HostPort) {\n\t// Init\n\te := new(typeSV)\n\te.tick = time.Tick(timeHeartbeat * time.Millisecond)\n\te.requests = make(chan msgsys.Message)\n\te.nodeList = make(map[comun.HostPort]*int)\n\te.actualView.NumView = 0\n\te.attemptView.NumView = 0\n\te.ms = msgsys.Make(me)\n\te.state = s_init\n\t// Start the mail box for request\n\tgo e.mailBox()\n\tfmt.Println(\"GV operative -> \", e.me)\n\t// Loop while all ok\n\tfor e.state != s_fatal {\n\t\te.tratarEventos()\n\t}\n\tlog.Fatal(\"ERROR: The content has been losed\")\n}", "func handle(s *OutboundServer) {\n\n\tclient, err := Dial(&DialConfig{Address: fmt.Sprintf(\"%s:%d\", cfg.Redis.Ip, cfg.Redis.Port)})\n\n\tif err != nil {\n\n\t\tError(\"Error occur in connecting redis\", err)\n\t\treturn\n\n\t}\n\n\tfor {\n\n\t\tselect {\n\n\t\tcase conn := <-s.Conns:\n\t\t\tNotice(\"New incomming connection: %v\", conn)\n\n\t\t\tif err := conn.Connect(); err != nil {\n\t\t\t\tError(\"Got error while accepting connection: %s\", err)\n\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tmsg, err := conn.ReadMessage()\n\n\t\t\tif err == nil && msg != nil {\n\n\t\t\t\t//conn.Send(\"myevents\")\n\t\t\t\tDebug(\"Connect message %s\", msg.Headers)\n\n\t\t\t\tuniqueID := msg.GetCallUUID()\n\t\t\t\tfrom := msg.GetHeader(\"Caller-Caller-Id-Number\")\n\t\t\t\tto := msg.GetHeader(\"Caller-Destination-Number\")\n\t\t\t\tdirection := msg.GetHeader(\"Call-Direction\")\n\t\t\t\tchannelStatus := msg.GetHeader(\"Answer-State\")\n\t\t\t\toriginateSession := msg.GetHeader(\"Variable_originate_session_uuid\")\n\t\t\t\tcompany := msg.GetHeader(\"Variable_company\")\n\t\t\t\ttenant := msg.GetHeader(\"Variable_tenant\")\n\t\t\t\tskill := msg.GetHeader(\"Variable_skill\")\n\t\t\t\tfsUUID := msg.GetHeader(\"Core-Uuid\")\n\t\t\t\tfsHost := msg.GetHeader(\"Freeswitch-Hostname\")\n\t\t\t\tfsName := msg.GetHeader(\"Freeswitch-Switchname\")\n\t\t\t\tfsIP := msg.GetHeader(\"Freeswitch-Ipv4\")\n\t\t\t\tcallerContext := msg.GetHeader(\"Caller-Context\")\n\n\t\t\t\t//conn.Send(fmt.Sprintf(\"myevent json %s\", uniqueID))\n\n\t\t\t\tif len(originateSession) == 0 {\n\n\t\t\t\t\tDebug(\"New Session created ---> %s\", uniqueID)\n\n\t\t\t\t} else {\n\n\t\t\t\t}\n\n\t\t\t\tDebug(from)\n\t\t\t\tDebug(to)\n\t\t\t\tDebug(direction)\n\t\t\t\tDebug(channelStatus)\n\t\t\t\tDebug(fsUUID)\n\t\t\t\tDebug(fsHost)\n\t\t\t\tDebug(fsName)\n\t\t\t\tDebug(fsIP)\n\t\t\t\tDebug(originateSession)\n\t\t\t\tDebug(callerContext)\n\t\t\t\tDebug(company)\n\t\t\t\tDebug(tenant)\n\t\t\t\tDebug(skill)\n\n\t\t\t\tcomapnyi, _ := strconv.Atoi(company)\n\t\t\t\ttenanti, _ := strconv.Atoi(tenant)\n\n\t\t\t\tif direction == \"outbound\" {\n\t\t\t\t\tDebug(\"OutBound Call recived ---->\")\n\n\t\t\t\t\t//if channelStatus != \"answered\" {\n\t\t\t\t\t////////////////////////////////////////////////////////////\n\t\t\t\t\tif len(originateSession) > 0 {\n\n\t\t\t\t\t\tDebug(\"Original session found %s\", originateSession)\n\n\t\t\t\t\t\tvar 
isStored = true\n\t\t\t\t\t\tpartykey := fmt.Sprintf(\"ARDS:Leg:%s\", uniqueID)\n\t\t\t\t\t\tkey := fmt.Sprintf(\"ARDS:Session:%s\", originateSession)\n\n\t\t\t\t\t\texsists, exsisterr := client.Exists(key)\n\t\t\t\t\t\tagentStatusRaw, _ := client.HGet(key, \"AgentStatus\")\n\t\t\t\t\t\tagentstatus := string(agentStatusRaw[:])\n\n\t\t\t\t\t\tDebug(\"Client exsists ----------------------->%s\", agentstatus)\n\t\t\t\t\t\tif exsisterr == nil && exsists == true && agentstatus == \"NotFound\" {\n\n\t\t\t\t\t\t\tredisErr := client.SimpleSet(partykey, originateSession)\n\t\t\t\t\t\t\tDebug(\"Store Data : %s \", redisErr)\n\t\t\t\t\t\t\tisStored, redisErr = client.HSet(key, \"AgentStatus\", \"AgentFound\")\n\t\t\t\t\t\t\tDebug(\"Store Data : %s %s\", isStored, redisErr)\n\t\t\t\t\t\t\tisStored, redisErr = client.HSet(key, \"AgentUUID\", uniqueID)\n\t\t\t\t\t\t\tDebug(\"Store Data : %s %s\", isStored, redisErr)\n\t\t\t\t\t\t\t//msg, err = conn.Execute(\"wait_for_answer\", \"\", true)\n\t\t\t\t\t\t\t//Debug(\"wait for answer ----> %s\", msg)\n\t\t\t\t\t\t\t//msg, err = conn.ExecuteSet(\"CHANNEL_CONNECTION\", \"true\", false)\n\t\t\t\t\t\t\t//Debug(\"Set variable ----> %s\", msg)\n\n\t\t\t\t\t\t\tif channelStatus == \"answered\" {\n\n\t\t\t\t\t\t\t\texsists, exsisterr := client.Exists(key)\n\t\t\t\t\t\t\t\tif exsisterr == nil && exsists == true {\n\n\t\t\t\t\t\t\t\t\tclient.HSet(key, \"AgentStatus\", \"AgentConnected\")\n\n\t\t\t\t\t\t\t\t\tcmd := fmt.Sprintf(\"uuid_bridge %s %s\", originateSession, uniqueID)\n\t\t\t\t\t\t\t\t\tDebug(cmd)\n\t\t\t\t\t\t\t\t\tconn.BgApi(cmd)\n\t\t\t\t\t\t\t\t\t/////////////////////Remove///////////////////////\n\n\t\t\t\t\t\t\t\t\tRemoveRequest(comapnyi, tenanti, originateSession)\n\n\t\t\t\t\t\t\t\t} else {\n\n\t\t\t\t\t\t\t\t\tRejectRequest(comapnyi, tenanti, originateSession, \"NoSession\")\n\n\t\t\t\t\t\t\t\t\tconn.ExecuteHangup(uniqueID, \"\", false)\n\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tconn.Send(\"myevents json\")\n\t\t\t\t\t\t\tgo func() {\n\n\t\t\t\t\t\t\t\tfor {\n\t\t\t\t\t\t\t\t\tmsg, err := conn.ReadMessage()\n\n\t\t\t\t\t\t\t\t\tif err != nil {\n\n\t\t\t\t\t\t\t\t\t\t// If it contains EOF, we really dont care...\n\t\t\t\t\t\t\t\t\t\tif !strings.Contains(err.Error(), \"EOF\") {\n\t\t\t\t\t\t\t\t\t\t\tError(\"Error while reading Freeswitch message: %s\", err)\n\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\t\tif msg != nil {\n\n\t\t\t\t\t\t\t\t\t\t\tuuid := msg.GetHeader(\"Unique-ID\")\n\t\t\t\t\t\t\t\t\t\t\tDebug(uuid)\n\n\t\t\t\t\t\t\t\t\t\t\tcontentType := msg.GetHeader(\"Content-Type\")\n\t\t\t\t\t\t\t\t\t\t\tevent := msg.GetHeader(\"Event-Name\")\n\t\t\t\t\t\t\t\t\t\t\tDebug(\"Content types -------------------->\", contentType)\n\n\t\t\t\t\t\t\t\t\t\t\tif contentType == \"text/disconnect-notice\" {\n\n\t\t\t\t\t\t\t\t\t\t\t\t//key := fmt.Sprintf(\"ARDS:Session:%s\", uniqueID)\n\n\t\t\t\t\t\t\t\t\t\t\t} else {\n\n\t\t\t\t\t\t\t\t\t\t\t\tif event == \"CHANNEL_ANSWER\" {\n\n\t\t\t\t\t\t\t\t\t\t\t\t\tfmt.Printf(\"%s\", event)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\texsists, exsisterr := client.Exists(key)\n\t\t\t\t\t\t\t\t\t\t\t\t\tif exsisterr == nil && exsists == true {\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tclient.HSet(key, \"AgentStatus\", \"AgentConnected\")\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tcmd := fmt.Sprintf(\"uuid_bridge %s %s\", originateSession, 
uniqueID)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tDebug(cmd)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tconn.BgApi(cmd)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t/////////////////////Remove///////////////////////\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tRemoveRequest(comapnyi, tenanti, originateSession)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t} else {\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tRejectRequest(comapnyi, tenanti, originateSession, \"NoSession\")\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tconn.ExecuteHangup(uniqueID, \"\", false)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t\t\t\t\t} else if event == \"CHANNEL_HANGUP\" {\n\n\t\t\t\t\t\t\t\t\t\t\t\t\tvalue1, getErr1 := client.HGet(key, \"AgentStatus\")\n\t\t\t\t\t\t\t\t\t\t\t\t\tagentstatus := string(value1[:])\n\t\t\t\t\t\t\t\t\t\t\t\t\tif getErr1 == nil {\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tif agentstatus != \"AgentConnected\" {\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif agentstatus == \"AgentKilling\" {\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t} else {\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t//////////////////////////////Reject//////////////////////////////////////////////\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t//http://localhost:2225/request/remove/company/tenant/sessionid\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tRejectRequest(comapnyi, tenanti, originateSession, \"AgentRejected\")\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tDebug(\"Store Data : %s \", redisErr)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tval, _ := client.HExists(key, \"AgentStatus\")\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif val == true {\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tisStored, redisErr = client.HSet(key, \"AgentStatus\", \"NotFound\")\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tDebug(\"Store Data : %s %s \", redisErr, isStored)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t\tDebug(\"Got message: %s\", msg)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tDebug(\"Leaving go routing after everithing completed OutBound %s %s\", key, partykey)\n\t\t\t\t\t\t\t\t//client.Del(key)\n\t\t\t\t\t\t\t\tclient.Del(partykey)\n\t\t\t\t\t\t\t}()\n\n\t\t\t\t\t\t\t//}\n\t\t\t\t\t\t\t/////////////////////////////////////////////////////////////\n\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\n\t\t\t\t\t\tRejectRequest(1, 3, originateSession, \"NoSession\")\n\n\t\t\t\t\t\tcmd := fmt.Sprintf(\"uuid_kill %s \", uniqueID)\n\t\t\t\t\t\tDebug(cmd)\n\t\t\t\t\t\tconn.BgApi(cmd)\n\n\t\t\t\t\t}\n\n\t\t\t\t} else {\n\n\t\t\t\t\tanswer, err := conn.ExecuteAnswer(\"\", false)\n\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tError(\"Got error while executing answer: %s\", err)\n\n\t\t\t\t\t}\n\n\t\t\t\t\tDebug(\"Answer Message: %s\", answer)\n\t\t\t\t\tDebug(\"Caller UUID: %s\", uniqueID)\n\n\t\t\t\t\t//////////////////////////////////////////Add to queue//////////////////////////////////////\n\t\t\t\t\tskills := []string{skill}\n\t\t\t\t\tAddRequest(comapnyi, tenanti, uniqueID, skills)\n\n\t\t\t\t\t///////////////////////////////////////////////////////////////////////////////////////////\n\n\t\t\t\t\tkey := fmt.Sprintf(\"ARDS:Session:%s\", uniqueID)\n\n\t\t\t\t\tpartykey := fmt.Sprintf(\"ARDS:Leg:%s\", uniqueID)\n\t\t\t\t\tvar isStored = true\n\t\t\t\t\tDebug(\"key ---> %s \", partykey)\n\t\t\t\t\tredisErr := client.SimpleSet(partykey, uniqueID)\n\t\t\t\t\tDebug(\"Store Data : %s \", redisErr)\n\n\t\t\t\t\tDebug(\"key ---> %s \", key)\n\t\t\t\t\tisStored, redisErr = client.HSet(key, \"CallStatus\", \"CallOnQueue\")\n\t\t\t\t\tDebug(\"Store Data : %s \", 
redisErr)\n\t\t\t\t\tisStored, redisErr = client.HSet(key, \"AgentStatus\", \"NotFound\")\n\n\t\t\t\t\tDebug(\"Store Data : %s %s \", redisErr, isStored)\n\n\t\t\t\t\tconn.Send(\"myevents json\")\n\t\t\t\t\tconn.Send(\"linger\")\n\t\t\t\t\tif sm, err := conn.Execute(\"playback\", \"local_stream://moh\", false); err != nil {\n\t\t\t\t\t\tError(\"Got error while executing speak: %s\", err)\n\n\t\t\t\t\t} else {\n\n\t\t\t\t\t\tDebug(\"Playback reply %s\", sm)\n\n\t\t\t\t\t}\n\n\t\t\t\t\tDebug(\"Leaving go routing after everithing completed Inbound\")\n\n\t\t\t\t\t/////////////////////////////////////////////////////////////////////////////////////////////////\n\n\t\t\t\t\tgo func() {\n\n\t\t\t\t\t\tfor {\n\t\t\t\t\t\t\tmsg, err := conn.ReadMessage()\n\n\t\t\t\t\t\t\tif err != nil {\n\n\t\t\t\t\t\t\t\t// If it contains EOF, we really dont care...\n\t\t\t\t\t\t\t\tif !strings.Contains(err.Error(), \"EOF\") {\n\t\t\t\t\t\t\t\t\tError(\"Error while reading Freeswitch message: %s\", err)\n\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tif msg != nil {\n\n\t\t\t\t\t\t\t\t\tuuid := msg.GetHeader(\"Unique-ID\")\n\t\t\t\t\t\t\t\t\tDebug(uuid)\n\n\t\t\t\t\t\t\t\t\tcontentType := msg.GetHeader(\"Content-Type\")\n\t\t\t\t\t\t\t\t\tevent := msg.GetHeader(\"Event-Name\")\n\t\t\t\t\t\t\t\t\tapplication := msg.GetHeader(\"variable_current_application\")\n\n\t\t\t\t\t\t\t\t\tDebug(\"Content types -------------------->\", contentType)\n\t\t\t\t\t\t\t\t\t//response := msg.GetHeader(\"variable_current_application_response\")\n\t\t\t\t\t\t\t\t\tif contentType == \"text/disconnect-notice\" {\n\n\t\t\t\t\t\t\t\t\t\t//key := fmt.Sprintf(\"ARDS:Session:%s\", uniqueID)\n\n\t\t\t\t\t\t\t\t\t} else {\n\n\t\t\t\t\t\t\t\t\t\tDebug(\"Event -------------------->\", event)\n\n\t\t\t\t\t\t\t\t\t\tif event == \"CHANNEL_EXECUTE_COMPLETE\" && application == \"playback\" {\n\n\t\t\t\t\t\t\t\t\t\t\tvalue1, getErr1 := client.HGet(key, \"AgentStatus\")\n\t\t\t\t\t\t\t\t\t\t\tsValue1 := string(value1[:])\n\n\t\t\t\t\t\t\t\t\t\t\tvalue2, getErr2 := client.HGet(key, \"AgentUUID\")\n\t\t\t\t\t\t\t\t\t\t\tsValue2 := string(value2[:])\n\n\t\t\t\t\t\t\t\t\t\t\tDebug(\"Client side connection values %s %s %s %s\", getErr1, getErr2, sValue1, sValue2)\n\n\t\t\t\t\t\t\t\t\t\t\tif getErr1 == nil && getErr2 == nil && sValue1 == \"AgentConnected\" && len(sValue2) > 0 {\n\n\t\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\t} else if event == \"CHANNEL_HANGUP\" {\n\n\t\t\t\t\t\t\t\t\t\t\tvalue1, getErr1 := client.HGet(key, \"AgentStatus\")\n\t\t\t\t\t\t\t\t\t\t\tagentstatus := string(value1[:])\n\n\t\t\t\t\t\t\t\t\t\t\tvalue2, _ := client.HGet(key, \"AgentUUID\")\n\t\t\t\t\t\t\t\t\t\t\tsValue2 := string(value2[:])\n\t\t\t\t\t\t\t\t\t\t\tif getErr1 == nil {\n\n\t\t\t\t\t\t\t\t\t\t\t\tif agentstatus != \"AgentConnected\" {\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t//////////////////////////////Remove//////////////////////////////////////////////\n\n\t\t\t\t\t\t\t\t\t\t\t\t\tif agentstatus == \"AgentFound\" {\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tclient.HSet(key, \"AgentStatus\", \"AgentKilling\")\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tcmd := fmt.Sprintf(\"uuid_kill %s \", sValue2)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tDebug(cmd)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tconn.Api(cmd)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tRejectRequest(comapnyi, tenanti, uniqueID, \"ClientRejected\")\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tRemoveRequest(comapnyi, tenanti, 
uniqueID)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t\t\t\tclient.Del(key)\n\t\t\t\t\t\t\t\t\t\t\tclient.Del(partykey)\n\t\t\t\t\t\t\t\t\t\t\tconn.Exit()\n\n\t\t\t\t\t\t\t\t\t\t} else if event == \"CHANNEL_HANGUP_COMPLETED\" {\n\n\t\t\t\t\t\t\t\t\t\t\tconn.Close()\n\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t//Debug(\"Got message: %s\", msg)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tDebug(\"Leaving go routing after everithing completed Inbound %s %s\", key, partykey)\n\t\t\t\t\t\tclient.Del(key)\n\t\t\t\t\t\tclient.Del(partykey)\n\t\t\t\t\t}()\n\n\t\t\t\t}\n\n\t\t\t} else {\n\n\t\t\t\tError(\"Got Error %s\", err)\n\t\t\t\tconn.Exit()\n\t\t\t}\n\n\t\tdefault:\n\t\t}\n\t}\n\n}", "func (bsnet *impl) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.ID {\n\tout := make(chan peer.ID, max)\n\tgo func() {\n\t\tdefer close(out)\n\t\tproviders := bsnet.routing.FindProvidersAsync(ctx, k, max)\n\t\tfor info := range providers {\n\t\t\tif info.ID == bsnet.host.ID() {\n\t\t\t\tcontinue // ignore self as provider\n\t\t\t}\n\t\t\tbsnet.host.Peerstore().AddAddrs(info.ID, info.Addrs, peerstore.TempAddrTTL)\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase out <- info.ID:\n\t\t\t}\n\t\t}\n\t}()\n\treturn out\n}", "func main() {\n\n\tc_amount_pages := make(chan int)\n\tgo fn_get__amount_pages(c_amount_pages)\n\tint_amount := <- c_amount_pages\n\n\tfn_get__job_infos(int_amount)\n}", "func cniEventHandler(ctx context.Context, targetCNIcfg *aws.TargetCNIconfig,\n\ttargetSetter func([]string, []string) error, informer func(context.Context, chan<- []string) error) {\n\tlog.Infoln(\"Starting CNI event handler\")\n\n\trateLimiter := time.NewTicker(cniEventRateLimit)\n\tdefer rateLimiter.Stop()\n\n\tendpointCh := make(chan []string, 10)\n\tgo func() {\n\t\terr := informer(ctx, endpointCh)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Informer failed: %v\", err)\n\t\t\treturn\n\t\t}\n\t}()\n\n\tvar cniTargetGroupARNs, endpoints []string\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase cniTargetGroupARNs = <-targetCNIcfg.TargetGroupCh:\n\t\t\tlog.Debugf(\"new message target groups: %v\", cniTargetGroupARNs)\n\t\tcase endpoints = <-endpointCh:\n\t\t\tlog.Debugf(\"new message endpoints: %v\", endpoints)\n\t\t}\n\n\t\t// prevent cleanup due to startup inconsistenty, arns and endpoints can be empty but never nil\n\t\tif cniTargetGroupARNs == nil || endpoints == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif len(endpointCh) > 0 || len(targetCNIcfg.TargetGroupCh) > 0 {\n\t\t\tlog.Debugf(\"flushing, messages queued: %d:%d\", len(endpointCh), len(cniTargetGroupARNs))\n\t\t\tcontinue\n\t\t}\n\t\t<-rateLimiter.C\n\t\terr := targetSetter(endpoints, cniTargetGroupARNs)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t}\n\t}\n}", "func GetSummy(acc model.Account, c chan string) {\n\tswitch acc.Service {\n\tcase \"towngas\":\n\t\tgo GetNewsNoticeAsync(acc, c)\n\n\tcase \"clp\":\n\t\tgo GetServiceDashboard(acc, c)\n\n\tcase \"wsd\":\n\t\tgo ElectronicBill(acc, c)\n\n\t}\n}", "func GenerateServiceRequest() <-chan Sig { // returns a receive only channel of string\n\tchannel := make(chan Sig)\n\n\tgo func() {\n\t\ttime.Sleep(2 * time.Second) //wait a while before starting\n\t\tIdentity := 0\n\t\t//msisdn := rand.Int()\n\n\t\t// Send CCR every x*rand seconds\n\t\tfor {\n\t\t\t// create random fake MSISDN\n\t\t\tmsisdn := random(1000000, 4999999)\n\t\t\tmsisdn = msisdn + 
46702000000\n\n\t\t\tIdentity = Identity + 1\n\n\t\t\tinfoElem := Sig{\n\t\t\t\tCode: \"CCR\",\n\t\t\t\tmsisdn: msisdn,\n\t\t\t\tIdentity: Identity,\n\t\t\t}\n\n\t\t\tsleeptime := time.Second * time.Duration(rand.Intn(10))\n\t\t\tlog.Printf(\"Time until next CCR: %s\", sleeptime)\n\t\t\ttime.Sleep(sleeptime)\n\t\t\tchannel <- infoElem\n\t\t}\n\t}()\n\treturn channel\n}", "func (client *Client) QueryVnConversationsWithChan(request *QueryVnConversationsRequest) (<-chan *QueryVnConversationsResponse, <-chan error) {\n\tresponseChan := make(chan *QueryVnConversationsResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.QueryVnConversations(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func (c *Config) Crawl(rawResultChan chan map[*string]int) {\n\tvar wg sync.WaitGroup\n\tworkerPool := make(chan struct{}, c.Customize.CPUCores)\n\n\tfor _, crawlType := range c.Types {\n\t\tfor _, url := range crawlType.CrawlList {\n\t\t\tworkerPool <- struct{}{}\n\t\t\twg.Add(1)\n\n\t\t\tgo func(url string, crawlType *CrawlType) {\n\t\t\t\tdefer func() {\n\t\t\t\t\tif err := recover(); err != nil {\n\t\t\t\t\t\tlog.Printf(\"Goroutine panic: fetching %v : %v\\n\", url, err)\n\t\t\t\t\t}\n\t\t\t\t}()\n\n\t\t\t\tcontainer := crawlType.CrawlElement.Container\n\t\t\t\tcontent := crawlType.CrawlElement.Content\n\t\t\t\tattr := crawlType.CrawlElement.Attr\n\t\t\t\tcondition := crawlType.CrawlElement.Condition\n\n\t\t\t\tlog.Println(\"Crawling:\", url)\n\t\t\t\tresp, err := crawler.Crawl(url, crawlType.CrawlReferer)\n\t\t\t\tutils.Must(err)\n\t\t\t\tdefer resp.Body.Close()\n\n\t\t\t\tgzipReader, err := gzip.NewReader(resp.Body)\n\t\t\t\tutils.Must(err)\n\t\t\t\tdefer gzipReader.Close()\n\n\t\t\t\t// Load the HTML document\n\t\t\t\tdoc, err := goquery.NewDocumentFromReader(gzipReader)\n\t\t\t\tutils.Must(err)\n\n\t\t\t\t// Find items\n\t\t\t\tdoc.Find(container).Each(func(i int, s *goquery.Selection) {\n\t\t\t\t\tpercent := 0\n\t\t\t\t\t// For each item found, get contents\n\t\t\t\t\trawDomain, _ := s.Find(content).Attr(attr)\n\t\t\t\t\tif blockedPercentage := strings.TrimSpace(s.Find(condition).Text()); blockedPercentage != \"\" {\n\t\t\t\t\t\tpercent, _ = strconv.Atoi(blockedPercentage[:len(blockedPercentage)-1])\n\t\t\t\t\t}\n\n\t\t\t\t\trawResult := make(map[*string]int)\n\t\t\t\t\trawResult[&rawDomain] = percent\n\t\t\t\t\trawResultChan <- rawResult\n\t\t\t\t})\n\n\t\t\t\twg.Done()\n\t\t\t\t<-workerPool\n\t\t\t}(url, crawlType)\n\t\t}\n\t}\n\n\twg.Wait()\n\tclose(rawResultChan)\n}", "func processGeo(geoChan, licChan chan *license, wg *sync.WaitGroup) {\n\tfor lic := range geoChan {\n\t\tv := url.Values{}\n\t\tv.Set(\"format\", \"json\")\n\t\tv.Set(\"street\", lic.Address)\n\t\tv.Set(\"city\", lic.City)\n\t\tv.Set(\"state\", lic.State)\n\n\t\tnr := nomResp{}\n\t\terr := getJSON(fmt.Sprint(nomURL, \"?\", v.Encode()), &nr)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tcontinue\n\t\t}\n\t\tif len(nr) < 1 {\n\t\t\tlog.Println(\"No response from \", nomURL)\n\t\t\tcontinue\n\t\t}\n\n\t\tlic.Lat, err = strconv.ParseFloat(nr[0].Lat, 64)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Can't convert latitude\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tlic.Lon, err = strconv.ParseFloat(nr[0].Lon, 64)\n\t\tif err != nil 
{\n\t\t\tlog.Println(\"Can't convert longitude\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Println(\"Got response from\", nomURL)\n\n\t\tlicChan <- lic\n\t}\n\twg.Done()\n}", "func (mdhth *MockDHTHandler) FindProvidersAsync(id string, count int) (<-chan peer2.AddrInfo, error) {\n\tif len(id) != 46 {\n\t\treturn nil, fmt.Errorf(\"FindProvidersAsync: wrong id %s\", id)\n\t}\n\n\tch := make(chan peer2.AddrInfo)\n\taddr, err := AddrToPeerInfo(\"/ip4/104.236.76.40/tcp/4001/ipfs/QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzawe34\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"FindProvidersAsync: AddrToPeerInfo wrong\")\n\t}\n\tgo func() {\n\t\tch <- *addr\n\t}()\n\n\ttime.Sleep(time.Second)\n\n\treturn ch, nil\n}", "func (c *MainChannelCC) Invoke(stub shim.ChaincodeStubInterface) pb.Response {\n funcName, args := stub.GetFunctionAndParameters()\n\n switch funcName {\n // 任务上传\n case \"requestUpload\":\n return requestUpload(stub, args)\n // 查询任务\n case \"requestQuery\":\n return requestQuery(stub, args)\n // 查询全部任务\n case \"requestQueryArr\":\n return requestQueryArr(stub, args)\n // 难度值上传\n case \"difficultyUpload\":\n return difficultyUpload(stub, args)\n // 难度值查询\n case \"difficultyQuery\":\n return difficultyQuery(stub, args)\n // 难度值统一查询\n case \"difficultyQueryArr\":\n return difficultyQueryArr(stub, args)\n // 判断胜利者\n case \"winnerUpload\":\n return winnerUpload(stub, args)\n // 查询胜利者\n case \"winnerQuery\":\n return winnerQuery(stub, args)\n // 查询全部胜利者\n case \"winnerQueryArr\":\n return winnerQueryArr(stub, args)\n // 子channel上传\n case \"subChannelUpload\":\n return subChannelUpload(stub, args)\n // 子channel查询\n case \"subChannelQuery\":\n return subChannelQuery(stub, args)\n // 数据上传\n case \"dataUpload\":\n return dataUpload(stub, args)\n // 查询数据\n case \"dataQuery\":\n return dataQuery(stub, args)\n // 数据统一查询\n case \"dataQueryArr\":\n return dataQueryArr(stub, args)\n // 奖励发放\n case \"rewardsUpload\":\n return rewardsUpload(stub, args)\n // 奖励获取\n case \"rewardsReceive\":\n return rewardsReceive(stub, args)\n }\n\n\treturn shim.Success(nil)\n}", "func dcrm_genPubKey(msgprex string,account string,cointype string,ch chan interface{}, mode string,nonce string) {\n\n fmt.Println(\"========dcrm_genPubKey============\")\n\n wk,err := FindWorker(msgprex)\n if err != nil || wk == nil {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:no find worker\",Err:err}\n\tch <- res\n\treturn\n }\n id := wk.id\n \n GetEnodesInfo(wk.groupid)\n\n if int32(Enode_cnts) != int32(NodeCnt) {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:group is not ready\",Err:GetRetErr(ErrGroupNotReady)}\n\tch <- res\n\treturn\n }\n\n if types.IsDefaultED25519(cointype) {\n\tok2 := KeyGenerate_ed(msgprex,ch,id,cointype)\n\tif ok2 == false {\n\t return\n\t}\n\n\titertmp := workers[id].edpk.Front()\n\tif itertmp == nil {\n\t logs.Debug(\"get workers[id].edpk fail.\")\n\t res := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get workers[id].edpk fail\",Err:GetRetErr(ErrGetGenPubkeyFail)}\n\t ch <- res\n\t return\n\t}\n\tsedpk := []byte(itertmp.Value.(string))\n\n\titertmp = workers[id].edsave.Front()\n\tif itertmp == nil {\n\t logs.Debug(\"get workers[id].edsave fail.\")\n\t res := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get workers[id].edsave fail\",Err:GetRetErr(ErrGetGenSaveDataFail)}\n\t ch <- res\n\t return\n\t}\n\t\n\tsedsave := itertmp.Value.(string)\n\tpubs := 
&PubKeyData{Pub:string(sedpk),Save:sedsave,Nonce:\"0\",GroupId:wk.groupid,LimitNum:wk.limitnum,Mode:mode}\n\tepubs,err := Encode2(pubs)\n\tif err != nil {\n\t res := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:encode PubKeyData fail in req ed pubkey\",Err:err}\n\t ch <- res\n\t return\n\t}\n\t\n\tss,err := Compress([]byte(epubs))\n\tif err != nil {\n\t res := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:compress PubKeyData fail in req ed pubkey\",Err:err}\n\t ch <- res\n\t return\n\t}\n\n\t////TODO\n\tAllAccounts = append(AllAccounts,pubs)\n\t////////\n\n\tpubkeyhex := hex.EncodeToString(sedpk)\n\tfmt.Println(\"===============dcrm_genPubKey,pubkey = %s,nonce =%s ==================\",pubkeyhex,nonce)\n\t////save to db\n\t////add for req addr\n\t/*reqnonce,_,err := GetReqAddrNonce(account)\n\tif err != nil {\n\t reqnonce = \"0\"\n\t}\n\tSetReqAddrNonce(account,reqnonce)*/\n\tkey2 := Keccak256Hash([]byte(strings.ToLower(account))).Hex()\n\tkd := KeyData{Key:[]byte(key2),Data:nonce}\n\tPubKeyDataChan <-kd\n\n\t/////\n\tLdbPubKeyData[key2] = []byte(nonce)\n\t//key2 = Keccak256Hash([]byte(strings.ToLower(account+\":\"+\"LOCKOUT\"))).Hex()\n\t//LdbPubKeyData[key2] = []byte(\"0\")\n\t////\n\n\ttip,reply := AcceptReqAddr(account,cointype,wk.groupid,nonce,wk.limitnum,mode,true,\"true\",\"Success\",pubkeyhex,\"\",\"\",\"\")\n\tif reply != nil {\n\t res := RpcDcrmRes{Ret:\"\",Tip:tip,Err:fmt.Errorf(\"update req addr status error.\")}\n\t ch <- res\n\t return\n\t}\n \n\tif !strings.EqualFold(cointype, \"ALL\") {\n\t h := cryptocoins.NewCryptocoinHandler(cointype)\n\t if h == nil {\n\t\tres := RpcDcrmRes{Ret:\"\",Tip:\"cointype is not supported\",Err:fmt.Errorf(\"req addr fail,cointype is not supported.\")}\n\t\tch <- res\n\t\treturn\n\t }\n\n\t ctaddr, err := h.PublicKeyToAddress(pubkeyhex)\n\t if err != nil {\n\t\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get dcrm addr fail from pubkey:\"+pubkeyhex,Err:err}\n\t\tch <- res\n\t\treturn\n\t }\n\n\t //add for lockout\n\t kd = KeyData{Key:sedpk[:],Data:ss}\n\t PubKeyDataChan <-kd\n\t /////\n\t LdbPubKeyData[string(sedpk[:])] = []byte(ss)\n\t ////\n\n\t key := Keccak256Hash([]byte(strings.ToLower(account + \":\" + cointype))).Hex()\n\t kd = KeyData{Key:[]byte(key),Data:ss}\n\t PubKeyDataChan <-kd\n\t /////\n\t LdbPubKeyData[key] = []byte(ss)\n\t ////\n\n\t key = Keccak256Hash([]byte(strings.ToLower(ctaddr))).Hex()\n\t kd = KeyData{Key:[]byte(key),Data:ss}\n\t PubKeyDataChan <-kd\n\t /////\n\t LdbPubKeyData[key] = []byte(ss)\n\t ////\n\n\t /*lock.Lock()\n\t dir := GetDbDir()\n\t db,err := ethdb.NewLDBDatabase(dir, 0, 0)\n\t if err != nil { \n\t\tlock.Unlock()\n\t\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:open level db fail\",Err:err}\n\t\tch <- res\n\t\treturn\n\t }\n\n\t h := cryptocoins.NewCryptocoinHandler(cointype)\n\t if h == nil {\n\t\tdb.Close()\n\t\tlock.Unlock()\n\t\tres := RpcDcrmRes{Ret:\"\",Tip:\"cointype is not supported\",Err:fmt.Errorf(\"req addr fail,cointype is not supported.\")}\n\t\tch <- res\n\t\treturn\n\t }\n\n\t ctaddr, err := h.PublicKeyToAddress(pubkeyhex)\n\t if err != nil {\n\t\tdb.Close()\n\t\tlock.Unlock()\n\t\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get dcrm addr fail from pubkey:\"+pubkeyhex,Err:err}\n\t\tch <- res\n\t\treturn\n\t }\n\n\t //add for lockout\n\t db.Put(sedpk[:],[]byte(ss))\n\t key := Keccak256Hash([]byte(strings.ToLower(account + \":\" + cointype))).Hex()\n\t db.Put([]byte(key),[]byte(ss))\n\t key = 
Keccak256Hash([]byte(strings.ToLower(ctaddr))).Hex()\n\t db.Put([]byte(key),[]byte(ss))\n\t db.Close()\n\t lock.Unlock()*/\n\t} else {\n\t kd = KeyData{Key:sedpk[:],Data:ss}\n\t PubKeyDataChan <-kd\n\t /////\n\t LdbPubKeyData[string(sedpk[:])] = []byte(ss)\n\t ////\n\n\t key := Keccak256Hash([]byte(strings.ToLower(account + \":\" + cointype))).Hex()\n\t kd = KeyData{Key:[]byte(key),Data:ss}\n\t PubKeyDataChan <-kd\n\t /////\n\t LdbPubKeyData[key] = []byte(ss)\n\t ////\n\n\t for _, ct := range cryptocoins.Cointypes {\n\t\tif strings.EqualFold(ct, \"ALL\") {\n\t\t continue\n\t\t}\n\n\t\th := cryptocoins.NewCryptocoinHandler(ct)\n\t\tif h == nil {\n\t\t continue\n\t\t}\n\t\tctaddr, err := h.PublicKeyToAddress(pubkeyhex)\n\t\tif err != nil {\n\t\t continue\n\t\t}\n\t\t\n\t\tkey = Keccak256Hash([]byte(strings.ToLower(ctaddr))).Hex()\n\t\tkd = KeyData{Key:[]byte(key),Data:ss}\n\t\tPubKeyDataChan <-kd\n\t\t/////\n\t\tLdbPubKeyData[key] = []byte(ss)\n\t\t////\n\t }\n\t \n\t /*lock.Lock()\n\t dir := GetDbDir()\n\t db,err := ethdb.NewLDBDatabase(dir, 0, 0)\n\t if err != nil { \n\t\tlock.Unlock()\n\t\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:open level db fail\",Err:err}\n\t\tch <- res\n\t\treturn\n\t }\n\n\t //add for lockout\n\t db.Put(sedpk[:],[]byte(ss))\n\t key := Keccak256Hash([]byte(strings.ToLower(account + \":\" + cointype))).Hex()\n\t db.Put([]byte(key),[]byte(ss))\n\t \n\t for _, ct := range cryptocoins.Cointypes {\n\t\tif strings.EqualFold(ct, \"ALL\") {\n\t\t continue\n\t\t}\n\n\t\th := cryptocoins.NewCryptocoinHandler(ct)\n\t\tif h == nil {\n\t\t continue\n\t\t}\n\t\tctaddr, err := h.PublicKeyToAddress(pubkeyhex)\n\t\tif err != nil {\n\t\t continue\n\t\t}\n\t\t\n\t\tkey = Keccak256Hash([]byte(strings.ToLower(ctaddr))).Hex()\n\t\tdb.Put([]byte(key),[]byte(ss))\n\t }\n\n\t db.Close()\n\t lock.Unlock()\n\t */\n\t}\n\n\tres := RpcDcrmRes{Ret:pubkeyhex,Tip:\"\",Err:nil}\n\tch <- res\n\treturn\n }\n \n ok := KeyGenerate_ec2(msgprex,ch,id,cointype)\n if ok == false {\n\treturn\n }\n\n iter := workers[id].pkx.Front()\n if iter == nil {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get pkx fail in req ec2 pubkey\",Err:GetRetErr(ErrGetGenPubkeyFail)}\n\tch <- res\n\treturn\n }\n spkx := iter.Value.(string)\n pkx := new(big.Int).SetBytes([]byte(spkx))\n iter = workers[id].pky.Front()\n if iter == nil {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get pky fail in req ec2 pubkey\",Err:GetRetErr(ErrGetGenPubkeyFail)}\n\tch <- res\n\treturn\n }\n spky := iter.Value.(string)\n pky := new(big.Int).SetBytes([]byte(spky))\n ys := secp256k1.S256().Marshal(pkx,pky)\n\n iter = workers[id].save.Front()\n if iter == nil {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get save data fail in req ec2 pubkey\",Err:GetRetErr(ErrGetGenSaveDataFail)}\n\tch <- res\n\treturn\n }\n save := iter.Value.(string)\n pubs := &PubKeyData{Pub:string(ys),Save:save,Nonce:\"0\",GroupId:wk.groupid,LimitNum:wk.limitnum,Mode:mode}\n epubs,err := Encode2(pubs)\n if err != nil {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:encode PubKeyData fail in req ec2 pubkey\",Err:err}\n\tch <- res\n\treturn\n }\n \n ss,err := Compress([]byte(epubs))\n if err != nil {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:compress PubKeyData fail in req ec2 pubkey\",Err:err}\n\tch <- res\n\treturn\n }\n \n ////TODO\n AllAccounts = append(AllAccounts,pubs)\n ////////\n\n pubkeyhex := hex.EncodeToString(ys)\n 
fmt.Println(\"===============dcrm_genPubKey,pubkey = %s,nonce =%s ==================\",pubkeyhex,nonce)\n //tip, err := StorePubAccount(wk.groupid, pubkeyhex, mode)\n //fmt.Printf(\"==== dcrm_genPubKey() ====, StorePubAccount tip: %v, err: %v\\n\", tip, err)\n ////save to db\n \n ////add for req addr\n /*reqnonce,_,err := GetReqAddrNonce(account)\n if err != nil {\n\treqnonce = \"0\"\n }\n SetReqAddrNonce(account,reqnonce)*/\n key2 := Keccak256Hash([]byte(strings.ToLower(account))).Hex()\n kd := KeyData{Key:[]byte(key2),Data:nonce}\n PubKeyDataChan <-kd\n /////\n LdbPubKeyData[key2] = []byte(nonce)\n //key2 = Keccak256Hash([]byte(strings.ToLower(account+\":\"+\"LOCKOUT\"))).Hex()\n //LdbPubKeyData[key2] = []byte(\"0\")\n ////\n\n tip,reply := AcceptReqAddr(account,cointype,wk.groupid,nonce,wk.limitnum,mode,true,\"true\",\"Success\",pubkeyhex,\"\",\"\",\"\")\n if reply != nil {\n\tres := RpcDcrmRes{Ret:\"\",Tip:tip,Err:fmt.Errorf(\"update req addr status error.\")}\n\tch <- res\n\treturn\n }\n\n if !strings.EqualFold(cointype, \"ALL\") {\n\th := cryptocoins.NewCryptocoinHandler(cointype)\n\tif h == nil {\n\t res := RpcDcrmRes{Ret:\"\",Tip:\"cointype is not supported\",Err:fmt.Errorf(\"req addr fail,cointype is not supported.\")}\n\t ch <- res\n\t return\n\t}\n\n\tctaddr, err := h.PublicKeyToAddress(pubkeyhex)\n\tif err != nil {\n\t res := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get dcrm addr fail from pubkey:\"+pubkeyhex,Err:err}\n\t ch <- res\n\t return\n\t}\n\t\n\tkd = KeyData{Key:ys,Data:ss}\n\tPubKeyDataChan <-kd\n\t/////\n\tLdbPubKeyData[string(ys)] = []byte(ss)\n\t////\n\n\tkey := Keccak256Hash([]byte(strings.ToLower(account + \":\" + cointype))).Hex()\n\tkd = KeyData{Key:[]byte(key),Data:ss}\n\tPubKeyDataChan <-kd\n\t/////\n\tLdbPubKeyData[key] = []byte(ss)\n\t////\n\n\tkey = Keccak256Hash([]byte(strings.ToLower(ctaddr))).Hex()\n\tkd = KeyData{Key:[]byte(key),Data:ss}\n\tPubKeyDataChan <-kd\n\t/////\n\tLdbPubKeyData[key] = []byte(ss)\n\t////\n\n\t/*lock.Lock()\n\tdir := GetDbDir()\n\tdb,err := ethdb.NewLDBDatabase(dir, 0, 0)\n\tif err != nil { \n\t lock.Unlock()\n\t res := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:open level db fail\",Err:err}\n\t ch <- res\n\t return\n\t}\n\n\th := cryptocoins.NewCryptocoinHandler(cointype)\n\tif h == nil {\n\t db.Close()\n\t lock.Unlock()\n\t res := RpcDcrmRes{Ret:\"\",Tip:\"cointype is not supported\",Err:fmt.Errorf(\"req addr fail,cointype is not supported.\")}\n\t ch <- res\n\t return\n\t}\n\n\tctaddr, err := h.PublicKeyToAddress(pubkeyhex)\n\tif err != nil {\n\t db.Close()\n\t lock.Unlock()\n\t res := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get dcrm addr fail from pubkey:\"+pubkeyhex,Err:err}\n\t ch <- res\n\t return\n\t}\n\n\t//add for lockout\n\tdb.Put(ys,[]byte(ss))\n\tkey := Keccak256Hash([]byte(strings.ToLower(account + \":\" + cointype))).Hex()\n\tdb.Put([]byte(key),[]byte(ss))\n\tkey = Keccak256Hash([]byte(strings.ToLower(ctaddr))).Hex()\n\tdb.Put([]byte(key),[]byte(ss))\n\tdb.Close()\n\tlock.Unlock()*/\n } else {\n\tkd = KeyData{Key:ys,Data:ss}\n\tPubKeyDataChan <-kd\n\t/////\n\tLdbPubKeyData[string(ys)] = []byte(ss)\n\t////\n\n\tkey := Keccak256Hash([]byte(strings.ToLower(account + \":\" + cointype))).Hex()\n\tkd = KeyData{Key:[]byte(key),Data:ss}\n\tPubKeyDataChan <-kd\n\t/////\n\tLdbPubKeyData[key] = []byte(ss)\n\t////\n\n\tfor _, ct := range cryptocoins.Cointypes {\n\t if strings.EqualFold(ct, \"ALL\") {\n\t\tcontinue\n\t }\n\n\t h := cryptocoins.NewCryptocoinHandler(ct)\n\t if h == nil 
{\n\t\tcontinue\n\t }\n\t ctaddr, err := h.PublicKeyToAddress(pubkeyhex)\n\t if err != nil {\n\t\tcontinue\n\t }\n\t \n\t key = Keccak256Hash([]byte(strings.ToLower(ctaddr))).Hex()\n\t kd = KeyData{Key:[]byte(key),Data:ss}\n\t PubKeyDataChan <-kd\n\t /////\n\t LdbPubKeyData[key] = []byte(ss)\n\t ////\n\t}\n\t/*lock.Lock()\n\tdir := GetDbDir()\n\tdb,err := ethdb.NewLDBDatabase(dir, 0, 0)\n\tif err != nil { \n\t lock.Unlock()\n\t res := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:open level db fail\",Err:err}\n\t ch <- res\n\t return\n\t}\n\n\t//add for lockout\n\tdb.Put(ys,[]byte(ss))\n\tkey := Keccak256Hash([]byte(strings.ToLower(account + \":\" + cointype))).Hex()\n\tdb.Put([]byte(key),[]byte(ss))\n key = Keccak256Hash([]byte(strings.ToLower(pubkeyhex))).Hex()\n db.Put([]byte(key),[]byte(ss))\n\t\n\tfor _, ct := range cryptocoins.Cointypes {\n\t if strings.EqualFold(ct, \"ALL\") {\n\t\tcontinue\n\t }\n\n\t h := cryptocoins.NewCryptocoinHandler(ct)\n\t if h == nil {\n\t\tcontinue\n\t }\n\t ctaddr, err := h.PublicKeyToAddress(pubkeyhex)\n\t if err != nil {\n\t\tcontinue\n\t }\n\t \n\t key = Keccak256Hash([]byte(strings.ToLower(ctaddr))).Hex()\n\t db.Put([]byte(key),[]byte(ss))\n\t}\n\n\tdb.Close()\n\tlock.Unlock()\n\t*/\n }\n \n res := RpcDcrmRes{Ret:pubkeyhex,Tip:\"\",Err:nil}\n ch <- res\n}", "func NewWatcher(cfg *rest.Config, stopChan *chan struct{}) (*NetWatcher,error) {\n netWatcher := &NetWatcher{\n DanmFactories: make(map[string]danminformers.SharedInformerFactory),\n DanmClients: make(map[string]danmclientset.Interface),\n Controllers: make(map[string]cache.Controller),\n StopChan: stopChan,\n }\n //this is how we test if the specific API is used within the cluster, or not\n //we can only create an Informer for an existing API, otherwise we get errors\n dnetClient, err := danmclientset.NewForConfig(cfg)\n if err != nil {\n return nil, err\n }\n for i := 0; i < MaxRetryCount; i++ {\n log.Println(\"INFO: Trying to discover DanmNet API in the cluster...\")\n _, err = dnetClient.DanmV1().DanmNets(\"\").List(context.TODO(), meta_v1.ListOptions{})\n if err != nil {\n log.Println(\"INFO: DanmNet discovery query failed with error:\" + err.Error())\n time.Sleep(RetryInterval * time.Millisecond)\n } else {\n log.Println(\"INFO: DanmNet API seems to be installed in the cluster!\")\n netWatcher.createDnetInformer(dnetClient)\n break\n }\n }\n tnetClient, err := danmclientset.NewForConfig(cfg)\n if err != nil {\n return nil, err\n }\n for i := 0; i < MaxRetryCount; i++ {\n log.Println(\"INFO: Trying to discover TenantNetwork API in the cluster...\")\n _, err = tnetClient.DanmV1().TenantNetworks(\"\").List(context.TODO(), meta_v1.ListOptions{})\n if err != nil {\n log.Println(\"INFO: TenantNetwork discovery query failed with error:\" + err.Error())\n time.Sleep(RetryInterval * time.Millisecond)\n } else {\n log.Println(\"INFO: TenantNetwork API seems to be installed in the cluster!\")\n netWatcher.createTnetInformer(tnetClient)\n break\n }\n }\n cnetClient, err := danmclientset.NewForConfig(cfg)\n if err != nil {\n return nil, err\n }\n for i := 0; i < MaxRetryCount; i++ {\n log.Println(\"INFO: Trying to discover ClusterNetwork API in the cluster...\")\n _, err = cnetClient.DanmV1().ClusterNetworks().List(context.TODO(), meta_v1.ListOptions{})\n if err != nil {\n log.Println(\"INFO: ClusterNetwork discovery query failed with error:\" + err.Error())\n time.Sleep(RetryInterval * time.Millisecond)\n } else {\n log.Println(\"INFO: ClusterNetwork API seems to be installed in the 
cluster!\")\n netWatcher.createCnetInformer(cnetClient)\n break\n }\n }\n nadClient, err := nadclientset.NewForConfig(cfg)\n if err != nil {\n return nil, err\n }\n for i := 0; i < MaxRetryCount; i++ {\n log.Println(\"INFO: Trying to discover NetworkAttachmentDefinition API in the cluster...\")\n _, err = nadClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(\"\").List(context.TODO(), meta_v1.ListOptions{})\n if err != nil {\n log.Println(\"INFO: NetworkAttachmentDefinition discovery query failed with error:\" + err.Error())\n time.Sleep(RetryInterval * time.Millisecond)\n } else {\n log.Println(\"INFO: NetworkAttachmentDefinition API seems to be installed in the cluster!\")\n netWatcher.createNadInformer(nadClient)\n break\n }\n }\n if len(netWatcher.Controllers) == 0 {\n return nil, errors.New(\"no network management APIs are installed in the cluster, netwatcher cannot start!\")\n }\n log.Println(\"Number of watchers started for recognized APIs:\" + strconv.Itoa(len(netWatcher.Controllers)))\n return netWatcher, nil\n}", "func (gE *GameEventsFile) ChannelListener(client *http.Client) {\n\tfor inputData := range gE.DataInput {\n\t\tgE.rwg.Add(1)\n\t\tresp, err := client.Get(inputData)\n\t\tif err != nil {\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\tgE.tokenize(inputData, resp)\n\t}\n\tgE.rwg.Wait()\n\n\t//Tell the pipeline we are done\n\tgE.wg.Done()\n}", "func ProcCurseAddon(addn *cav2.Addon, gv string /*, ml string*/) {\n\tdefer global.WG.Done()\n\tif _, ok := proced.LoadOrStore(addn.ID, true); !ok {\n\t\tfor _, adl := range addn.GameVersionLatestFiles {\n\t\t\tif adl.GameVersion == gv {\n\t\t\t\turl, _ := cav2.GetAddonFile(addn.ID, adl.ProjectFileID)\n\t\t\t\tif utils.Contains(url.GameVersion, gv) {\n\t\t\t\t\tglobal.WG.Add(1)\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\tdefer global.WG.Done()\n\t\t\t\t\t\tfor _, depend := range url.Dependencies {\n\t\t\t\t\t\t\tif depend.Type == 3 {\n\t\t\t\t\t\t\t\tdep, _ := cav2.GetAddon(strconv.Itoa(depend.AddonID))\n\t\t\t\t\t\t\t\tglobal.WG.Add(1)\n\t\t\t\t\t\t\t\tgo ProcCurseAddon(dep, gv /*, ml*/)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}()\n\t\t\t\t\tglobal.WG.Add(1)\n\t\t\t\t\tgo DownloadCurseFile(url, addn.Name)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (fM *FeslManager) NuGetPersonas(event GameSpy.EventClientTLSCommand) {\n\tif !event.Client.IsActive {\n\t\tlog.Noteln(\"Client left\")\n\t\treturn\n\t}\n\n\tif event.Client.RedisState.Get(\"clientType\") == \"server\" {\n\t\tfM.NuGetPersonasServer(event)\n\t\treturn\n\t}\n\n\trows, err := fM.stmtGetHeroesByUserID.Query(event.Client.RedisState.Get(\"uID\"))\n\tif err != nil {\n\t\treturn\n\t}\n\n\tpersonaPacket := make(map[string]string)\n\tpersonaPacket[\"TXN\"] = \"NuGetPersonas\"\n\n\tvar i = 0\n\tfor rows.Next() {\n\t\tvar id, userID, heroName, online string\n\t\terr := rows.Scan(&id, &userID, &heroName, &online)\n\t\tif err != nil {\n\t\t\tlog.Errorln(err)\n\t\t\treturn\n\t\t}\n\t\tpersonaPacket[\"personas.\"+strconv.Itoa(i)] = heroName\n\t\tevent.Client.RedisState.Set(\"ownerId.\"+strconv.Itoa(i+1), id)\n\t\ti++\n\t}\n\n\tevent.Client.RedisState.Set(\"numOfHeroes\", strconv.Itoa(i))\n\n\tpersonaPacket[\"personas.[]\"] = strconv.Itoa(i)\n\n\tevent.Client.WriteFESL(event.Command.Query, personaPacket, event.Command.PayloadID)\n\tfM.logAnswer(event.Command.Query, personaPacket, event.Command.PayloadID)\n}", "func workWithClient(conn net.Conn, db *sql.DB) {\n defer conn.Close()\n fmt.Println(\"connected\")\n newMsgCh := make(chan message, CHAN_NEW_MESSAGE_SIZE)\n\n 
LISTEN_LOOP:\n for {\n select {\n /// read command from user\n case req := <-recvDataCmd(conn):\n sErr := executeCommand(conn, db, req.data, req.err, &newMsgCh)\n if sErr.Err != nil {\n if sErr.Err == io.EOF {\n break LISTEN_LOOP\n }\n sendError(conn, sErr)\n }\n case msg := <- newMsgCh:\n sendData(conn, msg.ToStr(), OK_CODE)\n } // end select\n } // end for\n}", "func handleNode(client *net.Conn, clientid int, nodeChan []chan myParameters) {\n\treader := bufio.NewReader(*client)\n\tfor {\n\t\tmsg, er := reader.ReadString('\\n')\n\t\tif er != nil {\n\t\t\tcontinue\n\t\t}\n\t\tif msg[1] == 'm' && msg[2] == 'a' && msg[3] == 'p' {\n\t\t\t//Receive computer board from nodes\n\t\t\tp := stringToMatrix(msg)\n\t\t\tnodeChan[clientid] <- p\n\t\t}\n\t}\n}", "func (p *EventEmitter) EventPlus(clientChan chan interface{}, namaEvents ...string) {\n\n\tvar updateChanList []chan interface{}\n\tfor _, namaEvent := range namaEvents {\n\t\tupdateChanList, _ = p.mapClients[namaEvent]\n\t\tupdateChanList = append(updateChanList, clientChan)\n\t\tp.mapClients[namaEvent] = updateChanList\n\t}\n\tp.mapEvents[clientChan] = namaEvents\n\n}", "func (client *Client) QueryCustomerAddressListWithCallback(request *QueryCustomerAddressListRequest, callback func(response *QueryCustomerAddressListResponse, err error)) (<-chan int) {\nresult := make(chan int, 1)\nerr := client.AddAsyncTask(func() {\nvar response *QueryCustomerAddressListResponse\nvar err error\ndefer close(result)\nresponse, err = client.QueryCustomerAddressList(request)\ncallback(response, err)\nresult <- 1\n})\nif err != nil {\ndefer close(result)\ncallback(nil, err)\nresult <- 0\n}\nreturn result\n}", "func (client *Client) ModifyDasInstanceConfigWithChan(request *ModifyDasInstanceConfigRequest) (<-chan *ModifyDasInstanceConfigResponse, <-chan error) {\n\tresponseChan := make(chan *ModifyDasInstanceConfigResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.ModifyDasInstanceConfig(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func (g *getter) worker() {\n for c := range g.get_ch { // HL\n g.retryGetChunk(c)\n }\n\n}", "func (client *Client) FindInstanceNodeListWithChan(request *FindInstanceNodeListRequest) (<-chan *FindInstanceNodeListResponse, <-chan error) {\n\tresponseChan := make(chan *FindInstanceNodeListResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.FindInstanceNodeList(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func (client *Client) ModifyClusterServiceConfigForAdminWithCallback(request *ModifyClusterServiceConfigForAdminRequest, callback func(response *ModifyClusterServiceConfigForAdminResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *ModifyClusterServiceConfigForAdminResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.ModifyClusterServiceConfigForAdmin(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != 
nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (node *VincTriggerNode) WaitForEvent(nodeEventStream chan model.ReactorEvent) {\n\tnode.SetReactorRunning(true)\n\ttimeout := time.Second * time.Duration(node.config.Timeout)\n\tvar timer *time.Timer\n\tif timeout == 0 {\n\t\ttimer = time.NewTimer(time.Hour * 24)\n\t\ttimer.Stop()\n\t}else {\n\t\ttimer = time.NewTimer(timeout)\n\t}\n\tdefer func() {\n\t\tnode.SetReactorRunning(false)\n\t\tnode.GetLog().Debug(\"Msg processed by the node \")\n\t\ttimer.Stop()\n\t}()\n\tfor {\n\t\tif timeout > 0 {\n\t\t\ttimer.Reset(timeout)\n\t\t}\n\t\tselect {\n\t\tcase newMsg := <-node.msgInStream:\n\t\t\tvar eventValue string\n\t\t\tif newMsg.Payload.Type == \"cmd.pd7.request\" {\n\t\t\t\trequest := primefimp.Request{}\n\t\t\t\terr := newMsg.Payload.GetObjectValue(&request)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif request.Component == \"shortcut\" && request.Cmd == \"set\" {\n\t\t\t\t\tnode.GetLog().Info(\"shortcut\")\n\t\t\t\t\tif node.config.EventType == \"shortcut\" {\n\t\t\t\t\t\teventValue = fmt.Sprintf(\"%.0f\",request.Id)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}else if newMsg.Payload.Type == \"evt.pd7.notify\" {\n\t\t\t\tnotify := primefimp.Notify{}\n\t\t\t\terr := newMsg.Payload.GetObjectValue(&notify)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif notify.Component == \"hub\" && notify.Cmd == \"set\" {\n\t\t\t\t\tif node.config.EventType == \"mode\" {\n\t\t\t\t\t\thub := notify.GetModeChange()\n\t\t\t\t\t\tif hub != nil {\n\t\t\t\t\t\t\teventValue = hub.Current\n\t\t\t\t\t\t}else {\n\t\t\t\t\t\t\tnode.GetLog().Info(\"ERROR 2\")\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif eventValue != \"\" {\n\t\t\t\tnode.GetLog().Infof(\"Home event = %s\",eventValue)\n\t\t\t\tif !node.config.IsValueFilterEnabled || ((eventValue == node.config.ValueFilter) && node.config.IsValueFilterEnabled) {\n\t\t\t\t\tnode.GetLog().Debug(\"Starting flow\")\n\t\t\t\t\trMsg := model.Message{Payload: fimpgo.FimpMessage{Value: eventValue, ValueType: fimpgo.VTypeString}}\n\t\t\t\t\tnewEvent := model.ReactorEvent{Msg: rMsg, TransitionNodeId: node.Meta().SuccessTransition}\n\t\t\t\t\t// Flow is executed within flow runner goroutine\n\t\t\t\t\tnode.FlowRunner()(newEvent)\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase <-timer.C:\n\t\t\tnode.GetLog().Debug(\"Timeout \")\n\t\t\tnewEvent := model.ReactorEvent{TransitionNodeId: node.Meta().TimeoutTransition}\n\t\t\tnode.GetLog().Debug(\"Starting new flow (timeout)\")\n\t\t\tnode.FlowRunner()(newEvent)\n\t\t\tnode.GetLog().Debug(\"Flow started (timeout) \")\n\t\tcase signal := <-node.FlowOpCtx().TriggerControlSignalChannel:\n\t\t\tnode.GetLog().Debug(\"Control signal \")\n\t\t\tif signal == model.SIGNAL_STOP {\n\t\t\t\tnode.GetLog().Info(\"VincTrigger stopped by SIGNAL_STOP \")\n\t\t\t\treturn\n\t\t\t}else {\n\t\t\t\ttime.Sleep(50*time.Millisecond)\n\t\t\t}\n\t\t}\n\n\t}\n}", "func main() {\n\n\t//get NATS server host\n\tvar urls = flag.String(\"s\", nats.DefaultURL, \"The nats server URLs (separated by comma)\")\n\tflag.Usage = usage\n\tflag.Parse()\n\tlog.Printf(\"urls : '%v'\\n\", urls)\n\tvar hosts = strings.Split(*urls, \",\")\n\tlog.Printf(\"hosts : '%v'\\n\", hosts)\n\n\t//init service and broker\n\tconfig := &moleculer.ServiceBrokerConfig{\n\t\tNatsHost: hosts,\n\t\tHostname: \"testHostname\",\n\t\tNodeID: \"moleculer-go-demo\",\n\t\t// LogLevel: moleculer.DebugLevel,\n\t\tLogLevel: moleculer.ErrorLevel,\n\t\tServices: 
make(map[string]moleculer.Service),\n\t}\n\tconfig.Services[\"demoService\"] = createDemoService()\n\tbroker, err := moleculer.NewServiceBroker(config)\n\tif err != nil {\n\t\tlog.Fatalf(\"NewServiceBroker err: %v\\n\", err)\n\t}\n\tpBroker = broker\n\tbroker.Start()\n\n\t//test call and emit\n\tgo time.AfterFunc(time.Second*1, func() {\n\t\tlog.Info(\"broker.Call demoService.actionA start\")\n\t\tres, err := broker.Call(\"demoService.actionA\", map[string]interface{}{\n\t\t\t\"arg1\": \"aaa\",\n\t\t\t\"arg2\": 123,\n\t\t}, nil)\n\t\tlog.Info(\"broker.Call demoService.actionA end, res: \", res)\n\t\tlog.Info(\"broker.Call demoService.actionA end, err: \", err)\n\n\t\tlog.Info(\"broker.Call demoService.actionB start\")\n\t\tres, err = broker.Call(\"demoService.actionB\", map[string]interface{}{\n\t\t\t\"arg1\": \"bbb\",\n\t\t\t\"arg2\": 456,\n\t\t}, nil)\n\t\tlog.Info(\"broker.Call demoService.actionB end, res: \", res)\n\t\tlog.Info(\"broker.Call demoService.actionB end, err: \", err)\n\n\t\tlog.Info(\"broker.Emit user.create start\")\n\t\terr = broker.Emit(\"user.create\", map[string]interface{}{\n\t\t\t\"user\": \"userA\",\n\t\t\t\"status\": \"create\",\n\t\t})\n\t\tlog.Info(\"broker.Emit user.create end, err: \", err)\n\n\t\tlog.Info(\"broker.Broadcast user.delete start\")\n\t\terr = broker.Broadcast(\"user.delete\", map[string]interface{}{\n\t\t\t\"user\": \"userB\",\n\t\t\t\"status\": \"delete\",\n\t\t})\n\t\tlog.Info(\"broker.Broadcast user.delete end, err: \", err)\n\n\t})\n\n\twaitExit()\n\n}", "func (gi *Invoker) StreamInvoke(ctx context.Context, param *common.Params, rspChanType reflect.Type) (reflect.Value, *common.Address, uint64, error) {\n\t//gloryPkg := newGloryRequestPackage(\"\", param.MethodName, uint64(common.StreamRequestPkg), param.Seq)\n\t//gloryPkg.Params = param.Ins\n\t//// only one rspChannel for once invoke\n\t//rspChannel := make(chan interface{})\n\t//gi.pendingMap.Store(param.Seq, rspChannel)\n\t//rspChan := reflect.MakeChan(rspChanType, 0)\n\t//if err := gloryPkg.sendToConn(gi.gloryConnClient, gi.handler); err != nil {\n\t//\tlog.Error(\"StreamInvoke:gloryPkg.sendToConn(gi.conn, gi.handler) err =\", err)\n\t//\treturn rspChan, nil, 0, GloryErrorConnErr\n\t//}\n\t//timeoutCaller := time.After(time.Millisecond * time.Duration(gi.timeout))\n\t//for {\n\t//\tvar rspRawPkg interface{}\n\t//\tselect {\n\t//\tcase <-timeoutCaller:\n\t//\t\tlog.Error(\"stream invoke timeout\")\n\t//\t\tclose(rspChannel)\n\t//\t\tgi.pendingMap.Delete(param.Seq)\n\t//\t\treturn rspChan, nil, 0, GloryErrorConnErr\n\t//\tcase rspRawPkg = <-rspChannel: // wait until receive StreamReady Pkg:\n\t//\t}\n\t//\trspPkg, ok := rspRawPkg.(*ResponsePackage)\n\t//\tif !ok {\n\t//\t\tlog.Error(\"StreamInvoke:rspRawPkg assert not *ResponsePackage err\")\n\t//\t\treturn rspChan, nil, 0, GloryErrorProtocol\n\t//\t}\n\t//\tif rspPkg.Error.Code != GloryErrorNoErr.Code { // stream rpc invoke not success\n\t//\t\tgi.pendingMap.Delete(param.Seq)\n\t//\t\tclose(rspChannel)\n\t//\t\treturn rspChan, nil, 0, rspPkg.Error\n\t//\t}\n\t//\tif common.PkgType(rspPkg.Header.PkgType) == common.StreamReadyPkg {\n\t//\t\tbreak\n\t//\t}\n\t//}\n\t//\n\t//go func() {\n\t//\tfor {\n\t//\t\trspPkg := (<-rspChannel).(*ResponsePackage)\n\t//\t\tif common.PkgType(rspPkg.Header.PkgType) == common.StreamRecvPkg {\n\t//\t\t\trspChan.Send(reflect.ValueOf(rspPkg.Result[0]).Elem())\n\t//\t\t}\n\t//\t}\n\t//}()\n\t// todo: now StreamInvoke never return error from server\n\treturn reflect.Value{}, gi.addr, param.Seq, nil\n}", "func 
(evpool *EvidencePool) EvidenceChan() <-chan ttypes.Evidence {\n\treturn evpool.evidenceChan\n}", "func EasyQuery(ch chan RCONQuery) func(string) []byte {\n\treturn func(cmd string) []byte {\n\t\tres := make(chan []byte)\n\t\tch <- RCONQuery{Command: cmd, Response: res}\n\n\t\treturn <-res\n\t}\n}", "func (client *Client) AssociateAclsWithListenerWithChan(request *AssociateAclsWithListenerRequest) (<-chan *AssociateAclsWithListenerResponse, <-chan error) {\n\tresponseChan := make(chan *AssociateAclsWithListenerResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.AssociateAclsWithListener(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func DevsHandler(res chan<- RpcRequest, minerInfo *MinerInformation, c *Client, wg *sync.WaitGroup) {\n\t//Signal that the thread is started\n\twg.Done()\n\n\t//Now do this forever and ever!\n\tfor {\n\t\t//If it return false it has failed to connect\n\t\t//So wait abit more before next time\n\t\tif UpdateDevs(c.Name, true) == false {\n\t\t\tlog.Println(\"Failed to fetch new data from: \" + c.Name)\n\t\t\t//No response so wait somee extra before try again\n\t\t\ttime.Sleep(time.Duration(c.RefreshInterval*2) * time.Second)\n\t\t}\n\n\t\t//Now sleep\n\t\ttime.Sleep(time.Duration(c.RefreshInterval) * time.Second)\n\t}\n}", "func (client *Client) RecognizeFlowerWithChan(request *RecognizeFlowerRequest) (<-chan *RecognizeFlowerResponse, <-chan error) {\n\tresponseChan := make(chan *RecognizeFlowerResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.RecognizeFlower(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func rpcClientConnectLoop(legacyRPCServer *legacyrpc.Server, loader *wallet.Loader) {\n\tvar certs []byte\n\t// if !cfg.UseSPV {\n\tcerts = readCAFile()\n\t// }\n\tfor {\n\t\tvar (\n\t\t\tchainClient chain.Interface\n\t\t\terr error\n\t\t)\n\t\t// if cfg.UseSPV {\n\t\t// \tvar (\n\t\t// \t\tchainService *neutrino.ChainService\n\t\t// \t\tspvdb walletdb.DB\n\t\t// \t)\n\t\t// \tnetDir := networkDir(cfg.AppDataDir.Value, ActiveNet.Params)\n\t\t// \tspvdb, err = walletdb.Create(\"bdb\",\n\t\t// \t\tfilepath.Join(netDir, \"neutrino.db\"))\n\t\t// \tdefer spvdb.Close()\n\t\t// \tif err != nil {\n\t\t// \t\tlog<-cl.Errorf{\"unable to create Neutrino DB: %s\", err)\n\t\t// \t\tcontinue\n\t\t// \t}\n\t\t// \tchainService, err = neutrino.NewChainService(\n\t\t// \t\tneutrino.Config{\n\t\t// \t\t\tDataDir: netDir,\n\t\t// \t\t\tDatabase: spvdb,\n\t\t// \t\t\tChainParams: *ActiveNet.Params,\n\t\t// \t\t\tConnectPeers: cfg.ConnectPeers,\n\t\t// \t\t\tAddPeers: cfg.AddPeers,\n\t\t// \t\t})\n\t\t// \tif err != nil {\n\t\t// \t\tlog<-cl.Errorf{\"couldn't create Neutrino ChainService: %s\", err)\n\t\t// \t\tcontinue\n\t\t// \t}\n\t\t// \tchainClient = chain.NewNeutrinoClient(ActiveNet.Params, chainService)\n\t\t// \terr = chainClient.Start()\n\t\t// \tif err != nil {\n\t\t// \t\tlog<-cl.Errorf{\"couldn't start Neutrino client: %s\", err)\n\t\t// \t}\n\t\t// } else 
{\n\t\tchainClient, err = startChainRPC(certs)\n\t\tif err != nil {\n\t\t\tlog <- cl.Error{\n\t\t\t\t\"unable to open connection to consensus RPC server:\", err}\n\t\t\tcontinue\n\t\t}\n\t\t// }\n\t\t// Rather than inlining this logic directly into the loader\n\t\t// callback, a function variable is used to avoid running any of\n\t\t// this after the client disconnects by setting it to nil. This\n\t\t// prevents the callback from associating a wallet loaded at a\n\t\t// later time with a client that has already disconnected. A\n\t\t// mutex is used to make this concurrent safe.\n\t\tassociateRPCClient := func(w *wallet.Wallet) {\n\t\t\tw.SynchronizeRPC(chainClient)\n\t\t\tif legacyRPCServer != nil {\n\t\t\t\tlegacyRPCServer.SetChainServer(chainClient)\n\t\t\t}\n\t\t}\n\t\tmu := new(sync.Mutex)\n\t\tloader.RunAfterLoad(func(w *wallet.Wallet) {\n\t\t\tmu.Lock()\n\t\t\tassociate := associateRPCClient\n\t\t\tmu.Unlock()\n\t\t\tif associate != nil {\n\t\t\t\tassociate(w)\n\t\t\t}\n\t\t})\n\t\tchainClient.WaitForShutdown()\n\t\tmu.Lock()\n\t\tassociateRPCClient = nil\n\t\tmu.Unlock()\n\t\tloadedWallet, ok := loader.LoadedWallet()\n\t\tif ok {\n\t\t\t// Do not attempt a reconnect when the wallet was explicitly stopped.\n\t\t\tif loadedWallet.ShuttingDown() {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tloadedWallet.SetChainSynced(false)\n\t\t\t// TODO: Rework the wallet so changing the RPC client does not require stopping and restarting everything.\n\t\t\tloadedWallet.Stop()\n\t\t\tloadedWallet.WaitForShutdown()\n\t\t\tloadedWallet.Start()\n\t\t}\n\t}\n}", "func (tm *ServiceTracerouteManager) SetOutChan(outchan chan string) {\n\ttm.OutChan = outchan\n}", "func videosHandler(videoChan chan ytvideo.YTVideo, tPoolNum chan int) {\n\tvideo := <-videoChan\n\t<-tPoolNum // get a turn in the pool\n\tdefer consumeThread(tPoolNum) // to give turn to other threads\n\tif debugOutput {\n\t\tfmt.Println(video.Id)\n\t}\n\tytdl := youtube_dl.YoutubeDl{}\n\tytdl.Path = \"$GOPATH/src/app/srts\"\n\terr := ytdl.DownloadVideo(video.Id)\n\tif err != nil {\n\t\tlog.Printf(\"%v\", err)\n\t}\n\t//if debugOutput {\n\t//\tlog.Printf(\"command : %v\", command)\n\t//}\n\tfmt.Print(\".\");\n\tStoreValue(video)\n\tgetVideoSuggestions(video.Id, videoChan, \"12\", tPoolNum)// 12 is a random token that works as initial value\n}", "func AppendCaller() {\n\tfor {\n\t\tlogentry := <-raft.Append_ch\n\t\traft.C1 <- 1\n\t\tvar no int\n\t\tno = 0;\n\t\tfor i:=0; i<len(r.clusterConfig.Servers); i++ {\t\t\t\n\t\t\t\tif i == r.id { continue }\t\t\t\n\t\t\t\targs := &AppendRPCArgs{logentry,r.id}\n\t\t\t\tvar reply string\n\t\t\t\trr := make(chan error, 1)\n\t\t\t\tgo func() { rr <- r.clusterConfig.Servers[i].Client.Call(\"RPC.AppendRPC\", args, &reply) } ()\n\t\t\t\tselect{\n\t\t\t\t\tcase err := <-rr:\n\t\t\t\t\t\tno++;\n\t\t\t\t\t\tif err != nil {\t\n\t\t\t\t\t\t\tlog.Println(\"[Server] AppendRPC Error:\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\tcase <-time.After(1000*time.Millisecond):\n\t\t\t\t\t\tlog.Println(\"AppendRPC time out for: \",i)\n\t\t\t\t\t\tcontinue //log.Println(\"Heartbeat reply not got \",i)\n\t\t\t\t}// inner select loop\n\t\t\t}//end of inner for\n\t\t\traft.No_Append <- no\n\t\t}//end of outer for\n}" ]
[ "0.7701703", "0.64137083", "0.58868027", "0.55262583", "0.5286783", "0.52103513", "0.5023557", "0.501735", "0.4995522", "0.4890827", "0.48545292", "0.4846518", "0.48191816", "0.4794705", "0.47043338", "0.4639148", "0.46159783", "0.46019688", "0.45946375", "0.45439455", "0.45154098", "0.45147902", "0.4499341", "0.44839835", "0.4465964", "0.4449987", "0.4424959", "0.44134998", "0.44122797", "0.44101918", "0.43930364", "0.43692708", "0.43632293", "0.43575293", "0.43535292", "0.43425503", "0.4341048", "0.43396592", "0.43346214", "0.43344074", "0.4319067", "0.43140203", "0.43136322", "0.43082237", "0.42896935", "0.42866895", "0.42806274", "0.42737135", "0.42645615", "0.42548385", "0.4251252", "0.4242177", "0.4237802", "0.42351523", "0.4225462", "0.42089573", "0.42065004", "0.4196185", "0.4189117", "0.4187478", "0.4181439", "0.41791084", "0.41773245", "0.41770235", "0.4171287", "0.41670215", "0.4162252", "0.41620317", "0.41582778", "0.4143975", "0.4142229", "0.4124548", "0.41234985", "0.4122009", "0.41218933", "0.4116304", "0.41085124", "0.4101968", "0.41015795", "0.4098282", "0.4090793", "0.40897083", "0.4083791", "0.40808198", "0.40786028", "0.40574646", "0.40542024", "0.40493903", "0.40473622", "0.40409577", "0.4033454", "0.4029481", "0.40292484", "0.40281755", "0.4025334", "0.40222764", "0.40138867", "0.40107232", "0.40098777", "0.40090197" ]
0.8141107
0
GetNerCustomizedSeaEcomWithCallback invokes the alinlp.GetNerCustomizedSeaEcom API asynchronously
func (client *Client) GetNerCustomizedSeaEcomWithCallback(request *GetNerCustomizedSeaEcomRequest, callback func(response *GetNerCustomizedSeaEcomResponse, err error)) <-chan int { result := make(chan int, 1) err := client.AddAsyncTask(func() { var response *GetNerCustomizedSeaEcomResponse var err error defer close(result) response, err = client.GetNerCustomizedSeaEcom(request) callback(response, err) result <- 1 }) if err != nil { defer close(result) callback(nil, err) result <- 0 } return result }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (client *Client) GetNerCustomizedSeaEcomWithChan(request *GetNerCustomizedSeaEcomRequest) (<-chan *GetNerCustomizedSeaEcomResponse, <-chan error) {\n\tresponseChan := make(chan *GetNerCustomizedSeaEcomResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetNerCustomizedSeaEcom(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func (client *Client) GetNerCustomizedSeaEcom(request *GetNerCustomizedSeaEcomRequest) (response *GetNerCustomizedSeaEcomResponse, err error) {\n\tresponse = CreateGetNerCustomizedSeaEcomResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}", "func (client *Client) GetWsCustomizedChEcomContentWithCallback(request *GetWsCustomizedChEcomContentRequest, callback func(response *GetWsCustomizedChEcomContentResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetWsCustomizedChEcomContentResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetWsCustomizedChEcomContent(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func CreateGetNerCustomizedSeaEcomResponse() (response *GetNerCustomizedSeaEcomResponse) {\n\tresponse = &GetNerCustomizedSeaEcomResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateGetNerCustomizedSeaEcomRequest() (request *GetNerCustomizedSeaEcomRequest) {\n\trequest = &GetNerCustomizedSeaEcomRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"alinlp\", \"2020-06-29\", \"GetNerCustomizedSeaEcom\", \"alinlp\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *Client) OemSitingSelctionWithCallback(request *OemSitingSelctionRequest, callback func(response *OemSitingSelctionResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *OemSitingSelctionResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.OemSitingSelction(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func Callback(c *gin.Context) {\n\tprovider := c.Param(\"provider\")\n\n\tvar logincode vo.LoginReq\n\tif err := c.ShouldBindQuery(&logincode); err != nil {\n\t\tfmt.Println(\"xxxx\", err)\n\t}\n\n\tfmt.Println(\"provider\", provider, logincode)\n\n\tuserInfo := vo.GetUserInfoFromOauth(provider, logincode.Code, logincode.State)\n\tfmt.Println(\"get user info\", userInfo)\n\n\tif userInfo == nil {\n\t\tc.JSON(http.StatusOK, sailor.HTTPAirdbResponse{\n\t\t\tCode: enum.AirdbSuccess,\n\t\t\tSuccess: true,\n\t\t\tData: vo.LoginResp{\n\t\t\t\tNickname: \"xxx\",\n\t\t\t\tHeadimgurl: \"xxx.png\",\n\t\t\t},\n\t\t})\n\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, sailor.HTTPAirdbResponse{\n\t\tCode: enum.AirdbSuccess,\n\t\tSuccess: true,\n\t\tData: vo.LoginResp{\n\t\t\tNickname: userInfo.Login,\n\t\t\tHeadimgurl: userInfo.AvatarURL,\n\t\t},\n\t})\n}", "func (client *Client) GetWsCustomizedChO2OWithCallback(request *GetWsCustomizedChO2ORequest, callback func(response 
*GetWsCustomizedChO2OResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetWsCustomizedChO2OResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetWsCustomizedChO2O(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) GetIndustryCommerceInfoWithCallback(request *GetIndustryCommerceInfoRequest, callback func(response *GetIndustryCommerceInfoResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetIndustryCommerceInfoResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetIndustryCommerceInfo(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) GetKeywordChEcomWithCallback(request *GetKeywordChEcomRequest, callback func(response *GetKeywordChEcomResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetKeywordChEcomResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetKeywordChEcom(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) QueryCustomerAddressListWithCallback(request *QueryCustomerAddressListRequest, callback func(response *QueryCustomerAddressListResponse, err error)) (<-chan int) {\nresult := make(chan int, 1)\nerr := client.AddAsyncTask(func() {\nvar response *QueryCustomerAddressListResponse\nvar err error\ndefer close(result)\nresponse, err = client.QueryCustomerAddressList(request)\ncallback(response, err)\nresult <- 1\n})\nif err != nil {\ndefer close(result)\ncallback(nil, err)\nresult <- 0\n}\nreturn result\n}", "func (client *Client) GetEMapWithCallback(request *GetEMapRequest, callback func(response *GetEMapResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetEMapResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetEMap(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) CoreEngineWithCallback(request *CoreEngineRequest, callback func(response *CoreEngineResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *CoreEngineResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.CoreEngine(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (t *SelfTester) SetOnNewPoliciesReadyCb(cb func()) {\n}", "func InvokeCallback(ctx *context.T, name string) {\n\tconfig, err := exec.ReadConfigFromOSEnv()\n\tif err != nil || config == nil {\n\t\treturn\n\t}\n\t// Device manager was started by self-update, notify the parent.\n\tcallbackName, err := config.Get(mgmt.ParentNameConfigKey)\n\tif err != nil {\n\t\t// Device manager was not started by self-update, return silently.\n\t\treturn\n\t}\n\tclient := 
device.ConfigClient(callbackName)\n\tctx, cancel := context.WithTimeout(ctx, rpcContextTimeout)\n\tdefer cancel()\n\tif err := client.Set(ctx, mgmt.ChildNameConfigKey, name); err != nil {\n\t\tctx.Fatalf(\"Set(%v, %v) failed: %v\", mgmt.ChildNameConfigKey, name, err)\n\t}\n}", "func (client *Client) GetWsCustomizedChEcomContentWithChan(request *GetWsCustomizedChEcomContentRequest) (<-chan *GetWsCustomizedChEcomContentResponse, <-chan error) {\n\tresponseChan := make(chan *GetWsCustomizedChEcomContentResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetWsCustomizedChEcomContent(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func (client *Client) VerifyCenWithCallback(request *VerifyCenRequest, callback func(response *VerifyCenResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *VerifyCenResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.VerifyCen(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) BeginVnDialogueWithCallback(request *BeginVnDialogueRequest, callback func(response *BeginVnDialogueResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *BeginVnDialogueResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.BeginVnDialogue(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) CreateCustomCallTaggingWithCallback(request *CreateCustomCallTaggingRequest, callback func(response *CreateCustomCallTaggingResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *CreateCustomCallTaggingResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.CreateCustomCallTagging(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) ModifyClusterServiceConfigForAdminWithCallback(request *ModifyClusterServiceConfigForAdminRequest, callback func(response *ModifyClusterServiceConfigForAdminResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *ModifyClusterServiceConfigForAdminResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.ModifyClusterServiceConfigForAdmin(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) GetOpenNLUWithCallback(request *GetOpenNLURequest, callback func(response *GetOpenNLUResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetOpenNLUResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetOpenNLU(request)\n\t\tcallback(response, 
err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func cognitoCallback(ctx context.Context, d *aegis.HandlerDependencies, req *aegis.APIGatewayProxyRequest, res *aegis.APIGatewayProxyResponse, params url.Values) error {\n\t// Exchange code for token\n\ttokens, err := d.Services.Cognito.GetTokens(req.QueryStringParameters[\"code\"], []string{})\n\t// Depending on Cognito configuration, there could be an error here.\n\t// This service is for an OAuth2 with an authorization code flow.\n\t// NOTE: tokens.AccessToken is generally used.\n\t// If using an openid grant, you may also use tokens.IDToken with ParseAndVerifyJWT() below.\n\tif tokens.Error != \"\" {\n\t\terr = errors.New(tokens.Error)\n\t}\n\tif err != nil {\n\t\tlog.Println(\"Couldn't get access token\", err)\n\t\tres.JSONError(500, err)\n\t} else {\n\t\t// verify the token\n\t\t_, err := d.Services.Cognito.ParseAndVerifyJWT(tokens.AccessToken)\n\t\tif err == nil {\n\t\t\thost := req.GetHeader(\"Host\")\n\t\t\tstage := req.RequestContext.Stage\n\t\t\tres.SetHeader(\"Set-Cookie\", \"access_token=\"+tokens.AccessToken+\"; Domain=\"+host+\"; Secure; HttpOnly\")\n\t\t\tres.Redirect(301, \"https://\"+host+\"/\"+stage+\"/protected\")\n\t\t} else {\n\t\t\tres.JSONError(401, errors.New(\"unauthorized, invalid token\"))\n\t\t}\n\t}\n\treturn nil\n}", "func (client *Client) GetOfficePreviewURLWithCallback(request *GetOfficePreviewURLRequest, callback func(response *GetOfficePreviewURLResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetOfficePreviewURLResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetOfficePreviewURL(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) ReleaseEipSegmentAddressWithCallback(request *ReleaseEipSegmentAddressRequest, callback func(response *ReleaseEipSegmentAddressResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *ReleaseEipSegmentAddressResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.ReleaseEipSegmentAddress(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) CreateBoxCodeWithCallback(request *CreateBoxCodeRequest, callback func(response *CreateBoxCodeResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *CreateBoxCodeResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.CreateBoxCode(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) ServiceStatusWithCallback(request *ServiceStatusRequest, callback func(response *ServiceStatusResponse, err error)) (<-chan int) {\nresult := make(chan int, 1)\nerr := client.AddAsyncTask(func() {\nvar response *ServiceStatusResponse\nvar err error\ndefer close(result)\nresponse, err = client.ServiceStatus(request)\ncallback(response, err)\nresult <- 1\n})\nif err != nil {\ndefer close(result)\ncallback(nil, err)\nresult <- 0\n}\nreturn result\n}", "func (client 
*Client) NormalRpcHsfApiWithCallback(request *NormalRpcHsfApiRequest, callback func(response *NormalRpcHsfApiResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *NormalRpcHsfApiResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.NormalRpcHsfApi(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) DescribeReservedInstancesWithCallback(request *DescribeReservedInstancesRequest, callback func(response *DescribeReservedInstancesResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *DescribeReservedInstancesResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.DescribeReservedInstances(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) ConvertInvoiceWithCallback(request *ConvertInvoiceRequest, callback func(response *ConvertInvoiceResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *ConvertInvoiceResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.ConvertInvoice(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func CallbackHandler(c *gin.Context) {\n\n\t// Retrieve query params for state and code\n\tstate := c.Query(\"state\")\n\tcode := c.Query(\"code\")\n\t//2次進攻redirectURL\n\tresp, err := http.Post(\"https://app.asana.com/-/oauth_token\",\n\t\t\"application/x-www-form-urlencoded\",\n\t\tstrings.NewReader(\"grant_type=authorization_code&client_id=\"+clientID+\"&client_secret=\"+clientSecret+\"&redirect_uri=\"+redirectURL+\"&state=\"+state+\"&code=\"+code))\n\tif err != nil {\n\t\tutil.Error(err.Error())\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tutil.Error(\"resp error\")\n\t}\n\n\tServerslice1 := UserType{}\n\te := json.Unmarshal([]byte(body), &Serverslice1)\n\tif e != nil {\n\t\tutil.Error(e.Error())\n\t}\n\n\t// Save the username in the session\n\t//session.Set(userkey, Serverslice1.Data.Name)\n\n\t//fmt.Println(body)\n\n\t//rsp回來的資料\n\tutil.Info(string(\" > User \"+Serverslice1.Data.Name) + \" login ! 
\")\n\tc.Writer.Write([]byte(\"Hi, \" + string(Serverslice1.Data.Name)))\n}", "func (client *Client) DescribeUserVvTopByDayWithCallback(request *DescribeUserVvTopByDayRequest, callback func(response *DescribeUserVvTopByDayResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *DescribeUserVvTopByDayResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.DescribeUserVvTopByDay(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (l *Libvirt) DomainEventCallbackBlockJob() (err error) {\n\tvar buf []byte\n\n\n\t_, err = l.requestStream(326, constants.Program, buf, nil, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}", "func (client *Client) QueryPublicModelEngineWithCallback(request *QueryPublicModelEngineRequest, callback func(response *QueryPublicModelEngineResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *QueryPublicModelEngineResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.QueryPublicModelEngine(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) AssociateAclsWithListenerWithCallback(request *AssociateAclsWithListenerRequest, callback func(response *AssociateAclsWithListenerResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *AssociateAclsWithListenerResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.AssociateAclsWithListener(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) ListCityMapAoisWithCallback(request *ListCityMapAoisRequest, callback func(response *ListCityMapAoisResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *ListCityMapAoisResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.ListCityMapAois(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) ModifyVnRepeatingConfigWithCallback(request *ModifyVnRepeatingConfigRequest, callback func(response *ModifyVnRepeatingConfigResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *ModifyVnRepeatingConfigResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.ModifyVnRepeatingConfig(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (c *JSONRPCSignalClient) OnNegotiate(cb func(offer *webrtc.SessionDescription)) {\n\tc.onNegotiate = cb\n}", "func (client *Client) ReleaseAnycastEipAddressWithCallback(request *ReleaseAnycastEipAddressRequest, callback func(response *ReleaseAnycastEipAddressResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *ReleaseAnycastEipAddressResponse\n\t\tvar err error\n\t\tdefer 
close(result)\n\t\tresponse, err = client.ReleaseAnycastEipAddress(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func PersonalyCallback(\n\tgetUserByID dependencyGetUserByID,\n\tgetNumberOfPersonalyOffers dependencyGetNumberOfPersonalyOffers,\n\tgetSystemConfig dependencyGetSystemConfig,\n\tcreatePersonalyIncome dependencyCreatePersonalyIncome,\n\tbroadcast dependencyBroadcast,\n) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tpayload := personalyPayload{}\n\t\tif err := c.BindWith(&payload, binding.Form); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"event\": models.EventPersonalyCallback,\n\t\t\t\"query\": c.Request.URL.Query().Encode(),\n\t\t\t\"user_id\": payload.UserID,\n\t\t\t\"amount\": payload.Amount,\n\t\t\t\"offer_id\": payload.OfferID,\n\t\t}).Debug(\"get superrewards callback\")\n\n\t\tuser, err := getUserByID(payload.UserID)\n\t\tif err != nil {\n\t\t\tc.AbortWithError(http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\n\t\tcount, err := getNumberOfPersonalyOffers(payload.OfferID, payload.UserID)\n\t\tif err != nil {\n\t\t\tc.AbortWithError(http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\n\t\tif count > 0 {\n\t\t\tc.String(http.StatusOK, \"1\")\n\t\t\treturn\n\t\t}\n\n\t\t// create income personaly\n\t\tamount := payload.Amount / 1e8\n\t\tincome := models.Income{\n\t\t\tUserID: user.ID,\n\t\t\tRefererID: user.RefererID,\n\t\t\tType: models.IncomeTypePersonaly,\n\t\t\tIncome: amount,\n\t\t\tRefererIncome: amount * getSystemConfig().RefererRewardRate,\n\t\t}\n\t\tif err := createPersonalyIncome(income, payload.OfferID); err != nil {\n\t\t\tc.AbortWithError(http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\n\t\t// broadcast delta income to all clients\n\t\tdeltaIncome := struct {\n\t\t\tAddress string `json:\"address\"`\n\t\t\tAmount float64 `json:\"amount\"`\n\t\t\tType string `json:\"type\"`\n\t\t\tTime time.Time `json:\"time\"`\n\t\t}{user.Address, amount, \"personaly\", time.Now()}\n\t\tmsg, _ := json.Marshal(models.WebsocketMessage{DeltaIncome: deltaIncome})\n\t\tbroadcast(msg)\n\n\t\tc.String(http.StatusOK, \"1\")\n\t}\n}", "func (l *Libvirt) DomainEventCallbackMigrationIteration() (err error) {\n\tvar buf []byte\n\n\n\t_, err = l.requestStream(359, constants.Program, buf, nil, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}", "func (client *Client) DescribeClusterServiceConfigForAdminWithCallback(request *DescribeClusterServiceConfigForAdminRequest, callback func(response *DescribeClusterServiceConfigForAdminResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *DescribeClusterServiceConfigForAdminResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.DescribeClusterServiceConfigForAdmin(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) AddPredictiveNameListWithCallback(request *AddPredictiveNameListRequest, callback func(response *AddPredictiveNameListResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *AddPredictiveNameListResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.AddPredictiveNameList(request)\n\t\tcallback(response, 
err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) GetOcJusticeTerminalCaseWithCallback(request *GetOcJusticeTerminalCaseRequest, callback func(response *GetOcJusticeTerminalCaseResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetOcJusticeTerminalCaseResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetOcJusticeTerminalCase(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) QueryVnConversationsWithCallback(request *QueryVnConversationsRequest, callback func(response *QueryVnConversationsResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *QueryVnConversationsResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.QueryVnConversations(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) ModifySkillGroupExWithCallback(request *ModifySkillGroupExRequest, callback func(response *ModifySkillGroupExResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *ModifySkillGroupExResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.ModifySkillGroupEx(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func socialLoginNonInviteBasedAuthCallback(c buffalo.Context, authEmail, authType, clientID string) error {\n\tdomain.NewExtra(c, \"authEmail\", authEmail)\n\tdomain.NewExtra(c, \"authType\", authType)\n\n\tvar user models.User\n\tif err := user.FindByEmailAndSocialAuthProvider(models.Tx(c), authEmail, authType); err != nil {\n\t\treturn logErrorAndRedirect(c, api.ErrorGettingSocialAuthUser,\n\t\t\tfmt.Sprintf(\"error loading social auth user for '%s' ... 
%v\", authType, err))\n\t}\n\n\tcallbackValues := processSocialAuthCallback(c, authEmail, authType)\n\tif callbackValues.errCode != \"\" {\n\t\treturn logErrorAndRedirect(c, callbackValues.errCode, callbackValues.errMsg)\n\t}\n\n\tauthUser, err := newOrglessAuthUser(c, clientID, user)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.SetUser(c, authUser.ID, authUser.Nickname, authUser.Email)\n\n\treturn c.Redirect(302, getLoginSuccessRedirectURL(authUser, callbackValues.returnTo))\n}", "func runCallback(receivedMessage *Message, consumerMessage *sarama.ConsumerMessage) {\n\tcallback := subscribeMap[consumerMessage.Topic][receivedMessage.MessageType]\n\n\tif callback == nil {\n\t\tlogrus.Error(fmt.Sprintf(\"callback not found for topic : %s, message type : %s\", consumerMessage.Topic,\n\t\t\treceivedMessage.MessageType))\n\t\treturn\n\t}\n\n\tgo callback(&Message{\n\t\tTopic: consumerMessage.Topic,\n\t\tMessage: receivedMessage.Message,\n\t\tMessageType: receivedMessage.MessageType,\n\t\tService: receivedMessage.Service,\n\t\tTraceId: receivedMessage.TraceId,\n\t\tMessageId: receivedMessage.MessageId,\n\t}, nil)\n}", "func (client *Client) GetWsCustomizedChEcomContent(request *GetWsCustomizedChEcomContentRequest) (response *GetWsCustomizedChEcomContentResponse, err error) {\n\tresponse = CreateGetWsCustomizedChEcomContentResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}", "func onEventCallback(e event.Event, ctx interface{}) {\n\tservice := ctx.(*metadataService)\n\tservice.eventChan <- e\n}", "func (gs *GRPCClient) AfterInit() {}", "func (client *Client) CreateFaceConfigWithCallback(request *CreateFaceConfigRequest, callback func(response *CreateFaceConfigResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *CreateFaceConfigResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.CreateFaceConfig(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (handler *HTTPCallBackHanlder) Callback(c echo.Context) error {\n\n\tctx := c.Request().Context()\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tevents, err := handler.Bot.ParseRequest(c.Request())\n\tif err != nil {\n\t\tif err == linebot.ErrInvalidSignature {\n\t\t\tc.String(400, linebot.ErrInvalidSignature.Error())\n\t\t} else {\n\t\t\tc.String(500, \"internal\")\n\t\t}\n\t}\n\n\tfor _, event := range events {\n\t\tif event.Type == linebot.EventTypeMessage {\n\t\t\tswitch message := event.Message.(type) {\n\t\t\tcase *linebot.TextMessage:\n\t\t\t\tmessageFromPing := servicemanagement.PingService(message.Text, handler.ServicesInfo, time.Second*5)\n\t\t\t\tif _, err = handler.Bot.ReplyMessage(event.ReplyToken, linebot.NewTextMessage(messageFromPing)).Do(); err != nil {\n\t\t\t\t\tlog.Print(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn c.JSON(200, \"\")\n}", "func (client *Client) CreateQualityEntityWithCallback(request *CreateQualityEntityRequest, callback func(response *CreateQualityEntityResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *CreateQualityEntityResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.CreateQualityEntity(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", 
"func (api *CoreHandler) AuthorizeCallback(w http.ResponseWriter, r *http.Request) {\n\tlog.Println(\"Reaceived callback from Instagram oauth\")\n\n\t// Get the query string\n\tvals := r.URL.Query()\n\n\t// If \"error\" is not an empty string we have not received our access code\n\t// This is error param is specified by the Reddit API\n\tif val, ok := vals[\"error\"]; ok {\n\t\tif len(val) != 0 {\n\t\t\tlog.Printf(\"Did not receive authorization. Error: %v\\n\", vals[\"error\"][0])\n\t\t\t// This is the case where the user likely denied us access\n\t\t\t// TODO: should redirect back to appropriate page in front-end\n\t\t\treturn\n\t\t}\n\t}\n\n\tvar instaAuth *InstagramAuthResponse\n\tvar err error\n\t// Make sure the code exists\n\tif len(vals[\"code\"]) > 0 {\n\t\t// Now request bearer token using the code we received\n\t\tinstaAuth, err = api.requestToken(vals[\"code\"][0])\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Unable to receive bearer token: %v\\n\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tlog.Printf(\"Received the following auth from instagram: %+v\", *instaAuth)\n\n\t// Post code back to core async as the rest is not dependant on this -- vals[\"state\"] should be userID\n\tgo api.postInstaAuth(instaAuth, vals[\"state\"][0])\n\n\t// Redirect to frontend\n\thttp.Redirect(w, r, api.conf.FrontendURL, http.StatusMovedPermanently)\n}", "func (locator *ServiceLocatorImpl) InstallEndCallBack(f func(Worker)) {\n\tlocator.endCallBack = append(locator.endCallBack, f)\n}", "func (c *controller) Callback(ctx context.Context, request *web.Request) web.Result {\n\tif resp := c.service.callback(ctx, request); resp != nil {\n\t\treturn resp\n\t}\n\treturn c.responder.NotFound(errors.New(\"broker for callback not found\"))\n}", "func (client *Client) SearchEventsWithCallback(request *SearchEventsRequest, callback func(response *SearchEventsResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *SearchEventsResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.SearchEvents(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) GetArmsConsoleUrlWithCallback(request *GetArmsConsoleUrlRequest, callback func(response *GetArmsConsoleUrlResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetArmsConsoleUrlResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetArmsConsoleUrl(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) ModifyDasInstanceConfigWithCallback(request *ModifyDasInstanceConfigRequest, callback func(response *ModifyDasInstanceConfigResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *ModifyDasInstanceConfigResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.ModifyDasInstanceConfig(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (cs *Callbacks) AddAfterServedCallBack(f func(context *Context)) {\n\tcs.Lock()\n\tcs.afterServed = append(cs.afterServed, f)\n\tcs.Unlock()\n}", "func bungieCallback(c 
*gin.Context) {\n code := c.Query(\"code\")\n state := c.Query(\"state\")\n\n // Now use the code to receive an access token\n client := &http.Client{}\n data := url.Values{}\n data.Set(\"grant_type\", \"authorization_code\")\n data.Set(\"code\", code)\n req, _ := http.NewRequest(\"POST\", \"https://www.bungie.net/platform/app/oauth/token/\", strings.NewReader(data.Encode()))\n req.Header.Add(\"Authorization\", \"Basic \" + base64.StdEncoding.EncodeToString([]byte(os.Getenv(\"CLIENT_ID\") + \":\" + os.Getenv(\"CLIENT_SECRET\"))))\n req.Header.Add(\"Content-Type\", \"application/x-www-form-urlencoded\")\n resp, _ := client.Do(req)\n\n // Assess GetToken Response Code\n if resp.StatusCode == http.StatusOK {\n var tokenResponse TokenResponse\n // This could potentialy be changed to use unmarshalling to save memory\n err := json.NewDecoder(resp.Body).Decode(&tokenResponse)\n // err := json.Unmarshal(resp.Body, &tokenResponse)\n resp.Body.Close()\n if err != nil {\n fmt.Println(err)\n }\n\n deleteUser(state)\n\n // Collect the available destiny membership id(s) as an array\n req, _ = http.NewRequest(\"GET\", \"https://www.bungie.net/platform/User/GetBungieAccount/\" + tokenResponse.Membership_id + \"/254/\", nil)\n req.Header.Add(\"X-API-Key\", os.Getenv(\"API_KEY\"))\n resp, _ = client.Do(req)\n\n // Assess GetBungieAccount Response Code\n if resp.StatusCode == http.StatusOK {\n destinyMemberships := make([]Membership, 0)\n\n // Determine which Destiny membership IDs are associated with the Bungie account\n var accountResponse interface{}\n err = json.NewDecoder(resp.Body).Decode(&accountResponse)\n resp.Body.Close()\n\n accountMap := accountResponse.(map[string]interface{})\n responseMap := accountMap[\"Response\"].(map[string]interface{})\n destinyMembershipsArray := responseMap[\"destinyMemberships\"].([]interface{})\n\n activeMembership := \"-1\"\n for _, u := range destinyMembershipsArray {\n valuesMap := u.(map[string]interface{})\n\n\n //////\n ////\n //// For now, we assume PC is the active membership\n activeMembershipType := valuesMap[\"membershipType\"].(float64)\n if ( activeMembershipType == 3 ) {\n activeMembership = valuesMap[\"membershipId\"].(string)\n fmt.Println( \"Active Membership: \" + valuesMap[\"displayName\"].(string) )\n }\n //// Replace with getActiveMembership() implementation\n ////\n //////\n\n\n tmpMembership := Membership{activeMembershipType, valuesMap[\"membershipId\"].(string)}\n destinyMemberships = append(destinyMemberships, tmpMembership)\n }\n\n // Empty User Values\n loadouts := make([]Loadout, 0)\n\n // Insert new user entry\n newUser := User{loadouts, destinyMemberships, state, activeMembership, \"-1\", tokenResponse.Access_token, tokenResponse.Refresh_token}\n createUser(newUser)\n } else {\n // Error in GetBungieAccount\n fmt.Println(resp.StatusCode)\n }\n\n } else {\n // Error in GetTokenResponse\n fmt.Println(resp.StatusCode)\n }\n}", "func (client *Client) ModifyOfficeSiteAttributeWithCallback(request *ModifyOfficeSiteAttributeRequest, callback func(response *ModifyOfficeSiteAttributeResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *ModifyOfficeSiteAttributeResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.ModifyOfficeSiteAttribute(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (l *Libvirt) 
DomainEventCallbackGraphics() (err error) {\n\tvar buf []byte\n\n\n\t_, err = l.requestStream(323, constants.Program, buf, nil, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}", "func DiscoveryEndCB(h DiscoveryEndFunc) RequestOption {\n\treturn func(o *RequestOptions) {\n\t\to.DiscoveryEndCB = h\n\t}\n}", "func (client *Client) CreateECSDBInstanceWithCallback(request *CreateECSDBInstanceRequest, callback func(response *CreateECSDBInstanceResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *CreateECSDBInstanceResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.CreateECSDBInstance(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (c *Operation) callback(w http.ResponseWriter, r *http.Request) { //nolint: funlen,gocyclo\n\tif len(r.URL.Query()[\"error\"]) != 0 {\n\t\tif r.URL.Query()[\"error\"][0] == \"access_denied\" {\n\t\t\thttp.Redirect(w, r, c.homePage, http.StatusTemporaryRedirect)\n\t\t}\n\t}\n\n\ttk, err := c.tokenIssuer.Exchange(r)\n\tif err != nil {\n\t\tlogger.Errorf(\"failed to exchange code for token: %s\", err.Error())\n\t\tc.writeErrorResponse(w, http.StatusBadRequest,\n\t\t\tfmt.Sprintf(\"failed to exchange code for token: %s\", err.Error()))\n\n\t\treturn\n\t}\n\n\t// user info from token will be used for to retrieve data from cms\n\tinfo, err := c.tokenResolver.Resolve(tk.AccessToken)\n\tif err != nil {\n\t\tlogger.Errorf(\"failed to get token info: %s\", err.Error())\n\t\tc.writeErrorResponse(w, http.StatusBadRequest,\n\t\t\tfmt.Sprintf(\"failed to get token info: %s\", err.Error()))\n\n\t\treturn\n\t}\n\n\tuserID, subject, err := c.getCMSData(tk, \"email=\"+info.Subject, info.Scope)\n\tif err != nil {\n\t\tlogger.Errorf(\"failed to get cms data: %s\", err.Error())\n\t\tc.writeErrorResponse(w, http.StatusBadRequest,\n\t\t\tfmt.Sprintf(\"failed to get cms data: %s\", err.Error()))\n\n\t\treturn\n\t}\n\n\tcallbackURLCookie, err := r.Cookie(callbackURLCookie)\n\tif err != nil && !errors.Is(err, http.ErrNoCookie) {\n\t\tc.writeErrorResponse(w, http.StatusBadRequest,\n\t\t\tfmt.Sprintf(\"failed to get authMode cookie: %s\", err.Error()))\n\n\t\treturn\n\t}\n\n\tif callbackURLCookie != nil && callbackURLCookie.Value != \"\" {\n\t\ttxnID := uuid.NewString()\n\t\tdata := txnData{\n\t\t\tUserID: userID,\n\t\t\tScope: info.Scope,\n\t\t\tToken: tk.AccessToken,\n\t\t}\n\n\t\tdataBytes, mErr := json.Marshal(data)\n\t\tif mErr != nil {\n\t\t\tc.writeErrorResponse(w, http.StatusInternalServerError,\n\t\t\t\tfmt.Sprintf(\"failed to marshal txn data: %s\", mErr.Error()))\n\t\t\treturn\n\t\t}\n\n\t\terr = c.store.Put(txnID, dataBytes)\n\t\tif err != nil {\n\t\t\tc.writeErrorResponse(w, http.StatusInternalServerError,\n\t\t\t\tfmt.Sprintf(\"failed to save txn data: %s\", err.Error()))\n\n\t\t\treturn\n\t\t}\n\n\t\thttp.Redirect(w, r, callbackURLCookie.Value+\"?txnID=\"+txnID, http.StatusTemporaryRedirect)\n\n\t\treturn\n\t}\n\n\tvcsProfileCookie, err := r.Cookie(vcsProfileCookie)\n\tif err != nil {\n\t\tlogger.Errorf(\"failed to get cookie: %s\", err.Error())\n\t\tc.writeErrorResponse(w, http.StatusBadRequest,\n\t\t\tfmt.Sprintf(\"failed to get cookie: %s\", err.Error()))\n\n\t\treturn\n\t}\n\n\tcred, err := c.prepareCredential(subject, info.Scope, vcsProfileCookie.Value)\n\tif err != nil {\n\t\tlogger.Errorf(\"failed to create credential: %s\", 
err.Error())\n\t\tc.writeErrorResponse(w, http.StatusInternalServerError,\n\t\t\tfmt.Sprintf(\"failed to create credential: %s\", err.Error()))\n\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\n\tt, err := template.ParseFiles(c.didAuthHTML)\n\tif err != nil {\n\t\tlogger.Errorf(err.Error())\n\t\tc.writeErrorResponse(w, http.StatusInternalServerError,\n\t\t\tfmt.Sprintf(\"unable to load html: %s\", err.Error()))\n\n\t\treturn\n\t}\n\n\tif err := t.Execute(w, map[string]interface{}{\n\t\t\"Path\": generate + \"?\" + \"profile=\" + vcsProfileCookie.Value,\n\t\t\"Cred\": string(cred),\n\t}); err != nil {\n\t\tlogger.Errorf(fmt.Sprintf(\"failed execute qr html template: %s\", err.Error()))\n\t}\n}", "func onEventCallback(e event.Event, ctx interface{}) {\n\tservice := ctx.(*qutoService)\n\tservice.eventChan <- e\n}", "func (mn *MockNetwork) SetConnectCallback(network.ConnectCallback) {\n\n}", "func CallbackHandler(c *gin.Context) {\n\t// Retrieve query params for state and code\n\tstate := c.Query(\"state\")\n\tcode := c.Query(\"code\")\n\tprovider := c.Param(\"provider\")\n\n\t// Handle callback and check for errors\n\tuser, _, err := config.Gocial.Handle(state, code)\n\tif err != nil {\n\t\tc.Writer.Write([]byte(\"Error: \" + err.Error()))\n\t\treturn\n\t}\n\n\tvar newUser = getOrRegisterUser(provider, user)\n\tvar jtwToken = createToken(&newUser)\n\n\tc.JSON(200, gin.H{\n\t\t\"data\": newUser,\n\t\t\"token\": jtwToken,\n\t\t\"message\": \"berhasil login\",\n\t})\n}", "func WithNativeElection(committee committee.Committee) Option {\n\treturn func(svr *coreService) {\n\t\tsvr.electionCommittee = committee\n\t}\n}", "func callback(nlm *C.struct_nl_msg, nla unsafe.Pointer) C.int {\n\tcbID := uintptr(nla)\n\tcallbacksLock.RLock()\n\tcbArg := callbacks[cbID]\n\tcallbacksLock.RUnlock()\n\n\tif cbArg == nil {\n\t\tpanic(fmt.Sprintf(\"No netlink callback with ID %d\", cbID))\n\t}\n\n\tcbMsg := &Message{nlm: nlm}\n\tif err := cbArg.fn(cbMsg, cbArg.arg); err != nil {\n\t\tcbArg.err = err\n\t\treturn C.NL_STOP\n\t}\n\treturn C.NL_OK\n}", "func TestCallbackInvokedWhenSetEarly(t *testing.T) {\n\twg := &sync.WaitGroup{}\n\twg.Add(1)\n\tapp := blockedABCIApplication{\n\t\twg: wg,\n\t}\n\t_, c := setupClientServer(t, app)\n\treqRes := c.CheckTxAsync(types.RequestCheckTx{})\n\n\tdone := make(chan struct{})\n\tcb := func(_ *types.Response) {\n\t\tclose(done)\n\t}\n\treqRes.SetCallback(cb)\n\tapp.wg.Done()\n\n\tcalled := func() bool {\n\t\tselect {\n\t\tcase <-done:\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\t}\n\trequire.Eventually(t, called, time.Second, time.Millisecond*25)\n}", "func (client *Client) UpdateEcsImageWithCallback(request *UpdateEcsImageRequest, callback func(response *UpdateEcsImageResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *UpdateEcsImageResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.UpdateEcsImage(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) DescribeExplorerWithCallback(request *DescribeExplorerRequest, callback func(response *DescribeExplorerResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *DescribeExplorerResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = 
client.DescribeExplorer(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (parser *Parser) OnConnectResponse(callback func(resp connect_resp.Response)) {\n\tparser.onConnectResp = callback\n}", "func callback(\n\tservice models.DeviceService,\n\tid string,\n\taction string,\n\tactionType models.ActionType,\n\tlc logger.LoggingClient) error {\n\n\tclient := &http.Client{}\n\turl := service.Addressable.GetCallbackURL()\n\tif len(url) > 0 {\n\t\tbody, err := getBody(id, actionType)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treq, err := http.NewRequest(string(action), url, bytes.NewReader(body))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treq.Header.Add(clients.ContentType, clients.ContentTypeJSON)\n\n\t\tgo makeRequest(client, req, lc)\n\t} else {\n\t\tlc.Info(\"callback::no addressable for \" + service.Name)\n\t}\n\treturn nil\n}", "func Callback(cbReq *CallbackRequest, opts *CallbackOptions) error {\n\tclient := opts.Client\n\tif client == nil {\n\t\tclient = http.DefaultClient\n\t}\n\tbuf := bytes.NewBuffer(nil)\n\terr := json.NewEncoder(buf).Encode(cbReq)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsignature, err := opts.Signer.Sign(buf.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\treq, err := http.NewRequest(\"POST\", cbReq.StatusCallbackUrl, buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Set(\"X-OpenGDPR-Processor-Domain\", opts.ProcessorDomain)\n\treq.Header.Set(\"X-OpenGDPR-Signature\", signature)\n\t// Attempt to make callback\n\tfor i := 0; i < opts.MaxAttempts; i++ {\n\t\tresp, err := client.Do(req)\n\t\tif err != nil || resp.StatusCode != 200 {\n\t\t\ttime.Sleep(opts.Backoff)\n\t\t\tcontinue\n\t\t}\n\t\t// Success\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"callback timed out for %s\", cbReq.StatusCallbackUrl)\n}", "func (client *Client) RecognizeFlowerWithCallback(request *RecognizeFlowerRequest, callback func(response *RecognizeFlowerResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *RecognizeFlowerResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.RecognizeFlower(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func Callback(w http.ResponseWriter, r *http.Request) {\n\tcode := ParseResponse(w, r)\n\taccess := AccessToken(code, w, r)\n\tfmt.Fprintf(w, access.Token)\n\tGetData(access.Token, w, r)\n}", "func (client *Client) DescribeVnKnowledgeWithCallback(request *DescribeVnKnowledgeRequest, callback func(response *DescribeVnKnowledgeResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *DescribeVnKnowledgeResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.DescribeVnKnowledge(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) DescribeFpgaImagesWithCallback(request *DescribeFpgaImagesRequest, callback func(response *DescribeFpgaImagesResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *DescribeFpgaImagesResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = 
client.DescribeFpgaImages(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (w *FabricSDKWrapper) AsyncInvoke(channelID string, userName string, orgName string, chaincodeID string, ccFunctionName string, args []string) (channel.Response, error) {\n\n\t// TODO implement callbackURL and remaining todos for normal invoke\n\n\t// Create channel client\n\tchannelClient, err := w.createChannelClient(channelID, userName, orgName)\n\n\t// Create invoke request\n\trequest := channel.Request{\n\t\tChaincodeID: chaincodeID,\n\t\tFcn: ccFunctionName,\n\t\tArgs: utils.AsBytes(args),\n\t}\n\n\t// Create a request (proposal) and send it\n\tresponse, err := channelClient.Execute(request)\n\tif err != nil {\n\t\treturn response, invokeerror.Errorf(invokeerror.TransientError, \"SendTransactionProposal return error: %v\", err)\n\t}\n\n\treturn response, nil\n}", "func (c *Conn) callNetInfoCallback(ni *tailcfg.NetInfo) {\n\tc.netInfoMu.Lock()\n\tdefer c.netInfoMu.Unlock()\n\tif ni.BasicallyEqual(c.netInfoLast) {\n\t\treturn\n\t}\n\tc.netInfoLast = ni\n\tif c.netInfoFunc != nil {\n\t\tc.logf(\"netInfo update: %+v\", ni)\n\t\tgo c.netInfoFunc(ni)\n\t}\n}", "func (client *Client) TestSeeWithCallback(request *TestSeeRequest, callback func(response *TestSeeResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *TestSeeResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.TestSee(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) DescribeInstanceAmortizedCostByAmortizationPeriodDateWithCallback(request *DescribeInstanceAmortizedCostByAmortizationPeriodDateRequest, callback func(response *DescribeInstanceAmortizedCostByAmortizationPeriodDateResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *DescribeInstanceAmortizedCostByAmortizationPeriodDateResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.DescribeInstanceAmortizedCostByAmortizationPeriodDate(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (locator *ServiceLocatorImpl) InstallBeginCallBack(f func(Worker)) {\n\tlocator.beginCallBack = append(locator.beginCallBack, f)\n}", "func (client *Client) AddScalingConfigItemV2WithCallback(request *AddScalingConfigItemV2Request, callback func(response *AddScalingConfigItemV2Response, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *AddScalingConfigItemV2Response\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.AddScalingConfigItemV2(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) DescribeEventTopWithCallback(request *DescribeEventTopRequest, callback func(response *DescribeEventTopResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *DescribeEventTopResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = 
client.DescribeEventTop(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) ListCasesWithCallback(request *ListCasesRequest, callback func(response *ListCasesResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *ListCasesResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.ListCases(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) GetRegionListWithCallback(request *GetRegionListRequest, callback func(response *GetRegionListResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetRegionListResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetRegionList(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func executeCallback(pipeline *sdk.Pipeline, operation *sdk.Operation, data []byte) error {\n\tvar err error\n\n\tcburl := operation.CallbackUrl\n\tparams := operation.GetParams()\n\theaders := operation.GetHeaders()\n\n\tmethod := os.Getenv(\"default-method\")\n\tif method == \"\" {\n\t\tmethod = \"POST\"\n\t}\n\n\tif m, ok := headers[\"method\"]; ok {\n\t\tmethod = m\n\t}\n\n\thttpreq, err := buildHttpRequest(cburl, method, data, params, headers)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot connect to Function on URL: %s\", cburl)\n\t}\n\n\tif operation.Requesthandler != nil {\n\t\toperation.Requesthandler(httpreq)\n\t}\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(httpreq)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer resp.Body.Close()\n\tif operation.OnResphandler != nil {\n\t\t_, err = operation.OnResphandler(resp)\n\t} else {\n\t\tif resp.StatusCode < 200 || resp.StatusCode > 299 {\n\t\t\tcbresult, _ := ioutil.ReadAll(resp.Body)\n\t\t\terr := fmt.Errorf(\"%v:%s\", err, string(cbresult))\n\t\t\treturn err\n\t\t}\n\t}\n\treturn err\n\n}", "func (cli *OpsGenieAlertV2Client) ExecuteCustomAction(req alertsv2.ExecuteCustomActionRequest) (*AsyncRequestResponse, error) {\n\treturn cli.sendAsyncPostRequest(&req)\n}", "func ProvideClientCustom[F ClientFactory](clientName string, external ...Option[http.Client]) fx.Option {\n\tif len(clientName) == 0 {\n\t\treturn fx.Error(ErrClientNameRequired)\n\t}\n\n\treturn fx.Provide(\n\t\tfx.Annotate(\n\t\t\tNewClientCustom[F],\n\t\t\tarrange.Tags().\n\t\t\t\tOptionalName(clientName+\".config\").\n\t\t\t\tGroup(clientName+\".options\").\n\t\t\t\tParamTags(),\n\t\t\tarrange.Tags().Name(clientName).ResultTags(),\n\t\t),\n\t)\n}", "func callbackHandler(res http.ResponseWriter, req *http.Request) {\n\n\t// Complete the authentication process and fetch all of the\n\t// basic information about the user from the provider.\n\tuser, err := gothic.CompleteUserAuth(res, req)\n\tif err != nil {\n\t\tfmt.Fprintln(res, err)\n\t\treturn\n\t}\n\n\t// Execute the template for this user and respond with\n\t// the user page.\n\ttemplates.ExecuteTemplate(res, \"user\", user)\n}", "func (client *Client) DescribeAntChainConsortiumsWithCallback(request *DescribeAntChainConsortiumsRequest, callback func(response *DescribeAntChainConsortiumsResponse, err 
error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *DescribeAntChainConsortiumsResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.DescribeAntChainConsortiums(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) CreateAgentWithCallback(request *CreateAgentRequest, callback func(response *CreateAgentResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *CreateAgentResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.CreateAgent(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func findAsync(findStruct *Find, callback chan *Callback) {\n\trecords, err := find(findStruct)\n\tcb := new(Callback)\n\tcb.Data = records\n\tcb.Error = err\n\tcallback <- cb\n}", "func (client *Client) DescribeCompanyWithCallback(request *DescribeCompanyRequest, callback func(response *DescribeCompanyResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *DescribeCompanyResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.DescribeCompany(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (s *server) SetNewClientCB(callback func(c *Client)) {\n\ts.onNewClientCallback = callback\n}", "func newEnsClient(endpoint string, addr common.Address, config *api.Config) (*ens.ENS, error) {\n\tlog.Info(\"connecting to ENS API\", \"url\", endpoint)\n\tclient, err := rpc.Dial(endpoint)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error connecting to ENS API %s: %s\", endpoint, err)\n\t}\n\tensClient := wonclient.NewClient(client)\n\n\tensRoot := config.EnsRoot\n\tif addr != (common.Address{}) {\n\t\tensRoot = addr\n\t} else {\n\t\ta, err := detectEnsAddr(client)\n\t\tif err == nil {\n\t\t\tensRoot = a\n\t\t} else {\n\t\t\tlog.Warn(fmt.Sprintf(\"could not determine ENS contract address, using default %s\", ensRoot), \"err\", err)\n\t\t}\n\t}\n\ttransactOpts := bind.NewKeyedTransactor(config.Swap.PrivateKey())\n\tdns, err := ens.NewENS(transactOpts, ensRoot, ensClient)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlog.Debug(fmt.Sprintf(\"-> Swarm Domain Name Registrar %v @ address %v\", endpoint, ensRoot.Hex()))\n\treturn dns, err\n}" ]
[ "0.66507435", "0.6284932", "0.5937611", "0.547515", "0.5420296", "0.52048594", "0.51196504", "0.49444628", "0.49300382", "0.4908999", "0.48851317", "0.48516223", "0.4745386", "0.47324798", "0.4731915", "0.4646688", "0.46445283", "0.46423602", "0.45770156", "0.45028207", "0.45027348", "0.44629917", "0.4460383", "0.44513455", "0.44329992", "0.4412356", "0.43972656", "0.43910927", "0.4390321", "0.43725803", "0.43685055", "0.43493822", "0.43438601", "0.42960703", "0.4293179", "0.4281569", "0.42604288", "0.42434558", "0.42355457", "0.42313054", "0.42202193", "0.42073157", "0.41930386", "0.4167372", "0.41628227", "0.41427806", "0.41384178", "0.4134888", "0.41345856", "0.4133592", "0.4131014", "0.40935948", "0.40893954", "0.40855807", "0.40831205", "0.40789363", "0.40721327", "0.40715393", "0.40687266", "0.40674114", "0.40664154", "0.40586662", "0.4051555", "0.40456617", "0.40393198", "0.40389282", "0.40323603", "0.4030717", "0.40266925", "0.4019137", "0.40160096", "0.4015603", "0.40024444", "0.40001273", "0.399831", "0.39922562", "0.3989593", "0.39868695", "0.39822426", "0.39811745", "0.39730105", "0.39728057", "0.39528248", "0.39527878", "0.3948498", "0.39460945", "0.3941223", "0.3937826", "0.39312568", "0.39292693", "0.39267722", "0.39181617", "0.39181605", "0.39181012", "0.39178973", "0.391626", "0.39155388", "0.39122537", "0.39117613", "0.39108518" ]
0.8188641
0
CreateGetNerCustomizedSeaEcomRequest creates a request to invoke GetNerCustomizedSeaEcom API
func CreateGetNerCustomizedSeaEcomRequest() (request *GetNerCustomizedSeaEcomRequest) {
	request = &GetNerCustomizedSeaEcomRequest{
		RpcRequest: &requests.RpcRequest{},
	}
	request.InitWithApiInfo("alinlp", "2020-06-29", "GetNerCustomizedSeaEcom", "alinlp", "openAPI")
	request.Method = requests.POST
	return
}
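For orientation, a minimal usage sketch of the constructor above, assuming an initialized alinlp client from the same generated SDK. The GetNerCustomizedSeaEcom client method appears among this record's candidate documents; the import path, the NewClientWithAccessKey constructor, and the region/credential placeholders are assumptions for illustration, not part of the dataset.

package main

import (
	"fmt"

	// Assumed import path, following the usual layout of the alibaba-cloud-sdk-go
	// generated service packages; adjust to wherever this SDK actually lives.
	"github.com/aliyun/alibaba-cloud-sdk-go/services/alinlp"
)

func main() {
	// Hypothetical client construction; region and credentials are placeholders.
	client, err := alinlp.NewClientWithAccessKey("cn-hangzhou", "<accessKeyId>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}

	// Build the request exactly as the constructor in the document does, then set
	// any API-specific fields the call needs before sending it.
	request := alinlp.CreateGetNerCustomizedSeaEcomRequest()

	// GetNerCustomizedSeaEcom (shown in this record's candidate documents)
	// performs the RPC and unmarshals the result into the response struct.
	response, err := client.GetNerCustomizedSeaEcom(request)
	if err != nil {
		panic(err)
	}
	fmt.Println(response)
}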
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (client *Client) GetNerCustomizedSeaEcom(request *GetNerCustomizedSeaEcomRequest) (response *GetNerCustomizedSeaEcomResponse, err error) {\n\tresponse = CreateGetNerCustomizedSeaEcomResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}", "func CreateGetNerCustomizedSeaEcomResponse() (response *GetNerCustomizedSeaEcomResponse) {\n\tresponse = &GetNerCustomizedSeaEcomResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateGetWsCustomizedChEcomContentRequest() (request *GetWsCustomizedChEcomContentRequest) {\n\trequest = &GetWsCustomizedChEcomContentRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"alinlp\", \"2020-06-29\", \"GetWsCustomizedChEcomContent\", \"alinlp\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *Client) GetNerCustomizedSeaEcomWithCallback(request *GetNerCustomizedSeaEcomRequest, callback func(response *GetNerCustomizedSeaEcomResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetNerCustomizedSeaEcomResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetNerCustomizedSeaEcom(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) GetNerCustomizedSeaEcomWithChan(request *GetNerCustomizedSeaEcomRequest) (<-chan *GetNerCustomizedSeaEcomResponse, <-chan error) {\n\tresponseChan := make(chan *GetNerCustomizedSeaEcomResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetNerCustomizedSeaEcom(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func CreateGetWsCustomizedChO2ORequest() (request *GetWsCustomizedChO2ORequest) {\n\trequest = &GetWsCustomizedChO2ORequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"alinlp\", \"2020-06-29\", \"GetWsCustomizedChO2O\", \"alinlp\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateOemSitingSelctionRequest() (request *OemSitingSelctionRequest) {\n\trequest = &OemSitingSelctionRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"cloudwf\", \"2017-03-28\", \"OemSitingSelction\", \"cloudwf\", \"openAPI\")\n\treturn\n}", "func CreateGetIndustryCommerceInfoRequest() (request *GetIndustryCommerceInfoRequest) {\n\trequest = &GetIndustryCommerceInfoRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"companyreg\", \"2020-10-22\", \"GetIndustryCommerceInfo\", \"companyreg\", \"openAPI\")\n\trequest.Method = requests.GET\n\treturn\n}", "func CreateGetKeywordChEcomRequest() (request *GetKeywordChEcomRequest) {\n\trequest = &GetKeywordChEcomRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"alinlp\", \"2020-06-29\", \"GetKeywordChEcom\", \"alinlp\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateCoreEngineRequest() (request *CoreEngineRequest) {\n\trequest = &CoreEngineRequest{\n\t\tRoaRequest: &requests.RoaRequest{},\n\t}\n\trequest.InitWithApiInfo(\"amp\", \"2020-07-08\", \"CoreEngine\", \"/getVersion/demo\", \"ServiceCode\", 
\"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateGetEMapRequest() (request *GetEMapRequest) {\n\trequest = &GetEMapRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"cusanalytic_sc_online\", \"2019-05-24\", \"GetEMap\", \"\", \"\")\n\treturn\n}", "func (client *CustomAssessmentAutomationsClient) getCreateRequest(ctx context.Context, resourceGroupName string, customAssessmentAutomationName string, options *CustomAssessmentAutomationsGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Security/customAssessmentAutomations/{customAssessmentAutomationName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif customAssessmentAutomationName == \"\" {\n\t\treturn nil, errors.New(\"parameter customAssessmentAutomationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{customAssessmentAutomationName}\", url.PathEscape(customAssessmentAutomationName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *AssociationsClient) getCreateRequest(ctx context.Context, scope string, associationName string, options *AssociationsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.CustomProviders/associations/{associationName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\tif associationName == \"\" {\n\t\treturn nil, errors.New(\"parameter associationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{associationName}\", url.PathEscape(associationName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-09-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *NetworkToNetworkInterconnectsClient) getCreateRequest(ctx context.Context, resourceGroupName string, networkFabricName string, networkToNetworkInterconnectName string, options *NetworkToNetworkInterconnectsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetworkFabric/networkFabrics/{networkFabricName}/networkToNetworkInterconnects/{networkToNetworkInterconnectName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif networkFabricName == \"\" {\n\t\treturn nil, 
errors.New(\"parameter networkFabricName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{networkFabricName}\", url.PathEscape(networkFabricName))\n\tif networkToNetworkInterconnectName == \"\" {\n\t\treturn nil, errors.New(\"parameter networkToNetworkInterconnectName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{networkToNetworkInterconnectName}\", url.PathEscape(networkToNetworkInterconnectName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-06-15\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateDescribeReservedInstancesRequest() (request *DescribeReservedInstancesRequest) {\n\trequest = &DescribeReservedInstancesRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Ecs\", \"2014-05-26\", \"DescribeReservedInstances\", \"ecs\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func NewCreateanewNcosLevelRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) {\n\tvar err error\n\n\tqueryUrl, err := url.Parse(server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbasePath := fmt.Sprintf(\"/ncoslevels\")\n\tif basePath[0] == '/' {\n\t\tbasePath = basePath[1:]\n\t}\n\n\tqueryUrl, err = queryUrl.Parse(basePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"POST\", queryUrl.String(), body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Add(\"Content-Type\", contentType)\n\treturn req, nil\n}", "func CreateConvertInvoiceRequest() (request *ConvertInvoiceRequest) {\n\trequest = &ConvertInvoiceRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"companyreg\", \"2020-10-22\", \"ConvertInvoice\", \"companyreg\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func NewCreateanewNcosLevelRequest(server string, body CreateanewNcosLevelJSONRequestBody) (*http.Request, error) {\n\tvar bodyReader io.Reader\n\tbuf, err := json.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbodyReader = bytes.NewReader(buf)\n\treturn NewCreateanewNcosLevelRequestWithBody(server, \"application/json\", bodyReader)\n}", "func CreateVerifyCenRequest() (request *VerifyCenRequest) {\n\trequest = &VerifyCenRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"ecd\", \"2020-09-30\", \"VerifyCen\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *ReplicationvCentersClient) getCreateRequest(ctx context.Context, fabricName string, vcenterName string, options *ReplicationvCentersClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationvCenters/{vcenterName}\"\n\tif client.resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter client.resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(client.resourceName))\n\tif client.resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter client.resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", 
url.PathEscape(client.resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif fabricName == \"\" {\n\t\treturn nil, errors.New(\"parameter fabricName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{fabricName}\", url.PathEscape(fabricName))\n\tif vcenterName == \"\" {\n\t\treturn nil, errors.New(\"parameter vcenterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vcenterName}\", url.PathEscape(vcenterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-11-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ClustersClient) getCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, options *ClustersGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/clusters/{clusterName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *IntegrationRuntimeNodesClient) getCreateRequest(ctx context.Context, resourceGroupName string, factoryName string, integrationRuntimeName string, nodeName string, options *IntegrationRuntimeNodesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/integrationRuntimes/{integrationRuntimeName}/nodes/{nodeName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif factoryName == \"\" {\n\t\treturn nil, errors.New(\"parameter factoryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{factoryName}\", url.PathEscape(factoryName))\n\tif integrationRuntimeName == \"\" {\n\t\treturn nil, errors.New(\"parameter integrationRuntimeName cannot be empty\")\n\t}\n\turlPath = 
strings.ReplaceAll(urlPath, \"{integrationRuntimeName}\", url.PathEscape(integrationRuntimeName))\n\tif nodeName == \"\" {\n\t\treturn nil, errors.New(\"parameter nodeName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{nodeName}\", url.PathEscape(nodeName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2018-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ConnectedEnvironmentsClient) getCreateRequest(ctx context.Context, resourceGroupName string, connectedEnvironmentName string, options *ConnectedEnvironmentsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.App/connectedEnvironments/{connectedEnvironmentName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif connectedEnvironmentName == \"\" {\n\t\treturn nil, errors.New(\"parameter connectedEnvironmentName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{connectedEnvironmentName}\", url.PathEscape(connectedEnvironmentName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *CustomDomainsClient) getCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, appName string, domainName string, options *CustomDomainsGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/domains/{domainName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif appName == \"\" {\n\t\treturn nil, errors.New(\"parameter appName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{appName}\", url.PathEscape(appName))\n\tif domainName == \"\" {\n\t\treturn nil, errors.New(\"parameter domainName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{domainName}\", url.PathEscape(domainName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, 
urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-09-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateSearchInventoryRequest() (request *SearchInventoryRequest) {\n\trequest = &SearchInventoryRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"oos\", \"2019-06-01\", \"SearchInventory\", \"oos\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *LROsCustomHeaderClient) post202Retry200CreateRequest(ctx context.Context, options *LROsCustomHeaderClientBeginPost202Retry200Options) (*policy.Request, error) {\n\turlPath := \"/lro/customheader/post/202/retry/200\"\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\tif options != nil && options.Product != nil {\n\t\tif err := runtime.MarshalAsJSON(req, *options.Product); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn req, nil\n\t}\n\treturn req, nil\n}", "func CreateSearchEventsRequest() (request *SearchEventsRequest) {\n\trequest = &SearchEventsRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"ARMS\", \"2019-08-08\", \"SearchEvents\", \"arms\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *VendorNetworkFunctionsClient) getCreateRequest(ctx context.Context, locationName string, vendorName string, serviceKey string, options *VendorNetworkFunctionsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.HybridNetwork/locations/{locationName}/vendors/{vendorName}/networkFunctions/{serviceKey}\"\n\tif locationName == \"\" {\n\t\treturn nil, errors.New(\"parameter locationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{locationName}\", url.PathEscape(locationName))\n\tif vendorName == \"\" {\n\t\treturn nil, errors.New(\"parameter vendorName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vendorName}\", url.PathEscape(vendorName))\n\tif serviceKey == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceKey cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceKey}\", url.PathEscape(serviceKey))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *VirtualApplianceSKUsClient) getCreateRequest(ctx context.Context, skuName string, options *VirtualApplianceSKUsGetOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkVirtualApplianceSkus/{skuName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\turlPath = strings.ReplaceAll(urlPath, \"{skuName}\", url.PathEscape(skuName))\n\treq, err := azcore.NewRequest(ctx, 
http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-07-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateBeginVnDialogueRequest() (request *BeginVnDialogueRequest) {\n\trequest = &BeginVnDialogueRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"CloudCallCenter\", \"2017-07-05\", \"BeginVnDialogue\", \"\", \"\")\n\trequest.Method = requests.GET\n\treturn\n}", "func (client *ClustersClient) getCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, options *ClustersClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/serverGroupsv2/{clusterName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-11-08\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *CloudServicesClient) getCreateRequest(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif cloudServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter cloudServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{cloudServiceName}\", url.PathEscape(cloudServiceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-09-04\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *ManagedClustersClient) getCreateRequest(ctx context.Context, resourceGroupName string, resourceName string, options *ManagedClustersClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}\"\n\tif 
client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(resourceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-11-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateDescribeClusterServiceConfigForAdminRequest() (request *DescribeClusterServiceConfigForAdminRequest) {\n\trequest = &DescribeClusterServiceConfigForAdminRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Emr\", \"2016-04-08\", \"DescribeClusterServiceConfigForAdmin\", \"emr\", \"openAPI\")\n\treturn\n}", "func CreateGetOcJusticeTerminalCaseRequest() (request *GetOcJusticeTerminalCaseRequest) {\n\trequest = &GetOcJusticeTerminalCaseRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"dt-oc-info\", \"2022-08-29\", \"GetOcJusticeTerminalCase\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *ConsumerInvitationsClient) getCreateRequest(ctx context.Context, location string, invitationID string, options *ConsumerInvitationsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/providers/Microsoft.DataShare/locations/{location}/consumerInvitations/{invitationId}\"\n\tif location == \"\" {\n\t\treturn nil, errors.New(\"parameter location cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{location}\", url.PathEscape(location))\n\tif invitationID == \"\" {\n\t\treturn nil, errors.New(\"parameter invitationID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{invitationId}\", url.PathEscape(invitationID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2020-09-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func CreateDescribeExplorerRequest() (request *DescribeExplorerRequest) {\n\trequest = &DescribeExplorerRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Baas\", \"2018-07-31\", \"DescribeExplorer\", \"\", \"\")\n\treturn\n}", "func NewCreateanewNcosLnpCarrierRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) {\n\tvar err error\n\n\tqueryUrl, err := url.Parse(server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbasePath := fmt.Sprintf(\"/ncoslnpcarriers\")\n\tif basePath[0] == '/' {\n\t\tbasePath = basePath[1:]\n\t}\n\n\tqueryUrl, err = queryUrl.Parse(basePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"POST\", queryUrl.String(), body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Add(\"Content-Type\", 
contentType)\n\treturn req, nil\n}", "func encodeGRPCEchoRequest(_ context.Context, request interface{}) (interface{}, error) {\n\n\t\n\tr:=request.(endpoints.EchoRequest);\n\treturn &pb.EchoRequest{Word: r.Word}, nil\n\t\n\t\n}", "func (client *DefenderSettingsClient) getCreateRequest(ctx context.Context, options *DefenderSettingsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.IoTSecurity/defenderSettings/default\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-02-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *CustomAssessmentAutomationsClient) createCreateRequest(ctx context.Context, resourceGroupName string, customAssessmentAutomationName string, customAssessmentAutomationBody CustomAssessmentAutomationRequest, options *CustomAssessmentAutomationsCreateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Security/customAssessmentAutomations/{customAssessmentAutomationName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif customAssessmentAutomationName == \"\" {\n\t\treturn nil, errors.New(\"parameter customAssessmentAutomationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{customAssessmentAutomationName}\", url.PathEscape(customAssessmentAutomationName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, runtime.MarshalAsJSON(req, customAssessmentAutomationBody)\n}", "func (client *GuestAgentsClient) getCreateRequest(ctx context.Context, resourceGroupName string, virtualMachineName string, name string, options *GuestAgentsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{virtualMachineName}/guestAgents/{name}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif virtualMachineName == \"\" 
{\n\t\treturn nil, errors.New(\"parameter virtualMachineName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{virtualMachineName}\", url.PathEscape(virtualMachineName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-01-10-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *VirtualMachineImagesEdgeZoneClient) getCreateRequest(ctx context.Context, location string, edgeZone string, publisherName string, offer string, skus string, version string, options *VirtualMachineImagesEdgeZoneClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/edgeZones/{edgeZone}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions/{version}\"\n\tif location == \"\" {\n\t\treturn nil, errors.New(\"parameter location cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{location}\", url.PathEscape(location))\n\tif edgeZone == \"\" {\n\t\treturn nil, errors.New(\"parameter edgeZone cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{edgeZone}\", url.PathEscape(edgeZone))\n\tif publisherName == \"\" {\n\t\treturn nil, errors.New(\"parameter publisherName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{publisherName}\", url.PathEscape(publisherName))\n\tif offer == \"\" {\n\t\treturn nil, errors.New(\"parameter offer cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{offer}\", url.PathEscape(offer))\n\tif skus == \"\" {\n\t\treturn nil, errors.New(\"parameter skus cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{skus}\", url.PathEscape(skus))\n\tif version == \"\" {\n\t\treturn nil, errors.New(\"parameter version cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{version}\", url.PathEscape(version))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-11-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (i *ICommunityService) GetUserPartnerEventNews() (*geyser.Request, error) {\n\tsm, err := i.Interface.Methods.Get(schema.MethodKey{\n\t\tName: \"GetUserPartnerEventNews\",\n\t\tVersion: 1,\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq := geyser.NewRequest(i.Interface, sm)\n\n\treturn req, nil\n}", "func encodeCreateRequest(_ context.Context, request interface{}) (interface{}, error) {\n\tr := request.(endpoint1.CreateRequest)\n\treturn &pb.CreateRequest{\n\t\tEmail: r.Email,\n\t\tPassword: r.Password,\n\t\tOrgname: r.OrgName}, nil\n}", "func (client *ExpressRoutePortsLocationsClient) getCreateRequest(ctx context.Context, locationName string, options 
*ExpressRoutePortsLocationsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/providers/Microsoft.Network/ExpressRoutePortsLocations/{locationName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif locationName == \"\" {\n\t\treturn nil, errors.New(\"parameter locationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{locationName}\", url.PathEscape(locationName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *IncidentsClient) getCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, incidentID string, options *IncidentsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/incidents/{incidentId}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif workspaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter workspaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{workspaceName}\", url.PathEscape(workspaceName))\n\tif incidentID == \"\" {\n\t\treturn nil, errors.New(\"parameter incidentID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{incidentId}\", url.PathEscape(incidentID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-05-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *CassandraClustersClient) getCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, options *CassandraClustersClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", 
url.PathEscape(clusterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-03-15-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *IotSecuritySolutionClient) getCreateRequest(ctx context.Context, resourceGroupName string, solutionName string, options *IotSecuritySolutionClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Security/iotSecuritySolutions/{solutionName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif solutionName == \"\" {\n\t\treturn nil, errors.New(\"parameter solutionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{solutionName}\", url.PathEscape(solutionName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2019-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *DataCollectionEndpointsClient) getCreateRequest(ctx context.Context, resourceGroupName string, dataCollectionEndpointName string, options *DataCollectionEndpointsGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionEndpoints/{dataCollectionEndpointName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif dataCollectionEndpointName == \"\" {\n\t\treturn nil, errors.New(\"parameter dataCollectionEndpointName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{dataCollectionEndpointName}\", url.PathEscape(dataCollectionEndpointName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *VendorSKUPreviewClient) getCreateRequest(ctx context.Context, vendorName string, skuName string, previewSubscription string, options *VendorSKUPreviewClientGetOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/providers/Microsoft.HybridNetwork/vendors/{vendorName}/vendorSkus/{skuName}/previewSubscriptions/{previewSubscription}\"\n\tif vendorName == \"\" {\n\t\treturn nil, errors.New(\"parameter vendorName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{vendorName}\", url.PathEscape(vendorName))\n\tif skuName == \"\" {\n\t\treturn nil, errors.New(\"parameter skuName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{skuName}\", url.PathEscape(skuName))\n\tif previewSubscription == \"\" {\n\t\treturn nil, errors.New(\"parameter previewSubscription cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{previewSubscription}\", url.PathEscape(previewSubscription))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *ManagedInstancesClient) getCreateRequest(ctx context.Context, resourceGroupName string, managedInstanceName string, options *ManagedInstancesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif managedInstanceName == \"\" {\n\t\treturn nil, errors.New(\"parameter managedInstanceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managedInstanceName}\", url.PathEscape(managedInstanceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", *options.Expand)\n\t}\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (ibmAnalyticsEngineApi *IbmAnalyticsEngineApiV2) CreateCustomizationRequest(createCustomizationRequestOptions *CreateCustomizationRequestOptions) (result *AnalyticsEngineCreateCustomizationResponse, response *core.DetailedResponse, err error) {\n\terr = core.ValidateNotNil(createCustomizationRequestOptions, \"createCustomizationRequestOptions cannot be nil\")\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.ValidateStruct(createCustomizationRequestOptions, \"createCustomizationRequestOptions\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tpathSegments := []string{\"v2/analytics_engines\", \"customization_requests\"}\n\tpathParameters := []string{*createCustomizationRequestOptions.InstanceGuid}\n\n\tbuilder := 
core.NewRequestBuilder(core.POST)\n\t_, err = builder.ConstructHTTPURL(ibmAnalyticsEngineApi.Service.Options.URL, pathSegments, pathParameters)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor headerName, headerValue := range createCustomizationRequestOptions.Headers {\n\t\tbuilder.AddHeader(headerName, headerValue)\n\t}\n\n\tsdkHeaders := common.GetSdkHeaders(\"ibm_analytics_engine_api\", \"V2\", \"CreateCustomizationRequest\")\n\tfor headerName, headerValue := range sdkHeaders {\n\t\tbuilder.AddHeader(headerName, headerValue)\n\t}\n\tbuilder.AddHeader(\"Accept\", \"application/json\")\n\tbuilder.AddHeader(\"Content-Type\", \"application/json\")\n\n\tbody := make(map[string]interface{})\n\tif createCustomizationRequestOptions.Target != nil {\n\t\tbody[\"target\"] = createCustomizationRequestOptions.Target\n\t}\n\tif createCustomizationRequestOptions.CustomActions != nil {\n\t\tbody[\"custom_actions\"] = createCustomizationRequestOptions.CustomActions\n\t}\n\t_, err = builder.SetBodyContentJSON(body)\n\tif err != nil {\n\t\treturn\n\t}\n\n\trequest, err := builder.Build()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tresponse, err = ibmAnalyticsEngineApi.Service.Request(request, make(map[string]interface{}))\n\tif err == nil {\n\t\tm, ok := response.Result.(map[string]interface{})\n\t\tif !ok {\n\t\t\terr = fmt.Errorf(\"an error occurred while processing the operation response\")\n\t\t\treturn\n\t\t}\n\t\tresult, err = UnmarshalAnalyticsEngineCreateCustomizationResponse(m)\n\t\tresponse.Result = result\n\t}\n\n\treturn\n}", "func CreateCreateBoxCodeRequest() (request *CreateBoxCodeRequest) {\n\trequest = &CreateBoxCodeRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"ITaaS\", \"2017-05-05\", \"CreateBoxCode\", \"itaas\", \"openAPI\")\n\treturn\n}", "func (client *DataCollectionEndpointsClient) getCreateRequest(ctx context.Context, resourceGroupName string, dataCollectionEndpointName string, options *DataCollectionEndpointsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionEndpoints/{dataCollectionEndpointName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif dataCollectionEndpointName == \"\" {\n\t\treturn nil, errors.New(\"parameter dataCollectionEndpointName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{dataCollectionEndpointName}\", url.PathEscape(dataCollectionEndpointName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-04-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *DicomServicesClient) getCreateRequest(ctx context.Context, resourceGroupName string, workspaceName string, dicomServiceName string, options *DicomServicesClientGetOptions) (*policy.Request, error) {\n\turlPath := 
\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HealthcareApis/workspaces/{workspaceName}/dicomservices/{dicomServiceName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif workspaceName == \"\" {\n\t\treturn nil, errors.New(\"parameter workspaceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{workspaceName}\", url.PathEscape(workspaceName))\n\tif dicomServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter dicomServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{dicomServiceName}\", url.PathEscape(dicomServiceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func NewEndUserLicenseAgreementsCreateInstanceRequest(server string, body EndUserLicenseAgreementsCreateInstanceJSONRequestBody) (*http.Request, error) {\n\tvar bodyReader io.Reader\n\tbuf, err := json.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbodyReader = bytes.NewReader(buf)\n\treturn NewEndUserLicenseAgreementsCreateInstanceRequestWithBody(server, \"application/json\", bodyReader)\n}", "func decodeGetByCreteriaRequest(_ context.Context, r *http1.Request) (interface{}, error) {\n\n\tvars := mux.Vars(r)\n\tname, ok := vars[\"name\"]\n\tif !ok {\n\t\treturn nil, errors.New(\"not a valid creteria\")\n\t}\n\treq := endpoint.GetByCreteriaRequest{\n\t\tCreteria: name,\n\t}\n\treturn req, nil\n}", "func (client *AFDOriginsClient) getCreateRequest(ctx context.Context, resourceGroupName string, profileName string, originGroupName string, originName string, options *AFDOriginsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/originGroups/{originGroupName}/origins/{originName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif profileName == \"\" {\n\t\treturn nil, errors.New(\"parameter profileName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{profileName}\", url.PathEscape(profileName))\n\tif originGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter originGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{originGroupName}\", url.PathEscape(originGroupName))\n\tif originName == \"\" {\n\t\treturn nil, errors.New(\"parameter originName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{originName}\", url.PathEscape(originName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err 
:= runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateModifyClusterServiceConfigForAdminRequest() (request *ModifyClusterServiceConfigForAdminRequest) {\n\trequest = &ModifyClusterServiceConfigForAdminRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Emr\", \"2016-04-08\", \"ModifyClusterServiceConfigForAdmin\", \"emr\", \"openAPI\")\n\treturn\n}", "func (s *OidcService) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := s.NewRequest(op, params, data)\n\n\treturn req\n}", "func CreateGetArmsConsoleUrlRequest() (request *GetArmsConsoleUrlRequest) {\n\trequest = &GetArmsConsoleUrlRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"ARMS\", \"2019-08-08\", \"GetArmsConsoleUrl\", \"arms\", \"openAPI\")\n\trequest.Method = requests.GET\n\treturn\n}", "func (client *VirtualApplianceSitesClient) getCreateRequest(ctx context.Context, resourceGroupName string, networkVirtualApplianceName string, siteName string, options *VirtualApplianceSitesGetOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}/virtualApplianceSites/{siteName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\turlPath = strings.ReplaceAll(urlPath, \"{networkVirtualApplianceName}\", url.PathEscape(networkVirtualApplianceName))\n\turlPath = strings.ReplaceAll(urlPath, \"{siteName}\", url.PathEscape(siteName))\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\tquery := req.URL.Query()\n\tquery.Set(\"api-version\", \"2020-07-01\")\n\treq.URL.RawQuery = query.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *AvailabilitySetsClient) getCreateRequest(ctx context.Context, resourceGroupName string, availabilitySetName string, options *AvailabilitySetsGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif availabilitySetName == \"\" {\n\t\treturn nil, errors.New(\"parameter availabilitySetName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{availabilitySetName}\", url.PathEscape(availabilitySetName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := 
req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func NewCreateanewNcosPatternRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) {\n\tvar err error\n\n\tqueryUrl, err := url.Parse(server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbasePath := fmt.Sprintf(\"/ncospatterns\")\n\tif basePath[0] == '/' {\n\t\tbasePath = basePath[1:]\n\t}\n\n\tqueryUrl, err = queryUrl.Parse(basePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"POST\", queryUrl.String(), body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Add(\"Content-Type\", contentType)\n\treturn req, nil\n}", "func (client *NetworkToNetworkInterconnectsClient) createCreateRequest(ctx context.Context, resourceGroupName string, networkFabricName string, networkToNetworkInterconnectName string, body NetworkToNetworkInterconnect, options *NetworkToNetworkInterconnectsClientBeginCreateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetworkFabric/networkFabrics/{networkFabricName}/networkToNetworkInterconnects/{networkToNetworkInterconnectName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif networkFabricName == \"\" {\n\t\treturn nil, errors.New(\"parameter networkFabricName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{networkFabricName}\", url.PathEscape(networkFabricName))\n\tif networkToNetworkInterconnectName == \"\" {\n\t\treturn nil, errors.New(\"parameter networkToNetworkInterconnectName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{networkToNetworkInterconnectName}\", url.PathEscape(networkToNetworkInterconnectName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-06-15\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, body)\n}", "func (client *ServersClient) getCreateRequest(ctx context.Context, resourceGroupName string, serverName string, options *ServersClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serverName == \"\" {\n\t\treturn nil, errors.New(\"parameter serverName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serverName}\", url.PathEscape(serverName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, 
runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", *options.Expand)\n\t}\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *PortalConfigClient) getEntityTagCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, portalConfigID string, options *PortalConfigClientGetEntityTagOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/portalconfigs/{portalConfigId}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif portalConfigID == \"\" {\n\t\treturn nil, errors.New(\"parameter portalConfigID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{portalConfigId}\", url.PathEscape(portalConfigID))\n\treq, err := runtime.NewRequest(ctx, http.MethodHead, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *CloudServiceRoleInstancesClient) getCreateRequest(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string, options *CloudServiceRoleInstancesGetOptions) (*azcore.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}\"\n\tif roleInstanceName == \"\" {\n\t\treturn nil, errors.New(\"parameter roleInstanceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{roleInstanceName}\", url.PathEscape(roleInstanceName))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif cloudServiceName == \"\" {\n\t\treturn nil, errors.New(\"parameter cloudServiceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{cloudServiceName}\", url.PathEscape(cloudServiceName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := azcore.NewRequest(ctx, http.MethodGet, azcore.JoinPaths(client.con.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Telemetry(telemetryInfo)\n\treqQP := req.URL.Query()\n\treqQP.Set(\"api-version\", 
\"2021-03-01\")\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", string(*options.Expand))\n\t}\n\treq.URL.RawQuery = reqQP.Encode()\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (client *CompliancesClient) getCreateRequest(ctx context.Context, scope string, complianceName string, options *CompliancesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/{scope}/providers/Microsoft.Security/compliances/{complianceName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{scope}\", scope)\n\tif complianceName == \"\" {\n\t\treturn nil, errors.New(\"parameter complianceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{complianceName}\", url.PathEscape(complianceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2017-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func NewCreateanewNcosPatternRequest(server string, body CreateanewNcosPatternJSONRequestBody) (*http.Request, error) {\n\tvar bodyReader io.Reader\n\tbuf, err := json.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbodyReader = bytes.NewReader(buf)\n\treturn NewCreateanewNcosPatternRequestWithBody(server, \"application/json\", bodyReader)\n}", "func (c *InputService14ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func NewCreateanewInvoiceRequest(server string, body CreateanewInvoiceJSONRequestBody) (*http.Request, error) {\n\tvar bodyReader io.Reader\n\tbuf, err := json.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbodyReader = bytes.NewReader(buf)\n\treturn NewCreateanewInvoiceRequestWithBody(server, \"application/json\", bodyReader)\n}", "func CreateFindInstanceNodeListRequest() (request *FindInstanceNodeListRequest) {\n\trequest = &FindInstanceNodeListRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"CSB\", \"2017-11-18\", \"FindInstanceNodeList\", \"\", \"\")\n\trequest.Method = requests.GET\n\treturn\n}", "func (client *ReplicationsClient) getCreateRequest(ctx context.Context, resourceGroupName string, registryName string, replicationName string, options *ReplicationsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}\"\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif registryName == \"\" {\n\t\treturn nil, errors.New(\"parameter registryName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{registryName}\", url.PathEscape(registryName))\n\tif replicationName == \"\" {\n\t\treturn nil, errors.New(\"parameter replicationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{replicationName}\", url.PathEscape(replicationName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), 
urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (c *InputService15ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (client *PortalConfigClient) getCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, portalConfigID string, options *PortalConfigClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/portalconfigs/{portalConfigId}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\tif portalConfigID == \"\" {\n\t\treturn nil, errors.New(\"parameter portalConfigID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{portalConfigId}\", url.PathEscape(portalConfigID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func NewReplacechangeaspecificResellerRequest(server string, id string, body ReplacechangeaspecificResellerJSONRequestBody) (*http.Request, error) {\n\tvar bodyReader io.Reader\n\tbuf, err := json.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbodyReader = bytes.NewReader(buf)\n\treturn NewReplacechangeaspecificResellerRequestWithBody(server, id, \"application/json\", bodyReader)\n}", "func CreateQueryVnConversationsRequest() (request *QueryVnConversationsRequest) {\n\trequest = &QueryVnConversationsRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"CloudCallCenter\", \"2017-07-05\", \"QueryVnConversations\", \"\", \"\")\n\trequest.Method = requests.GET\n\treturn\n}", "func (client *CapacityReservationsClient) getCreateRequest(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, options *CapacityReservationsGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}/capacityReservations/{capacityReservationName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif capacityReservationGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter 
capacityReservationGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{capacityReservationGroupName}\", url.PathEscape(capacityReservationGroupName))\n\tif capacityReservationName == \"\" {\n\t\treturn nil, errors.New(\"parameter capacityReservationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{capacityReservationName}\", url.PathEscape(capacityReservationName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.Expand != nil {\n\t\treqQP.Set(\"$expand\", string(*options.Expand))\n\t}\n\treqQP.Set(\"api-version\", \"2021-07-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func NewGetaspecificResellerRequest(server string, id string) (*http.Request, error) {\n\tvar err error\n\n\tvar pathParam0 string\n\n\tpathParam0, err = runtime.StyleParam(\"simple\", false, \"id\", id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tqueryUrl, err := url.Parse(server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbasePath := fmt.Sprintf(\"/resellers/%s\", pathParam0)\n\tif basePath[0] == '/' {\n\t\tbasePath = basePath[1:]\n\t}\n\n\tqueryUrl, err = queryUrl.Parse(basePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"GET\", queryUrl.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn req, nil\n}", "func (client *ApplicationTypeVersionsClient) getCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, applicationTypeName string, version string, options *ApplicationTypeVersionsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabric/clusters/{clusterName}/applicationTypes/{applicationTypeName}/versions/{version}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\tif applicationTypeName == \"\" {\n\t\treturn nil, errors.New(\"parameter applicationTypeName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{applicationTypeName}\", url.PathEscape(applicationTypeName))\n\tif version == \"\" {\n\t\treturn nil, errors.New(\"parameter version cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{version}\", url.PathEscape(version))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2021-06-01\")\n\treq.Raw().URL.RawQuery = 
reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *MachineExtensionsClient) getCreateRequest(ctx context.Context, resourceGroupName string, name string, extensionName string, options *MachineExtensionsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/virtualMachines/{name}/extensions/{extensionName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"parameter name cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{name}\", url.PathEscape(name))\n\tif extensionName == \"\" {\n\t\treturn nil, errors.New(\"parameter extensionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{extensionName}\", url.PathEscape(extensionName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-01-10-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateGetOpenNLURequest() (request *GetOpenNLURequest) {\n\trequest = &GetOpenNLURequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"alinlp\", \"2020-06-29\", \"GetOpenNLU\", \"alinlp\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (c *OutputService15ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func CreateDescribeClusterServiceConfigTagRequest() (request *DescribeClusterServiceConfigTagRequest) {\n\trequest = &DescribeClusterServiceConfigTagRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Emr\", \"2016-04-08\", \"DescribeClusterServiceConfigTag\", \"emr\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateModifyVnRepeatingConfigRequest() (request *ModifyVnRepeatingConfigRequest) {\n\trequest = &ModifyVnRepeatingConfigRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"CloudCallCenter\", \"2017-07-05\", \"ModifyVnRepeatingConfig\", \"\", \"\")\n\trequest.Method = requests.GET\n\treturn\n}", "func (client *AgentsClient) getCreateRequest(ctx context.Context, resourceGroupName string, storageMoverName string, agentName string, options *AgentsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorageMover/storageMovers/{storageMoverName}/agents/{agentName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath 
= strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif storageMoverName == \"\" {\n\t\treturn nil, errors.New(\"parameter storageMoverName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{storageMoverName}\", url.PathEscape(storageMoverName))\n\tif agentName == \"\" {\n\t\treturn nil, errors.New(\"parameter agentName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{agentName}\", url.PathEscape(agentName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-07-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func CreateQueryPhoneNoAByTrackNoRequest() (request *QueryPhoneNoAByTrackNoRequest) {\n\trequest = &QueryPhoneNoAByTrackNoRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Dyplsapi\", \"2017-05-25\", \"QueryPhoneNoAByTrackNo\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateQueryCustomerAddressListRequest() (request *QueryCustomerAddressListRequest) {\nrequest = &QueryCustomerAddressListRequest{\nRpcRequest: &requests.RpcRequest{},\n}\nrequest.InitWithApiInfo(\"BssOpenApi\", \"2017-12-14\", \"QueryCustomerAddressList\", \"\", \"\")\nreturn\n}", "func (c *OutputService14ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func NewGetaspecificNcosLevelRequest(server string, id string) (*http.Request, error) {\n\tvar err error\n\n\tvar pathParam0 string\n\n\tpathParam0, err = runtime.StyleParam(\"simple\", false, \"id\", id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tqueryUrl, err := url.Parse(server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbasePath := fmt.Sprintf(\"/ncoslevels/%s\", pathParam0)\n\tif basePath[0] == '/' {\n\t\tbasePath = basePath[1:]\n\t}\n\n\tqueryUrl, err = queryUrl.Parse(basePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"GET\", queryUrl.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn req, nil\n}", "func (client *PeeringPoliciesClient) getCreateRequest(ctx context.Context, resourceGroupName string, managedNetworkName string, managedNetworkPeeringPolicyName string, options *PeeringPoliciesClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetwork/managedNetworks/{managedNetworkName}/managedNetworkPeeringPolicies/{managedNetworkPeeringPolicyName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif managedNetworkName == \"\" {\n\t\treturn nil, errors.New(\"parameter managedNetworkName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managedNetworkName}\", url.PathEscape(managedNetworkName))\n\tif managedNetworkPeeringPolicyName == \"\" {\n\t\treturn nil, errors.New(\"parameter 
managedNetworkPeeringPolicyName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managedNetworkPeeringPolicyName}\", url.PathEscape(managedNetworkPeeringPolicyName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2019-06-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *MonitoringSettingsClient) getCreateRequest(ctx context.Context, resourceGroupName string, serviceName string, options *MonitoringSettingsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif serviceName == \"\" {\n\t\treturn nil, errors.New(\"parameter serviceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{serviceName}\", url.PathEscape(serviceName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-01-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *InteractionsClient) getCreateRequest(ctx context.Context, resourceGroupName string, hubName string, interactionName string, options *InteractionsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CustomerInsights/hubs/{hubName}/interactions/{interactionName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif hubName == \"\" {\n\t\treturn nil, errors.New(\"parameter hubName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{hubName}\", url.PathEscape(hubName))\n\tif interactionName == \"\" {\n\t\treturn nil, errors.New(\"parameter interactionName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{interactionName}\", url.PathEscape(interactionName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\tif options != nil && options.LocaleCode != nil {\n\t\treqQP.Set(\"locale-code\", *options.LocaleCode)\n\t}\n\treqQP.Set(\"api-version\", \"2017-04-26\")\n\treq.Raw().URL.RawQuery = 
reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func (client *RPCClient) createRequest(call *Call, seq uint64) (*protocol.Message, error) {\n\tcall.seq = seq\n\n\treq := protocol.GetPooledMsg()\n\treq.ServicePath = call.ServicePath\n\treq.ServiceMethod = call.ServiceMethod\n\n\tif call.heartBeat {\n\t\treq.SetHeartbeat(true)\n\t} else {\n\t\treq.SetHeartbeat(false)\n\t}\n\n\tif call.compressType != protocol.None {\n\t\treq.SetCompressType(protocol.Gzip)\n\t}\n\n\treq.SetSeq(seq)\n\n\tmd := extractMdFromClientCtx(call.ctx)\n\tif len(md) > 0 {\n\t\treq.Metadata = md\n\t}\n\n\tif call.serializeType != protocol.SerializeNone {\n\t\treq.SetSerializeType(call.serializeType)\n\t\tcodec := share.Codecs[req.SerializeType()]\n\t\tif codec == nil {\n\t\t\terr := fmt.Errorf(\"can not find codec for %d\", req.SerializeType())\n\t\t\treturn nil, err\n\t\t}\n\t\tdata, err := codec.Encode(call.Args)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"encode \")\n\t\t}\n\t\treq.Payload = data\n\t}\n\n\treturn req, nil\n}", "func (client *ManagementAssociationsClient) getCreateRequest(ctx context.Context, resourceGroupName string, managementAssociationName string, options *ManagementAssociationsGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{providerName}/{resourceType}/{resourceName}/providers/Microsoft.OperationsManagement/ManagementAssociations/{managementAssociationName}\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif client.providerName == \"\" {\n\t\treturn nil, errors.New(\"parameter client.providerName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{providerName}\", url.PathEscape(client.providerName))\n\tif client.resourceType == \"\" {\n\t\treturn nil, errors.New(\"parameter client.resourceType cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceType}\", url.PathEscape(client.resourceType))\n\tif client.resourceName == \"\" {\n\t\treturn nil, errors.New(\"parameter client.resourceName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceName}\", url.PathEscape(client.resourceName))\n\tif managementAssociationName == \"\" {\n\t\treturn nil, errors.New(\"parameter managementAssociationName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{managementAssociationName}\", url.PathEscape(managementAssociationName))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.ep, urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2015-11-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header.Set(\"Accept\", \"application/json\")\n\treturn req, nil\n}", "func (c *InputService11ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func NewCreateanewSystemContactRequest(server string, body CreateanewSystemContactJSONRequestBody) (*http.Request, error) {\n\tvar bodyReader io.Reader\n\tbuf, 
err := json.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbodyReader = bytes.NewReader(buf)\n\treturn NewCreateanewSystemContactRequestWithBody(server, \"application/json\", bodyReader)\n}", "func (client *AccountsClient) getCreateRequest(ctx context.Context, resourceGroupName string, accountName string, options *AccountsClientGetOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CognitiveServices/accounts/{accountName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif accountName == \"\" {\n\t\treturn nil, errors.New(\"parameter accountName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{accountName}\", url.PathEscape(accountName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-05-01\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}" ]
[ "0.7064167", "0.68019885", "0.67888623", "0.6246826", "0.619077", "0.6190453", "0.5972899", "0.58737063", "0.5718027", "0.5634352", "0.5581408", "0.55074394", "0.54870504", "0.54577786", "0.5399441", "0.5375696", "0.53739506", "0.533545", "0.5314561", "0.5306372", "0.5286929", "0.5286219", "0.526692", "0.52543783", "0.5248801", "0.5199698", "0.51933193", "0.51914126", "0.51842827", "0.517721", "0.51766783", "0.51727766", "0.5166414", "0.5152922", "0.5137709", "0.5123715", "0.5120187", "0.51132226", "0.5103704", "0.510258", "0.50812435", "0.50786453", "0.5075925", "0.5075533", "0.5058401", "0.50580233", "0.5047425", "0.50450104", "0.504231", "0.503778", "0.5035541", "0.50253356", "0.50246245", "0.5023822", "0.5015015", "0.5007725", "0.50070506", "0.50023437", "0.5001533", "0.49847224", "0.4982388", "0.49816146", "0.49803975", "0.49771592", "0.49723113", "0.49721965", "0.4969209", "0.49662143", "0.495835", "0.49579024", "0.49572873", "0.4956918", "0.49502975", "0.49394423", "0.49379876", "0.49348333", "0.49343988", "0.4933832", "0.49288177", "0.49234757", "0.4918827", "0.49124533", "0.4907945", "0.4906079", "0.49039948", "0.4903073", "0.49002713", "0.48976013", "0.48944175", "0.4892715", "0.48910868", "0.48849583", "0.4881895", "0.4874715", "0.48692453", "0.4867737", "0.48675898", "0.48663136", "0.48620868", "0.48571068" ]
0.89601254
0
CreateGetNerCustomizedSeaEcomResponse creates a response to parse from GetNerCustomizedSeaEcom response
func CreateGetNerCustomizedSeaEcomResponse() (response *GetNerCustomizedSeaEcomResponse) { response = &GetNerCustomizedSeaEcomResponse{ BaseResponse: &responses.BaseResponse{}, } return }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func CreateGetNerCustomizedSeaEcomRequest() (request *GetNerCustomizedSeaEcomRequest) {\n\trequest = &GetNerCustomizedSeaEcomRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"alinlp\", \"2020-06-29\", \"GetNerCustomizedSeaEcom\", \"alinlp\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (client *Client) GetNerCustomizedSeaEcom(request *GetNerCustomizedSeaEcomRequest) (response *GetNerCustomizedSeaEcomResponse, err error) {\n\tresponse = CreateGetNerCustomizedSeaEcomResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}", "func CreateGetWsCustomizedChEcomContentResponse() (response *GetWsCustomizedChEcomContentResponse) {\n\tresponse = &GetWsCustomizedChEcomContentResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateGetWsCustomizedChO2OResponse() (response *GetWsCustomizedChO2OResponse) {\n\tresponse = &GetWsCustomizedChO2OResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (client *Client) GetNerCustomizedSeaEcomWithCallback(request *GetNerCustomizedSeaEcomRequest, callback func(response *GetNerCustomizedSeaEcomResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *GetNerCustomizedSeaEcomResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.GetNerCustomizedSeaEcom(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) GetNerCustomizedSeaEcomWithChan(request *GetNerCustomizedSeaEcomRequest) (<-chan *GetNerCustomizedSeaEcomResponse, <-chan error) {\n\tresponseChan := make(chan *GetNerCustomizedSeaEcomResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.GetNerCustomizedSeaEcom(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func CreateOemSitingSelctionResponse() (response *OemSitingSelctionResponse) {\n\tresponse = &OemSitingSelctionResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func newCustomsResponse(answers string) (customsResponse, error) {\n\tresult := customsResponse{}\n\tfor _, cc := range answers {\n\t\tif cc < 'a' || cc > 'z' {\n\t\t\terr := errors.Errorf(\"could not parse `%s`: invalid character `%c`\", answers, cc)\n\t\t\treturn customsResponse{}, err\n\t\t}\n\t\tresult[cc-'a'] = true\n\t}\n\treturn result, nil\n}", "func CreateGetIndustryCommerceInfoResponse() (response *GetIndustryCommerceInfoResponse) {\n\tresponse = &GetIndustryCommerceInfoResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func ParseCreateanewNcosLevelResponse(rsp *http.Response) (*CreateanewNcosLevelResponse, error) {\n\tbodyBytes, err := ioutil.ReadAll(rsp.Body)\n\tdefer rsp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := &CreateanewNcosLevelResponse{\n\t\tBody: bodyBytes,\n\t\tHTTPResponse: rsp,\n\t}\n\n\tswitch {\n\tcase strings.Contains(rsp.Header.Get(\"Content-Type\"), \"json\") && rsp.StatusCode == 201:\n\t\tvar dest []Thenewlycreateditemorempty32\n\t\tif err := json.Unmarshal(bodyBytes, &dest); err != nil {\n\t\t\treturn nil, 
err\n\t\t}\n\t\tresponse.JSON201 = &dest\n\n\tcase strings.Contains(rsp.Header.Get(\"Content-Type\"), \"json\") && rsp.StatusCode == 422:\n\t\tvar dest Anerror\n\t\tif err := json.Unmarshal(bodyBytes, &dest); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresponse.JSON422 = &dest\n\n\t}\n\n\treturn response, nil\n}", "func CreateCoreEngineResponse() (response *CoreEngineResponse) {\n\tresponse = &CoreEngineResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateGetEMapResponse() (response *GetEMapResponse) {\n\tresponse = &GetEMapResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateCreateCustomCallTaggingResponse() (response *CreateCustomCallTaggingResponse) {\n\tresponse = &CreateCustomCallTaggingResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateDescribeReservedInstancesResponse() (response *DescribeReservedInstancesResponse) {\n\tresponse = &DescribeReservedInstancesResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateModifyVnRepeatingConfigResponse() (response *ModifyVnRepeatingConfigResponse) {\n\tresponse = &ModifyVnRepeatingConfigResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateVerifyCenResponse() (response *VerifyCenResponse) {\n\tresponse = &VerifyCenResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateBeginVnDialogueResponse() (response *BeginVnDialogueResponse) {\n\tresponse = &BeginVnDialogueResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateModifySkillGroupExResponse() (response *ModifySkillGroupExResponse) {\n\tresponse = &ModifySkillGroupExResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateDescribeVnKnowledgeResponse() (response *DescribeVnKnowledgeResponse) {\n\tresponse = &DescribeVnKnowledgeResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateConvertInvoiceResponse() (response *ConvertInvoiceResponse) {\n\tresponse = &ConvertInvoiceResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateGetWsCustomizedChEcomContentRequest() (request *GetWsCustomizedChEcomContentRequest) {\n\trequest = &GetWsCustomizedChEcomContentRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"alinlp\", \"2020-06-29\", \"GetWsCustomizedChEcomContent\", \"alinlp\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateGetOcJusticeTerminalCaseResponse() (response *GetOcJusticeTerminalCaseResponse) {\n\tresponse = &GetOcJusticeTerminalCaseResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateAddBsnFabricBizChainResponse() (response *AddBsnFabricBizChainResponse) {\n\tresponse = &AddBsnFabricBizChainResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (ccr ContainersCreateResponse) Response() *http.Response {\n\treturn ccr.rawResponse\n}", "func CreateCreateQualityEntityResponse() (response *CreateQualityEntityResponse) {\n\tresponse = &CreateQualityEntityResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateResponse(w *gin.Context, payload interface{}) {\n\tw.JSON(200, payload)\n}", "func CreateGetKeywordChEcomResponse() (response *GetKeywordChEcomResponse) {\n\tresponse = &GetKeywordChEcomResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateCreateBoxCodeResponse() (response 
*CreateBoxCodeResponse) {\n\tresponse = &CreateBoxCodeResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateFindInstanceNodeListResponse() (response *FindInstanceNodeListResponse) {\n\tresponse = &FindInstanceNodeListResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func newResponse(data map[string]string) (*AMIResponse, error) {\n\tr, found := data[\"Response\"]\n\tif !found {\n\t\treturn nil, errors.New(\"Not Response\")\n\t}\n\tresponse := &AMIResponse{ID: data[\"ActionID\"], Status: r, Params: make(map[string]string)}\n\tfor k, v := range data {\n\t\tif k == \"Response\" {\n\t\t\tcontinue\n\t\t}\n\t\tresponse.Params[k] = v\n\t}\n\treturn response, nil\n}", "func CustomResponse(r Response, err error) Response {\n\tret := r\n\tret.Extra = err.Error()\n\treturn ret\n}", "func encodeGetByCreteriaResponse(ctx context.Context, w http1.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\treturn\n}", "func CreateGetOpenNLUResponse() (response *GetOpenNLUResponse) {\n\tresponse = &GetOpenNLUResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateModifyDasInstanceConfigResponse() (response *ModifyDasInstanceConfigResponse) {\n\tresponse = &ModifyDasInstanceConfigResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateRetryChangeAccountEmailResponse() (response *RetryChangeAccountEmailResponse) {\n\tresponse = &RetryChangeAccountEmailResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateSearchInventoryResponse() (response *SearchInventoryResponse) {\n\tresponse = &SearchInventoryResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func ParseCreateanewNcosPatternResponse(rsp *http.Response) (*CreateanewNcosPatternResponse, error) {\n\tbodyBytes, err := ioutil.ReadAll(rsp.Body)\n\tdefer rsp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := &CreateanewNcosPatternResponse{\n\t\tBody: bodyBytes,\n\t\tHTTPResponse: rsp,\n\t}\n\n\tswitch {\n\tcase strings.Contains(rsp.Header.Get(\"Content-Type\"), \"json\") && rsp.StatusCode == 201:\n\t\tvar dest string\n\t\tif err := json.Unmarshal(bodyBytes, &dest); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresponse.JSON201 = &dest\n\n\tcase strings.Contains(rsp.Header.Get(\"Content-Type\"), \"json\") && rsp.StatusCode == 422:\n\t\tvar dest Anerror\n\t\tif err := json.Unmarshal(bodyBytes, &dest); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresponse.JSON422 = &dest\n\n\t}\n\n\treturn response, nil\n}", "func CreateDescribeDialogueNodeStatisticsResponse() (response *DescribeDialogueNodeStatisticsResponse) {\n\tresponse = &DescribeDialogueNodeStatisticsResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateCreateExchangeResponse() (response *CreateExchangeResponse) {\n\tresponse = &CreateExchangeResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func ParseCreateanewNcosLnpCarrierResponse(rsp *http.Response) (*CreateanewNcosLnpCarrierResponse, error) {\n\tbodyBytes, err := ioutil.ReadAll(rsp.Body)\n\tdefer rsp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := &CreateanewNcosLnpCarrierResponse{\n\t\tBody: 
bodyBytes,\n\t\tHTTPResponse: rsp,\n\t}\n\n\tswitch {\n\tcase strings.Contains(rsp.Header.Get(\"Content-Type\"), \"json\") && rsp.StatusCode == 201:\n\t\tvar dest []Thenewlycreateditemorempty47\n\t\tif err := json.Unmarshal(bodyBytes, &dest); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresponse.JSON201 = &dest\n\n\tcase strings.Contains(rsp.Header.Get(\"Content-Type\"), \"json\") && rsp.StatusCode == 422:\n\t\tvar dest Anerror\n\t\tif err := json.Unmarshal(bodyBytes, &dest); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresponse.JSON422 = &dest\n\n\t}\n\n\treturn response, nil\n}", "func CreateGetArmsConsoleUrlResponse() (response *GetArmsConsoleUrlResponse) {\n\tresponse = &GetArmsConsoleUrlResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateGetManagedRuleResponse() (response *GetManagedRuleResponse) {\n\tresponse = &GetManagedRuleResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateGetOfficePreviewURLResponse() (response *GetOfficePreviewURLResponse) {\n\tresponse = &GetOfficePreviewURLResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateModifyOfficeSiteAttributeResponse() (response *ModifyOfficeSiteAttributeResponse) {\n\tresponse = &ModifyOfficeSiteAttributeResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateQueryVnConversationsResponse() (response *QueryVnConversationsResponse) {\n\tresponse = &QueryVnConversationsResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateDescribeExplorerResponse() (response *DescribeExplorerResponse) {\n\tresponse = &DescribeExplorerResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (c *Client) DecodeEasypostCustomsinfo(resp *http.Response) (*EasypostCustomsinfo, error) {\n\tvar decoded EasypostCustomsinfo\n\terr := c.Decoder.Decode(&decoded, resp.Body, resp.Header.Get(\"Content-Type\"))\n\treturn &decoded, err\n}", "func CreateReleaseEipSegmentAddressResponse() (response *ReleaseEipSegmentAddressResponse) {\n\tresponse = &ReleaseEipSegmentAddressResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateSearchEventsResponse() (response *SearchEventsResponse) {\n\tresponse = &SearchEventsResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func createResponse(req *http.Request) *http.Response {\n\treturn &http.Response{\n\t\tStatusCode: http.StatusOK,\n\t\tRequest: req,\n\t\tHeader: make(http.Header),\n\t\tBody: ioutil.NopCloser(bytes.NewBuffer([]byte{})),\n\t}\n}", "func (c *ClientWithResponses) CreateanewNcosLevelWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader) (*CreateanewNcosLevelResponse, error) {\n\trsp, err := c.CreateanewNcosLevelWithBody(ctx, contentType, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ParseCreateanewNcosLevelResponse(rsp)\n}", "func CreateDescribeParentPlatformResponse() (response *DescribeParentPlatformResponse) {\n\tresponse = &DescribeParentPlatformResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateListMcubeNebulaResourcesResponse() (response *ListMcubeNebulaResourcesResponse) {\n\tresponse = &ListMcubeNebulaResourcesResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateReleaseAnycastEipAddressResponse() (response *ReleaseAnycastEipAddressResponse) {\n\tresponse = &ReleaseAnycastEipAddressResponse{\n\t\tBaseResponse: 
&responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateGetClusterMetricsResponse() (response *GetClusterMetricsResponse) {\n\tresponse = &GetClusterMetricsResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (client *CustomAssessmentAutomationsClient) createHandleResponse(resp *http.Response) (CustomAssessmentAutomationsCreateResponse, error) {\n\tresult := CustomAssessmentAutomationsCreateResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.CustomAssessmentAutomation); err != nil {\n\t\treturn CustomAssessmentAutomationsCreateResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func CreateNormalRpcHsfApiResponse() (response *NormalRpcHsfApiResponse) {\n\tresponse = &NormalRpcHsfApiResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateDescribeOssObjectDetailResponse() (response *DescribeOssObjectDetailResponse) {\n\tresponse = &DescribeOssObjectDetailResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateCreateClusterResponse() (response *CreateClusterResponse) {\n\tresponse = &CreateClusterResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (client ModelClient) CreateCustomPrebuiltEntityRoleResponder(resp *http.Response) (result UUID, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),\n\t\tautorest.ByUnmarshallingJSON(&result.Value),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (j *JsonRpc) CreateDefaultExceptionResponse(reqId interface{}, code int, message string) (JsonRpcExceptionResponse) {\n\tvar resp JsonRpcExceptionResponse\n\tresp.Id = reqId\n\tresp.JsonRpc = JsonRpcVersion\n\tresp.Error.Code = code\n\tresp.Error.Message = message\n\treturn resp\n\n}", "func CreateQueryPhoneNoAByTrackNoResponse() (response *QueryPhoneNoAByTrackNoResponse) {\n\tresponse = &QueryPhoneNoAByTrackNoResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateTestSeeResponse() (response *TestSeeResponse) {\n\tresponse = &TestSeeResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func newIdemixEnrollmentResponseNet(resp *idemix.EnrollmentResponse) api.IdemixEnrollmentResponseNet {\n\treturn api.IdemixEnrollmentResponseNet{\n\t\tNonce: resp.Nonce,\n\t\tAttrs: resp.Attrs,\n\t\tCredential: resp.Credential,\n\t\tCRI: resp.CRI,\n\t\tCAInfo: api.CAInfoResponseNet{}}\n}", "func CreateDescribeEventDetailResponse() (response *DescribeEventDetailResponse) {\n\tresponse = &DescribeEventDetailResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateDescribeCustinsKernelReleaseNotesResponse() (response *DescribeCustinsKernelReleaseNotesResponse) {\n\tresponse = &DescribeCustinsKernelReleaseNotesResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateCreateECSDBInstanceResponse() (response *CreateECSDBInstanceResponse) {\n\tresponse = &CreateECSDBInstanceResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func NewIosCustomConfigurationCollectionResponse()(*IosCustomConfigurationCollectionResponse) {\n m := &IosCustomConfigurationCollectionResponse{\n BaseCollectionPaginationCountResponse: *NewBaseCollectionPaginationCountResponse(),\n }\n return m\n}", "func CreateDescribeOrganizationSpecsResponse() (response *DescribeOrganizationSpecsResponse) 
{\n\tresponse = &DescribeOrganizationSpecsResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateStartNotaryResponse() (response *StartNotaryResponse) {\n\tresponse = &StartNotaryResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func ParseCreateanewResellerResponse(rsp *http.Response) (*CreateanewResellerResponse, error) {\n\tbodyBytes, err := ioutil.ReadAll(rsp.Body)\n\tdefer rsp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := &CreateanewResellerResponse{\n\t\tBody: bodyBytes,\n\t\tHTTPResponse: rsp,\n\t}\n\n\tswitch {\n\tcase strings.Contains(rsp.Header.Get(\"Content-Type\"), \"json\") && rsp.StatusCode == 201:\n\t\tvar dest []Thenewlycreateditemorempty25\n\t\tif err := json.Unmarshal(bodyBytes, &dest); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresponse.JSON201 = &dest\n\n\tcase strings.Contains(rsp.Header.Get(\"Content-Type\"), \"json\") && rsp.StatusCode == 422:\n\t\tvar dest Anerror\n\t\tif err := json.Unmarshal(bodyBytes, &dest); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresponse.JSON422 = &dest\n\n\t}\n\n\treturn response, nil\n}", "func (client ModelClient) CreateRegexEntityModelResponder(resp *http.Response) (result UUID, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),\n\t\tautorest.ByUnmarshallingJSON(&result.Value),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func CreateApplyNumberDistrictInfoParsingResultResponse() (response *ApplyNumberDistrictInfoParsingResultResponse) {\n\tresponse = &ApplyNumberDistrictInfoParsingResultResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func ParseCreateanewInvoiceResponse(rsp *http.Response) (*CreateanewInvoiceResponse, error) {\n\tbodyBytes, err := ioutil.ReadAll(rsp.Body)\n\tdefer rsp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := &CreateanewInvoiceResponse{\n\t\tBody: bodyBytes,\n\t\tHTTPResponse: rsp,\n\t}\n\n\tswitch {\n\tcase strings.Contains(rsp.Header.Get(\"Content-Type\"), \"json\") && rsp.StatusCode == 201:\n\t\tvar dest []Thenewlycreateditemorempty45\n\t\tif err := json.Unmarshal(bodyBytes, &dest); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresponse.JSON201 = &dest\n\n\tcase strings.Contains(rsp.Header.Get(\"Content-Type\"), \"json\") && rsp.StatusCode == 422:\n\t\tvar dest Anerror\n\t\tif err := json.Unmarshal(bodyBytes, &dest); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresponse.JSON422 = &dest\n\n\t}\n\n\treturn response, nil\n}", "func CorporateCreateTicketEncodeResponse(ctx context.Context, w http.ResponseWriter, response interface{}) error {\n\tvar body []byte\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tw.Header().Set(\"Access-Control-Allow-Headers\", \"Content-Type,Authorization\")\n\tbody, err := json.Marshal(&response)\n\tlogger.Logf(\"CorporateCreateTicketEncodeResponse : %s\", string(body[:]))\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t//w.Header().Set(\"X-Checksum\", cm.Cksum(body))\n\n\tvar e = response.(dt.CorporateCreateTicketJSONResponse).ResponseCode\n\n\tif e <= dt.HeaderStatusOk {\n\t\tw.WriteHeader(http.StatusOK)\n\t} else if e <= dt.StatusBadRequest {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t} else if e <= 998 {\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t} else {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n\n\t_, err = w.Write(body)\n\n\treturn err\n}", 
"func ParseResponse(mapWrapper *cmap.ConcurrentMap, command model.Command, err error, customResponse *model.CustomResponse) model.CustomResponse {\n\n\tresultWrapper := model.Wrapper{\n\t\tConfigure: model.Configure{\n\t\t\tResponse: command,\n\t\t},\n\t\tResponse: cmap.New(),\n\t}\n\n\tresultWrapper.Response.Set(\"statusCode\", 0)\n\tresultWrapper.Response.Set(\"header\", make(map[string]interface{}))\n\tresultWrapper.Response.Set(\"body\", make(map[string]interface{}))\n\n\t//* now we will set the response body based from configurex.json if there is $configure value in configureBased.\n\n\ttmpHeader := make(map[string]interface{})\n\ttmpBody := make(map[string]interface{})\n\n\tstatusCode := 400\n\tif customResponse != nil {\n\t\tif customResponse.Header != nil {\n\t\t\ttmpHeader = customResponse.Header\n\t\t}\n\t\tif customResponse.Body != nil {\n\t\t\ttmpBody = customResponse.Body\n\t\t}\n\n\t\tif customResponse.StatusCode > 0 {\n\t\t\tstatusCode = customResponse.StatusCode\n\n\t\t} else {\n\t\t\tlog.Warn(\"status code is not defined, set status code to 400\")\n\t\t\t// default\n\t\t\tstatusCode = 400\n\n\t\t}\n\t}\n\n\t// if status code is specified in configure, then set status code based on configure\n\tif command.StatusCode > 0 {\n\t\tstatusCode = command.StatusCode\n\t}\n\n\t//*header\n\ttmpHeader = service.AddToWrapper(resultWrapper.Configure.Response.Adds.Header, \"--\", tmpHeader, mapWrapper, 0)\n\t//*modify header\n\ttmpHeader = service.ModifyWrapper(resultWrapper.Configure.Response.Modifies.Header, \"--\", tmpHeader, mapWrapper, 0)\n\t//*Deletion Header\n\ttmpHeader = service.DeletionHeaderOrQuery(resultWrapper.Configure.Response.Deletes.Header, tmpHeader)\n\n\t//*add\n\ttmpBody = service.AddToWrapper(resultWrapper.Configure.Response.Adds.Body, \"--\", tmpBody, mapWrapper, 0)\n\t//*modify\n\ttmpBody = service.ModifyWrapper(resultWrapper.Configure.Response.Modifies.Body, \"--\", tmpBody, mapWrapper, 0)\n\t//* delete\n\ttmpBody = service.DeletionBody(resultWrapper.Configure.Response.Deletes, tmpBody)\n\n\t//*In case user want to log final response\n\tif len(resultWrapper.Configure.Response.LogAfterModify) > 0 {\n\t\tlogValue := make(map[string]interface{}) // v\n\t\tfor key, val := range resultWrapper.Configure.Response.LogAfterModify {\n\t\t\tlogValue[key] = service.GetFromHalfReferenceValue(val, resultWrapper.Response, 0)\n\t\t}\n\t\t//logValue := service.GetFromHalfReferenceValue(resultWrapper.Configure.Response.LogAfterModify, resultWrapper.Response, 0)\n\t\tutil.DoLoggingJson(logValue, \"after\", \"final response\", false)\n\t}\n\n\tresponse := model.CustomResponse{\n\t\tStatusCode: statusCode,\n\t\tHeader: tmpHeader,\n\t\tBody: tmpBody,\n\t\tError: err,\n\t}\n\n\treturn response\n}", "func (o *PostOperationsGetNodeEdgePointDetailsCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}", "func (s *SaleResponse) FormatResponse() *g.Response {\n\tresponse := new(g.Response)\n\tresponse.Acquirer = Name\n\n\tif s.OrderResult != nil {\n\t\tresponse.Id = s.OrderResult.OrderReference\n\t\tresponse.AuthorizationCode = s.OrderResult.OrderKey\n\t}\n\n\t// If CreditCard\n\tif len(s.CreditCardTransactionResultCollection) > 0 {\n\t\ttransaction := s.CreditCardTransactionResultCollection[0]\n\n\t\tresponse.Amount = transaction.AmountInCents\n\t\t//response.CreditCard = &g.CreditCard{}\n\t\tresponse.NSU = 
transaction.UniqueSequentialNumber\n\t\tresponse.TID = transaction.TransactionIdentifier\n\t}\n\n\t// If BankingBillet\n\tif len(s.BoletoTransactionResultCollection) > 0 {\n\t\ttransaction := s.BoletoTransactionResultCollection[0]\n\n\t\tresponse.Amount = transaction.AmountInCents\n\t\tresponse.BarCode = transaction.Barcode\n\t\tresponse.BoletoUrl = transaction.BoletoUrl\n\t}\n\n\treturn response\n}", "func CreateUpdateCommodityResponse() (response *UpdateCommodityResponse) {\n\tresponse = &UpdateCommodityResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateCreateExpressSyncResponse() (response *CreateExpressSyncResponse) {\n\tresponse = &CreateExpressSyncResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateRunMedQAResponse() (response *RunMedQAResponse) {\n\tresponse = &RunMedQAResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func EncodeCreateResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(*inventoryviews.Inventory)\n\t\tenc := encoder(ctx, w)\n\t\tbody := NewCreateResponseBody(res.Projected)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}", "func CreateQueryPublicModelEngineResponse() (response *QueryPublicModelEngineResponse) {\n\tresponse = &QueryPublicModelEngineResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateDescribeClusterServiceConfigTagResponse() (response *DescribeClusterServiceConfigTagResponse) {\n\tresponse = &DescribeClusterServiceConfigTagResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateIosCustomConfigurationCollectionResponseFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {\n return NewIosCustomConfigurationCollectionResponse(), nil\n}", "func CreateGetRenderResultResponse() (response *GetRenderResultResponse) {\n\tresponse = &GetRenderResultResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateCreateAgentResponse() (response *CreateAgentResponse) {\n\tresponse = &CreateAgentResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (c *WSCodec) CreateResponse(id interface{}, reply interface{}) interface{} {\n\treturn &jsonSuccessResponse{Version: jsonrpcVersion, Id: id, Result: reply}\n}", "func CreateGetHotlineGroupDetailReportResponse() (response *GetHotlineGroupDetailReportResponse) {\n\tresponse = &GetHotlineGroupDetailReportResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateGetWsCustomizedChO2ORequest() (request *GetWsCustomizedChO2ORequest) {\n\trequest = &GetWsCustomizedChO2ORequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"alinlp\", \"2020-06-29\", \"GetWsCustomizedChO2O\", \"alinlp\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateQueryWorksResponse() (response *QueryWorksResponse) {\n\tresponse = &QueryWorksResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateDescribeScalingConfigurationsResponse() (response *DescribeScalingConfigurationsResponse) {\n\tresponse = &DescribeScalingConfigurationsResponse{\n\t\tBaseResponse: 
&responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateCreateServerlessDBInstanceResponse() (response *CreateServerlessDBInstanceResponse) {\n\tresponse = &CreateServerlessDBInstanceResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateGetRetcodeLogstoreResponse() (response *GetRetcodeLogstoreResponse) {\n\tresponse = &GetRetcodeLogstoreResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateCreateContainerInstancesResponse() (response *CreateContainerInstancesResponse) {\n\tresponse = &CreateContainerInstancesResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateSearchAlertContactResponse() (response *SearchAlertContactResponse) {\n\tresponse = &SearchAlertContactResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func EncodeNewNeatThingResponse(encoder func(context.Context, http.ResponseWriter) goahttp.Encoder) func(context.Context, http.ResponseWriter, interface{}) error {\n\treturn func(ctx context.Context, w http.ResponseWriter, v interface{}) error {\n\t\tres := v.(*neatthingviews.NeatThing)\n\t\tw.Header().Set(\"goa-view\", res.View)\n\t\tenc := encoder(ctx, w)\n\t\tbody := NewNewNeatThingResponseBodyFull(res.Projected)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn enc.Encode(body)\n\t}\n}", "func ParseCreateanewInterceptionResponse(rsp *http.Response) (*CreateanewInterceptionResponse, error) {\n\tbodyBytes, err := ioutil.ReadAll(rsp.Body)\n\tdefer rsp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := &CreateanewInterceptionResponse{\n\t\tBody: bodyBytes,\n\t\tHTTPResponse: rsp,\n\t}\n\n\tswitch {\n\tcase strings.Contains(rsp.Header.Get(\"Content-Type\"), \"json\") && rsp.StatusCode == 201:\n\t\tvar dest []Thenewlycreateditemorempty48\n\t\tif err := json.Unmarshal(bodyBytes, &dest); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresponse.JSON201 = &dest\n\n\tcase strings.Contains(rsp.Header.Get(\"Content-Type\"), \"json\") && rsp.StatusCode == 422:\n\t\tvar dest Anerror\n\t\tif err := json.Unmarshal(bodyBytes, &dest); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresponse.JSON422 = &dest\n\n\t}\n\n\treturn response, nil\n}", "func encodeCreatePostResponse(ctx context.Context, w http.ResponseWriter, response interface{}) (err error) {\n\tif f, ok := response.(endpoint.Failure); ok && f.Failed() != nil {\n\t\tErrorEncoder(ctx, f.Failed(), w)\n\t\treturn nil\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\terr = json.NewEncoder(w).Encode(response)\n\tif err != nil {\n\t\tlogrus.Warn(err.Error())\n\t}\n\treturn\n}", "func CreateOnsConsumerAccumulateResponse() (response *OnsConsumerAccumulateResponse) {\n\tresponse = &OnsConsumerAccumulateResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}" ]
[ "0.75959283", "0.70300895", "0.661331", "0.6274801", "0.61348885", "0.6081415", "0.60363543", "0.5802763", "0.57948554", "0.56905454", "0.5679204", "0.5625127", "0.5603903", "0.5568985", "0.55646026", "0.5562964", "0.55232185", "0.5499022", "0.54989606", "0.5492508", "0.5489657", "0.545481", "0.5443159", "0.54229486", "0.5418264", "0.541402", "0.53864735", "0.538133", "0.53803337", "0.53495085", "0.53428304", "0.5340594", "0.5332268", "0.5331375", "0.53280497", "0.53208655", "0.5318078", "0.53157574", "0.53051907", "0.5304101", "0.52679354", "0.5259694", "0.52581847", "0.5254574", "0.52505434", "0.5219822", "0.52049726", "0.5196202", "0.5178534", "0.51717293", "0.5167227", "0.51576924", "0.51576203", "0.51559025", "0.51494294", "0.51425403", "0.51279056", "0.512272", "0.5108424", "0.51028645", "0.5090196", "0.5084035", "0.5072196", "0.50694805", "0.5066925", "0.50634277", "0.5058891", "0.50586003", "0.5058367", "0.50560856", "0.5054671", "0.5054651", "0.50545686", "0.5053801", "0.50502694", "0.5049642", "0.5040351", "0.5038482", "0.50325966", "0.5028541", "0.50260836", "0.5020233", "0.501329", "0.50079805", "0.50072813", "0.5006886", "0.50028694", "0.5002339", "0.4999332", "0.49981496", "0.4993201", "0.4977848", "0.49707747", "0.4967696", "0.4963197", "0.49628437", "0.49488002", "0.4945975", "0.49458438", "0.49452272" ]
0.896394
0
WithBaseURL sets the baseURL
func WithBaseURL(url string) Option { return func(s *Storage) { s.baseURL = url } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *Client) SetBaseURL(bu string) {\n\tu, err := url.Parse(bu)\n\tif err != nil {\n\t\t// return err\n\t\tlog.Fatalf(\"error parsing base url %v\", err)\n\t}\n\n\tc.BaseURL = u\n}", "func (c *Client) SetBaseURL(base string) error {\n\tu, err := url.ParseRequestURI(base)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.baseURL = u\n\treturn nil\n}", "func (c *Client) setBaseURL(urlStr string) error {\n\tbaseURL, err := url.Parse(urlStr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Update the base URL of the client.\n\tc.baseURL = baseURL\n\n\treturn nil\n}", "func (c *Client) SetBaseURL(baseURL string) error {\n\tu, err := url.Parse(baseURL)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif u.Path != \"\" && u.Path[len(u.Path)-1] != '/' {\n\t\treturn ErrNoTrailingSlash\n\t}\n\n\tc.baseURL = u\n\treturn nil\n}", "func (c *Client) WithBaseURL(u *url.URL) *Client {\n\tc.u = u\n\treturn c\n}", "func SetBaseURL(bu string) ClientOpt {\n\treturn func(c *Client) error {\n\t\tu, err := url.Parse(bu)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tc.BaseURL = u\n\t\treturn nil\n\t}\n}", "func (b *AuroraBackend) SetBaseURL(url string) {\n\tb.BaseURL = url\n}", "func WithBaseURL(b string) Option {\n\treturn func(r *RequestClient) {\n\t\tr.base = b\n\t}\n}", "func WithBaseURL(url *url.URL) OptionFunc {\n\treturn func(c *Client) {\n\t\tc.baseURL = url\n\t}\n}", "func WithBaseURL(baseURL string) Option {\n\treturn func(c *Client) error {\n\t\tif baseURL == \"\" {\n\t\t\treturn errors.New(\"URL cannot be empty\")\n\t\t}\n\n\t\tif _, err := url.Parse(baseURL); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to parse base URL: %w\", err)\n\t\t}\n\n\t\tc.baseURL = baseURL\n\t\treturn nil\n\t}\n}", "func GetBaseURL() string {\n\treturn baseURL\n}", "func (i *Idb) joinBaseURL(p ...string) *url.URL {\n\tu := new(url.URL)\n\t*u = *i.url\t\n\tu.Path = path.Join(append([]string{u.Path}, p...)...)\n\treturn u\n}", "func BaseURL() (u url.URL) {\n\tu = *PrefixURI\n\tu.Path += \"/api/v1\"\n\treturn u\n}", "func SetBaseUrl(newbaseurl string) string {\n\tBaseUrl = newbaseurl\n\treturn BaseUrl\n}", "func (c *baseClient) Base(path string) *baseClient {\n\tc.url = path\n\treturn c\n}", "func WithBaseURL(url string) ClientOption {\n\treturn func(opts *ClientOptions) {\n\t\topts.BaseURL = url\n\t}\n}", "func WithBaseURL(baseURL string) ClientOption {\n\treturn func(c *Client) {\n\t\tc.baseURL = baseURL\n\t}\n}", "func (c *Client) SetBaseURL(url string) *Client {\n\tc.baseURL = url\n\treturn c\n}", "func (s *CallbackServer) BaseURL() *url.URL {\n\treturn s.http.BaseURL()\n}", "func (c *client) BaseURL() *url.URL {\n\treturn c.c.BaseURL()\n}", "func (r *Region) SetNerdGraphBaseURL(url string) {\n\tif r != nil && url != \"\" {\n\t\tr.nerdGraphBaseURL = url\n\t}\n}", "func WithBaseURL(baseURL string) ClientOption {\n\treturn func(c *Client) error {\n\t\tif !strings.HasSuffix(baseURL, \"/\") {\n\t\t\tbaseURL += \"/\"\n\t\t}\n\t\tnewBaseURL, err := url.Parse(baseURL)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.Server = newBaseURL.String()\n\t\treturn nil\n\t}\n}", "func WithBaseURL(baseURL string) ClientOption {\n\treturn func(c *Client) error {\n\t\tnewBaseURL, err := url.Parse(baseURL)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.Server = newBaseURL.String()\n\t\treturn nil\n\t}\n}", "func WithBaseURL(baseURL string) ClientOption {\n\treturn func(c *Client) error {\n\t\tnewBaseURL, err := url.Parse(baseURL)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.Server = newBaseURL.String()\n\t\treturn 
nil\n\t}\n}", "func WithBaseURL(baseURL string) ClientOption {\n\treturn func(c *Client) error {\n\t\tnewBaseURL, err := url.Parse(baseURL)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.Server = newBaseURL.String()\n\t\treturn nil\n\t}\n}", "func WithBaseURL(baseURL string) ClientOption {\n\treturn func(c *Client) error {\n\t\tnewBaseURL, err := url.Parse(baseURL)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.Server = newBaseURL.String()\n\t\treturn nil\n\t}\n}", "func WithBaseURL(baseURL string) ClientOption {\n\treturn func(c *Client) error {\n\t\tnewBaseURL, err := url.Parse(baseURL)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.Server = newBaseURL.String()\n\t\treturn nil\n\t}\n}", "func WithBaseURL(baseURL string) ClientOption {\n\treturn func(c *Client) error {\n\t\tnewBaseURL, err := url.Parse(baseURL)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.Server = newBaseURL.String()\n\t\treturn nil\n\t}\n}", "func WithBaseURL(baseURL string) ClientOption {\n\treturn func(c *Client) error {\n\t\tnewBaseURL, err := url.Parse(baseURL)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.Server = newBaseURL.String()\n\t\treturn nil\n\t}\n}", "func WithBaseURL(baseURL string) ClientOption {\n\treturn func(c *Client) error {\n\t\tnewBaseURL, err := url.Parse(baseURL)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.Server = newBaseURL.String()\n\t\treturn nil\n\t}\n}", "func WithBaseURL(baseURL string) ClientOption {\n\treturn func(c *Client) error {\n\t\tnewBaseURL, err := url.Parse(baseURL)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.Server = newBaseURL.String()\n\t\treturn nil\n\t}\n}", "func WithBaseURL(baseURL string) ClientOption {\n\treturn func(c *Client) error {\n\t\tnewBaseURL, err := url.Parse(baseURL)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.Server = newBaseURL.String()\n\t\treturn nil\n\t}\n}", "func BaseURL(rawurl string) Option {\n\treturn func(c *Client) {\n\t\tif baseURL, err := url.Parse(rawurl); err == nil {\n\t\t\tc.baseURL = baseURL\n\t\t}\n\t}\n}", "func (s *Nap) Base(rawURL string) *Nap {\n\ts.rawURL = rawURL\n\treturn s\n}", "func WithBaseURL(s string) ClientOptionFunc {\n\treturn func(c *Client) error {\n\t\tc.http.SetHostURL(s)\n\t\treturn nil\n\t}\n}", "func AddBase(path string) string {\n\treturn fmt.Sprintf(\"%s%s\", baseURL, path)\n}", "func (c Config) BaseURLOrDefault() string {\n\treturn c.BaseURL\n}", "func (r *Region) SetSyntheticsBaseURL(url string) {\n\tif r != nil && url != \"\" {\n\t\tr.syntheticsBaseURL = url\n\t}\n}", "func SetBaseURL(baseURL string) AuthenticatorOption {\n\treturn func(d *DeviceAuthenticator) error {\n\t\tparsedURL, err := url.Parse(baseURL)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\td.BaseURL = parsedURL\n\t\treturn nil\n\t}\n}", "func (e *Endpoint) setBaseUrl(url string) {\n\te.BaseUrl = url\n}", "func buildBaseURL(r *http.Request) string {\n\treturn fmt.Sprintf(\"%s://%s\", r.URL.Scheme, r.URL.Hostname())\n}", "func Test_Ctx_BaseURL(t *testing.T) {\n\tt.Parallel()\n\tapp := New()\n\tctx := app.AcquireCtx(&fasthttp.RequestCtx{})\n\tdefer app.ReleaseCtx(ctx)\n\tctx.Fasthttp.Request.SetRequestURI(\"http://google.com/test\")\n\tutils.AssertEqual(t, \"http://google.com\", ctx.BaseURL())\n}", "func WithBaseURL(baseURL string) ConfigOption {\n\treturn func(c *Config) {\n\t\tc.baseURL = baseURL\n\t}\n}", "func (n nodeData) goodBaseURL() string {\n\turl := n.BaseURL\n\tif !strings.HasPrefix(n.BaseURL, \"http://\") {\n\t\turl = \"http://\" + url\n\t}\n\tif strings.HasSuffix(url, 
\"/\") {\n\t\turl = url[:len(url)-1]\n\t}\n\treturn url\n}", "func ReplaceBase(str, old, new string) string {\n\treturn strings.Replace(str, old, fmt.Sprintf(new, baseURL), 1)\n}", "func NewWithBaseURL(token string, baseURL string) Client {\n\tnewClient := Client{Token: token, BaseURL: baseURL}\n\n\tnewClient.FreeCompany = FreeCompany{Endpoint: Endpoint{client: &newClient, endpoint: \"FreeCompany/\"}}\n\tnewClient.Group = Group{Endpoint: Endpoint{client: &newClient, endpoint: \"Group/\"}}\n\n\treturn newClient\n\n}", "func (a *Accessor) genBaseURL(epID EntryPointID) (*url.URL, error) {\n\tu, err := url.Parse(a.BaseUrl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif a.EntryPoints == nil {\n\t\tif err := a.ReloadEntryPoint(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tswitch epID.Value() {\n\tcase Entities:\n\t\tu.Path = path.Join(u.Path, a.EntryPoints.EntitiesURL)\n\tcase Types:\n\t\tu.Path = path.Join(u.Path, a.EntryPoints.TypesURL)\n\tcase Subscriptions:\n\t\tu.Path = path.Join(u.Path, a.EntryPoints.SubscriptionsURL)\n\tcase Registrations:\n\t\tu.Path = path.Join(u.Path, a.EntryPoints.RegistrationsURL)\n\tdefault:\n\t\treturn nil, IllegalEndPointIDError\n\t}\n\treturn u, nil\n}", "func (url URL) Base() string {\n\treturn \"~/\" + path.Base(url.String())\n}", "func (o ProviderOutput) BaseUrl() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *Provider) pulumi.StringPtrOutput { return v.BaseUrl }).(pulumi.StringPtrOutput)\n}", "func sanitizeBaseURL(baseURL string) string {\n\tu, err := url.Parse(baseURL)\n\tif err != nil {\n\t\treturn baseURL // invalid URL will fail later when making requests\n\t}\n\tif u.Scheme == \"\" {\n\t\tu.Scheme = \"http\"\n\t}\n\treturn u.String()\n}", "func sanitizeBaseURL(baseURL string) string {\n\tu, err := url.Parse(baseURL)\n\tif err != nil {\n\t\treturn baseURL // invalid URL will fail later when making requests\n\t}\n\tif u.Scheme == \"\" {\n\t\tu.Scheme = \"http\"\n\t}\n\treturn u.String()\n}", "func (api *API) GetBaseURL() string {\n\treturn api.baseURL\n}", "func (c *Client) GetBaseURL() url.URL {\n\treturn *c.baseURL\n}", "func GetBaseURL() string {\n\t//Change This to Your Google Analytics ID\n\tconst url = \"goappstarter.appspot.com\"\n\treturn url\n}", "func (g *Gate) SetBaseUrl(url string) *Gate {\n\tg.paymentPage.SetBaseUrl(url)\n\n\treturn g\n}", "func (p Database) BaseURL() string {\n\tif p.authinfo == nil {\n\t\treturn fmt.Sprintf(\"http://%s:%s\", p.Host, p.Port)\n\t}\n\treturn fmt.Sprintf(\"http://%s@%s:%s\", p.authinfo.String(), p.Host, p.Port)\n}", "func GetBaseURL(r *http.Request) string {\n\tscheme := \"http\"\n\tif r.TLS != nil {\n\t\tscheme = \"https\"\n\t}\n\treturn fmt.Sprintf(\"%s://%s\", scheme, r.Host)\n}", "func (analyzer *Analyzer) SetBaseUrl(url string) {\n\tanalyzer.baseUrl = url\n}", "func (c *Config) BaseURL() string {\n\tif c.IsProd() {\n\t\treturn \"http://golang-practice\"\n\t}\n\treturn \"http://localhost\" + c.Port\n}", "func ReloadURLFromBase(u *url.URL) *url.URL {\n\tr := *u\n\tr.Path = path.Join(r.Path, \"/-/reload\")\n\treturn &r\n}", "func BaseUrl() string {\n\treturn requireEnv(drupalBaseUrl)\n}", "func (s Storage) BaseURL() *url.URL {\n\treturn s.cdnConf.CDNEndpointWithDefault(s.baseURI)\n}", "func (o *SmsTracking) SetBaseUrl(v string) {\n\to.BaseUrl = &v\n}", "func (a APIKeyAuthentication) GetBaseURL() string {\n\treturn a.BaseURL\n}", "func (m RequestURL) GetBaseURL() string {\n\tif uri := m.r.Header.Get(\"REQUEST_URI\"); uri != \"\" {\n\t\treturn uri\n\t}\n\n\treturn 
fmt.Sprintf(\"https://localhost:%d\", m.Port)\n}", "func (r *BaseRequestBuilder) SetURL(baseURL string) {\n\tr.baseURL = baseURL\n}", "func (o *GetReportOutputResourceParams) WithBaseURL(BaseURL *string) *GetReportOutputResourceParams {\n\to.BaseURL = BaseURL\n\treturn o\n}", "func GetBaseURL(s string) string {\n\tu, err := url.Parse(s)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn u.Scheme + \"://\" + u.Host\n}", "func (conf FileConfiguration) BaseURL() string {\n\treturn conf.parsedYaml.BaseURL\n}", "func joinURL(baseURL string, paths ...string) string {\n\n\tu, err := url.Parse(baseURL)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\turl := path.Join(paths...)\n\tu.Path = path.Join(u.Path, url)\n\treturn u.String()\n}", "func (s *DefaultSegmentDeliveryConfiguration) SetBaseUrl(v string) *DefaultSegmentDeliveryConfiguration {\n\ts.BaseUrl = &v\n\treturn s\n}", "func (v *View) PrependBaseURI(s string) string {\n\treturn v.BaseURI + s\n}", "func (_ZKOnacci *ZKOnacciCallerSession) BaseURI() (string, error) {\n\treturn _ZKOnacci.Contract.BaseURI(&_ZKOnacci.CallOpts)\n}", "func (s *SegmentDeliveryConfiguration) SetBaseUrl(v string) *SegmentDeliveryConfiguration {\n\ts.BaseUrl = &v\n\treturn s\n}", "func (r *Region) SetRestBaseURL(url string) {\n\tif r != nil && url != \"\" {\n\t\tr.restBaseURL = url\n\t}\n}", "func (p *Base) URL() string {\n\treturn \"https://thebase.in\"\n}", "func applicationRoot(baseURL string) (base string, err error) {\n\tb, err := url.Parse(baseURL)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif b.RawQuery != \"\" {\n\t\treturn \"\", fmt.Errorf(\"query component is not allowed: %s\", baseURL)\n\t}\n\tif b.Fragment != \"\" {\n\t\treturn \"\", fmt.Errorf(\"fragment component is not allowed: %s\", baseURL)\n\t}\n\tb.Path = strings.TrimRight(b.Path, \"/\")\n\treturn b.String(), nil\n}", "func (_ZKOnacci *ZKOnacciSession) BaseURI() (string, error) {\n\treturn _ZKOnacci.Contract.BaseURI(&_ZKOnacci.CallOpts)\n}", "func urlBase(addr string) string {\n\t// If the addr specifies a scheme, use it. If not, default to\n\t// http. 
If url.Parse fails on it, return it unchanged.\n\turl, err := url.Parse(addr)\n\tif err != nil {\n\t\treturn addr\n\t}\n\tif url.Scheme == \"\" {\n\t\turl.Scheme = \"http\"\n\t}\n\treturn url.String()\n}", "func Benchmark_Ctx_BaseURL(b *testing.B) {\n\tapp := New()\n\tc := app.AcquireCtx(&fasthttp.RequestCtx{})\n\tdefer app.ReleaseCtx(c)\n\tc.Fasthttp.Request.SetHost(\"google.com:1337\")\n\tc.Fasthttp.Request.URI().SetPath(\"/haha/oke/lol\")\n\tvar res string\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor n := 0; n < b.N; n++ {\n\t\tres = c.BaseURL()\n\t}\n\tutils.AssertEqual(b, \"http://google.com:1337\", res)\n}", "func (a *api) getBaseURL(targetAppID string) string {\n\tendpoint, ok := a.universal.CompStore.GetHTTPEndpoint(targetAppID)\n\tif ok && endpoint.Name == targetAppID {\n\t\treturn endpoint.Spec.BaseURL\n\t}\n\treturn \"\"\n}", "func NewWithBaseURI(baseURI string, ) BaseClient {\n return BaseClient{\n Client: autorest.NewClientWithUserAgent(UserAgent()),\n BaseURI: baseURI,\n }\n}", "func (self *Request) UriBase() url.URL {\n\tscheme := self.URL.Scheme\n\tif scheme == \"\" {\n\t\tscheme = \"http\"\n\t}\n\n\thost := self.Host\n\tif len(host) > 0 && host[len(host)-1] == '/' {\n\t\thost = host[:len(host)-1]\n\t}\n\n\turl := url.URL{\n\t\tScheme: scheme,\n\t\tHost: host,\n\t}\n\treturn url\n}", "func Base_() HTML {\n return Base(nil)\n}", "func (m *DeviceManagementConfigurationSettingDefinition) SetBaseUri(value *string)() {\n err := m.GetBackingStore().Set(\"baseUri\", value)\n if err != nil {\n panic(err)\n }\n}", "func (c *Client) SetBaseUrl(baseUrl string) {\n\tc.baseUrl = baseUrl\n}", "func ShopBaseURL(name string) string {\n\treturn \"https://\" + ShopFullName(name)\n}", "func SetRemoteURLBase(remote *Remote, newurl string) {\n\tremote.Urlbase = newurl\n\tif !ValidateRemote(remote) {\n\t\tlog.Fatalf(\"Refusing to set new URL %s for %s\\n\", newurl)\n\t}\n\tZapRemote(remote)\n\tAddRemote(remote)\n}", "func (sn *SelfNode) GetURLBase() string {\n\treturn sn.URL\n}", "func (cc *CoinCap) Init(baseURL string) {\n\tcc.BaseURL = baseURL\n\treturn\n}", "func SetAPIDomain(url string) {\n\n\tBaseUrl = url\n\n\treturn\n\n}", "func (r *Region) SetLogsBaseURL(url string) {\n\tif r != nil && url != \"\" {\n\t\tr.logsBaseURL = url\n\t}\n}", "func AssetsBaseUrl() string {\n\treturn requireEnv(assetsBaseUrl)\n}", "func (o *GetServicesCounselingQueueURL) SetBasePath(bp string) {\n\to._basePath = bp\n}", "func GetBaseUrl(uri string) string {\n\n\tvar url string\n\tvar baseUrl string\n\n\tbaseUrl = os.Getenv(\"BASE_URL\")\n\n\tif baseUrl != \"\" {\n\t\turl = baseUrl + uri\n\t} else {\n\t\turl = \"http://0.0.0.0:8080\" + uri\n\t}\n\treturn url\n}", "func WithBase(base string) func(*Client) {\n\treturn func(mr *Client) {\n\t\tmr.base = base\n\t}\n}", "func (as *AdminServer) Base(w http.ResponseWriter, r *http.Request) {\n\tparams := newTemplateParams(r)\n\tparams.Title = \"Dashboard\"\n\tgetTemplate(w, \"dashboard\").ExecuteTemplate(w, \"base\", params)\n}", "func (s Shop) GetURL(baseURL string) (result string) {\n\tfullURL, err := url.Parse(baseURL)\n\tif err != nil {\n\t\tlog.Println(\"[Model/Shop] Error parsing baseURL: \", err.Error())\n\t\treturn\n\t}\n\tfullURL.Path += fmt.Sprintf(\"/%v/%s\", s.ID, strings.Replace(s.Name, \" \", \"-\", -1))\n\tresult = fullURL.String()\n\treturn\n}", "func BaseUrlOr(defaultValue string) string {\n\treturn GetEnvOr(drupalBaseUrl, defaultValue)\n}", "func BuildURL(baseURL string, q *Parameters) string {\n\tqueryParams := q.Encode()\n\tif queryParams == \"\" 
{\n\t\treturn baseURL\n\t}\n\treturn baseURL + \"?\" + queryParams\n}" ]
[ "0.7003866", "0.699843", "0.6800046", "0.67183524", "0.6677771", "0.6500183", "0.6447244", "0.64459676", "0.6425619", "0.62872773", "0.6284496", "0.62346447", "0.6181099", "0.60991687", "0.60940456", "0.609286", "0.60837984", "0.6007285", "0.5934689", "0.5925488", "0.5879423", "0.58745134", "0.5857477", "0.5857477", "0.5857477", "0.5857477", "0.5857477", "0.5857477", "0.5857477", "0.5857477", "0.5857477", "0.5857477", "0.5855718", "0.58245593", "0.58196783", "0.5779884", "0.57008356", "0.56402975", "0.5607447", "0.5551188", "0.55420595", "0.5540583", "0.55318916", "0.55252016", "0.5518008", "0.5510097", "0.5507567", "0.54950136", "0.5480637", "0.5457416", "0.5457416", "0.54506576", "0.5417656", "0.5415511", "0.53989565", "0.53636646", "0.53483605", "0.5343418", "0.53188604", "0.5313335", "0.5285614", "0.5265053", "0.52580684", "0.5241071", "0.522659", "0.517694", "0.5138178", "0.51093256", "0.50800186", "0.5077219", "0.5069247", "0.5067561", "0.5048434", "0.5046913", "0.5040682", "0.5038092", "0.5024528", "0.50092864", "0.5007508", "0.5006062", "0.5004619", "0.49978617", "0.49856865", "0.4947283", "0.49461356", "0.49335492", "0.49321088", "0.4916112", "0.49124607", "0.4900879", "0.4899852", "0.48987976", "0.48958442", "0.48835155", "0.48550826", "0.4822993", "0.47911963", "0.47635445", "0.47633547", "0.47561944" ]
0.6301283
9
WithHeimdallClient sets the client
func WithHeimdallClient(client heimdall.Client) Option { return func(s *Storage) { s.client = client } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (cl *Client) setClient() {\n\tcl.client = &http.Client{\n\t\tTransport: &ochttp.Transport{\n\t\t\tBase: &http.Transport{\n\t\t\t\tProxy: http.ProxyFromEnvironment,\n\t\t\t\tDial: (&net.Dialer{\n\t\t\t\t\tKeepAlive: cl.keepAlive,\n\t\t\t\t}).Dial,\n\t\t\t\tTLSHandshakeTimeout: cl.handshakeTimeout,\n\t\t\t\tMaxIdleConnsPerHost: cl.maxIdleConnsPerHost,\n\t\t\t},\n\t\t},\n\t}\n}", "func (s *Service) SetClient(client util.HTTPClient) { s.httpClient = client }", "func (_m *esClientInterface) setClient(_a0 *elastic.Client) {\n\t_m.Called(_a0)\n}", "func SetClient(client *http.Client) {\n\thttpClient = client\n}", "func SetClient(cli *Client) {\n\tclients = []*Client{cli}\n}", "func (b *AuroraBackend) SetClient(client *http.Client) {\n\tb.client = client\n}", "func (c *HTTPClientMock) SetClient(client *http.Client) {\n\tc.client = client\n}", "func SetClient(client HttpClient) func(*AviSession) error {\n\treturn func(sess *AviSession) error {\n\t\treturn sess.setClient(client)\n\t}\n}", "func (m *MqttClientBase) SetClientBase(host string, qos byte, clientID string, channel chan *models.PublishMessage, username string, password string, keepAlive time.Duration, pingTimeout time.Duration) {\n\tm.Qos = qos\n\tm.Host = host\n\tm.Username = username\n\tm.Password = password\n\tm.KeepAlive = keepAlive\n\tm.PingTimeout = pingTimeout\n\tm.Connecting = false\n\tm.Client = createPahoClient(host, clientID, username, password, keepAlive, pingTimeout)\n\tm.PublishChannel = channel\n}", "func SetClient(client *Client, ctx *fiber.Ctx) {\n\tctx.Locals(\"nats_client\", client)\n}", "func (service *BaseService) SetHTTPClient(client *http.Client) {\n\tsetMinimumTLSVersion(client)\n\n\tif isRetryableClient(service.Client) {\n\t\t// If \"service\" is currently holding a retryable client,\n\t\t// then set \"client\" as the embedded client used for individual requests.\n\t\ttr := service.Client.Transport.(*retryablehttp.RoundTripper)\n\t\ttr.Client.HTTPClient = client\n\t} else {\n\t\t// Otherwise, just hang \"client\" directly off the base service.\n\t\tservice.Client = client\n\t}\n}", "func SetClient(c client.Client) {\n\tdefaultK.client = c\n}", "func (b *OGame) SetClient(client *httpclient.Client) {\n\tb.client = client\n}", "func (s *server) handleClient(client *client) {\n\tdefer client.closeConn()\n\tsc := s.configStore.Load().(ServerConfig)\n\ts.log().Debugf(\"Handle client [%s], id: %d\", client.RemoteIP, client.ID)\n\n\t// Initial greeting\n\tgreeting := fmt.Sprintf(\"220 %s UMBO SMTP #%d (%d) %s\",\n\t\tsc.Hostname, client.ID,\n\t\ts.clientPool.GetActiveClientsCount(), time.Now().Format(time.RFC3339))\n\n\thelo := fmt.Sprintf(\"250 %s Hello\", sc.Hostname)\n\t// ehlo is a multi-line reply and need additional \\r\\n at the end\n\tehlo := fmt.Sprintf(\"250-%s Hello\\r\\n\", sc.Hostname)\n\n\t// Extended feature advertisements\n\tmessageSize := fmt.Sprintf(\"250-SIZE %d\\r\\n\", sc.MaxSize)\n\tadvertiseAuth := \"250-AUTH LOGIN\\r\\n\"\n\tpipelining := \"250-PIPELINING\\r\\n\"\n\tadvertiseTLS := \"250-STARTTLS\\r\\n\"\n\tadvertiseEnhancedStatusCodes := \"250-ENHANCEDSTATUSCODES\\r\\n\"\n\t// The last line doesn't need \\r\\n since string will be printed as a new line.\n\t// Also, Last line has no dash -\n\thelp := \"250 HELP\"\n\n\tif sc.TLS.AlwaysOn {\n\t\ttlsConfig, ok := s.tlsConfigStore.Load().(*tls.Config)\n\t\tif !ok {\n\t\t\ts.mainlog().Error(\"Failed to load *tls.Config\")\n\t\t} else if err := client.upgradeToTLS(tlsConfig); err == nil {\n\t\t\tadvertiseTLS = \"\"\n\t\t} else {\n\t\t\t// server 
requires TLS, but can't handshake\n\t\t\ts.log().WithError(err).Debugf(\"[%s] Failed TLS handshake\", client.RemoteIP)\n\t\t\tclient.kill()\n\t\t}\n\t}\n\tif !sc.TLS.StartTLSOn {\n\t\t// STARTTLS turned off, don't advertise it\n\t\tadvertiseTLS = \"\"\n\t}\n\tr := response.Canned\n\tloginInfo := LoginInfo{\n\t\tstatus: false,\n\t}\n\tif client.isAlive() {\n\t\terr := client.sendResponse(s.timeout.Load().(time.Duration), greeting)\n\t\tif err != nil {\n\t\t\ts.log().WithError(err).Debug(\"error with response\")\n\t\t\treturn\n\t\t}\n\t}\n\n\tvar firstMessage = true\n\tvar cmdLogs []string\n\n\tfor client.isAlive() {\n\t\tclient.bufin.setLimit(CommandLineMaxLength)\n\t\tinput, err := s.readCommand(client)\n\t\ts.log().Debugf(\"Client sent: %s\", input)\n\t\tif err == io.EOF {\n\t\t\ts.log().WithError(err).Debugf(\"Client closed the connection: %s\", client.RemoteIP)\n\t\t\treturn\n\t\t} else if netErr, ok := err.(net.Error); ok && netErr.Timeout() {\n\t\t\ts.log().WithError(err).Warnf(\"Timeout: %s\", client.RemoteIP)\n\t\t\treturn\n\t\t} else if err == LineLimitExceeded {\n\t\t\terr := client.sendResponse(s.timeout.Load().(time.Duration), r.FailLineTooLong)\n\t\t\tif err != nil {\n\t\t\t\ts.log().WithError(err).Debug(\"error writing response\")\n\t\t\t}\n\t\t\tclient.kill()\n\t\t\treturn\n\t\t} else if err != nil {\n\t\t\ts.log().WithError(err).Debugf(\"Read error: %s\", client.RemoteIP)\n\t\t\tclient.kill()\n\t\t\treturn\n\t\t}\n\t\tif s.isShuttingDown() {\n\t\t\ts.handleShotdown(client, r)\n\t\t\treturn\n\t\t}\n\n\t\tcmdLen := len(input)\n\t\tif cmdLen > CommandVerbMaxLength {\n\t\t\tcmdLen = CommandVerbMaxLength\n\t\t}\n\t\tcmd := bytes.ToUpper(input[:cmdLen])\n\n\t\t// keep SMTP command logs for debug purpose\n\t\tcmdLogs = append(cmdLogs, strings.Split(string(cmd), \" \")[0])\n\n\t\tswitch {\n\t\tcase cmdHELO.match(cmd):\n\t\t\tclient.Helo = string(bytes.Trim(input[4:], \" \"))\n\t\t\tclient.resetTransaction()\n\t\t\tclient.Envelope.HelloBeginAt = time.Now().UTC()\n\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), helo)\n\t\tcase cmdEHLO.match(cmd):\n\t\t\tclient.Helo = string(bytes.Trim(input[4:], \" \"))\n\t\t\tclient.resetTransaction()\n\t\t\tclient.Envelope.HelloBeginAt = time.Now().UTC()\n\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), ehlo,\n\t\t\t\tmessageSize,\n\t\t\t\tadvertiseAuth,\n\t\t\t\tpipelining,\n\t\t\t\tadvertiseTLS,\n\t\t\t\tadvertiseEnhancedStatusCodes,\n\t\t\t\thelp,\n\t\t\t)\n\t\tcase cmdHELP.match(cmd):\n\t\t\tquote := response.GetQuote()\n\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), \"214-OK\\r\\n\", quote)\n\t\tcase sc.XClientOn && cmdXCLIENT.match(cmd):\n\t\t\tif toks := bytes.Split(input[8:], []byte{' '}); len(toks) > 0 {\n\t\t\t\tfor i := range toks {\n\t\t\t\t\tif vals := bytes.Split(toks[i], []byte{'='}); len(vals) == 2 {\n\t\t\t\t\t\tif bytes.Compare(vals[1], []byte(\"[UNAVAILABLE]\")) == 0 {\n\t\t\t\t\t\t\t// skip\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif bytes.Compare(vals[0], []byte(\"ADDR\")) == 0 {\n\t\t\t\t\t\t\tclient.RemoteIP = string(vals[1])\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif bytes.Compare(vals[0], []byte(\"HELO\")) == 0 {\n\t\t\t\t\t\t\tclient.Helo = string(vals[1])\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.SuccessMailCmd)\n\t\tcase cmdMAIL.match(cmd):\n\t\t\tif !s.isAuthentication(sc.AuthenticationRequired, loginInfo.status) {\n\t\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), 
r.FailAuthRequired)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif client.isInTransaction() {\n\t\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.FailNestedMailCmd)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tclient.Envelope.MailBeginAt = time.Now().UTC()\n\t\t\tclient.MailFrom, err = client.parsePath([]byte(input[10:]), client.parser.MailFrom)\n\t\t\tif err != nil {\n\t\t\t\ts.log().WithError(err).Error(\"MAIL parse error\", \"[\"+string(input[10:])+\"]\")\n\t\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), err)\n\t\t\t\tbreak\n\t\t\t} else if client.parser.NullPath {\n\t\t\t\t// bounce has empty from address\n\t\t\t\tclient.MailFrom = mail.Address{}\n\t\t\t}\n\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.SuccessMailCmd)\n\t\tcase cmdRCPT.match(cmd):\n\t\t\tif !s.isAuthentication(sc.AuthenticationRequired, loginInfo.status) {\n\t\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.FailAuthRequired)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif len(client.RcptTo) > rfc5321.LimitRecipients {\n\t\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.ErrorTooManyRecipients)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tto, err := client.parsePath([]byte(input[8:]), client.parser.RcptTo)\n\t\t\tif err != nil {\n\t\t\t\ts.log().WithError(err).Error(\"RCPT parse error\", \"[\"+string(input[8:])+\"]\")\n\t\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), err.Error())\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif !s.allowsHost(to.Host) {\n\t\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.ErrorRelayDenied, \" \", to.Host)\n\t\t\t} else {\n\t\t\t\tclient.PushRcpt(to)\n\t\t\t\trcptError := s.backend().ValidateRcpt(client.Envelope)\n\t\t\t\tif rcptError != nil {\n\t\t\t\t\tclient.PopRcpt()\n\t\t\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.FailRcptCmd, \" \", rcptError.Error())\n\t\t\t\t} else {\n\t\t\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.SuccessRcptCmd)\n\t\t\t\t}\n\t\t\t}\n\t\tcase cmdRSET.match(cmd):\n\t\t\tclient.resetTransaction()\n\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.SuccessResetCmd)\n\t\tcase cmdVRFY.match(cmd):\n\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.SuccessVerifyCmd)\n\t\tcase cmdNOOP.match(cmd):\n\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.SuccessNoopCmd)\n\t\tcase cmdQUIT.match(cmd):\n\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.SuccessQuitCmd)\n\t\t\tclient.kill()\n\t\tcase cmdDATA.match(cmd):\n\t\t\tif !s.isAuthentication(sc.AuthenticationRequired, loginInfo.status) {\n\t\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.FailAuthRequired)\n\t\t\t\tif err != nil {\n\t\t\t\t\ts.log().WithError(err).Debug(\"error writing response\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif len(client.RcptTo) == 0 {\n\t\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.FailNoRecipientsDataCmd)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.SuccessDataCmd)\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tclient.CmdLogs = cmdLogs\n\t\t\tif firstMessage {\n\t\t\t\tclient.Envelope.ConnectBeginAt = client.ConnectedAt\n\t\t\t}\n\n\t\t\terr = s.handleData(client, sc, r)\n\t\t\tfirstMessage = false\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\tcase cmdAuth.match(cmd):\n\t\t\tif loginInfo.status == true {\n\t\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), 
r.FailNoIdentityChangesPermitted)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tclient.Envelope.AuthBeginAt = time.Now().UTC()\n\t\t\tcmds := strings.Split(string(input), \" \")\n\t\t\tif len(cmds) > 2 {\n\t\t\t\tl, err := s.handleAuthWithUsername(client, cmds[2], r)\n\t\t\t\tif err != nil {\n\t\t\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.FailAuthNotAccepted)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tloginInfo = l\n\t\t\t} else {\n\t\t\t\tl, err := s.handleAuth(client, r)\n\t\t\t\tif err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tloginInfo = l\n\t\t\t}\n\n\t\tcase sc.TLS.StartTLSOn && cmdSTARTTLS.match(cmd):\n\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.SuccessStartTLSCmd)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif s.handleStartTLS(client, sc) {\n\t\t\t\tadvertiseTLS = \"\"\n\t\t\t}\n\t\tdefault:\n\t\t\tclient.errors++\n\t\t\tif client.errors >= MaxUnrecognizedCommands {\n\t\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.FailMaxUnrecognizedCmd)\n\t\t\t\tclient.kill()\n\t\t\t} else {\n\t\t\t\terr = client.sendResponse(s.timeout.Load().(time.Duration), r.FailUnrecognizedCmd)\n\t\t\t}\n\t\t}\n\t\tif err != nil {\n\t\t\ts.log().WithError(err).Debug(\"error with response\")\n\t\t\treturn\n\t\t}\n\t}\n}", "func (t *Tapa) WithClient(client *http.Client) {\n\tt.client = client\n}", "func (c *Client) setHttpClient(httpClient *http.Client) {\n\tc.httpClient = httpClient\n}", "func SetHTTPClient(newClient *http.Client) {\n\thttpClient = newClient\n}", "func (s *UserService) SetClient(client *http.Client) (error) {\n\tsrv, err := admin.New(client)\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.UsersService = srv.Users\n\ts.Client = client\n\treturn nil\n}", "func (c *listCmd) setClient() error {\n\tss, err := c.syncCmd.storageFromParam(\"src\", c.syncCmd.src)\n\tif err != nil {\n\t\tfmt.Errorf(\"Could not set client for describe requests: %v\", err)\n\t}\n\tvar ok bool\n\tc.cl, ok = ss.(*client.Client)\n\tif !ok {\n\t\treturn fmt.Errorf(\"storageFromParam returned a %T, was expecting a *client.Client\", ss)\n\t}\n\treturn nil\n}", "func setupClient() *Client {\n\treturn &Client{\n\t\terr: make(chan error),\n\t\trequest: make(chan []byte, maxConCurrentMessages),\n\t\tresults: &sync.Map{},\n\t\tresultMessenger: &sync.Map{},\n\t\tlogger: logging.NewNilLogger(),\n\t\tgremlinVersion: \"3\",\n\t}\n}", "func (a *Agent) setupClient() error {\n\n\tconfig, err := a.clientConfig()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"client config setup failed: %v\", err)\n\t}\n\n\tclient, err := client.New(config)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"client setup failed: %v\", err)\n\t}\n\n\ta.client = client\n\n\treturn nil\n}", "func (client *BaseClient) SetTLSClientConfig(config *tls.Config) {}", "func (mpt *MapPinTracker) SetClient(c *rpc.Client) {\n\tmpt.rpcClient = c\n\tmpt.rpcReady <- struct{}{}\n}", "func SetClient(ctx context.Context, client statsd.ClientInterface) context.Context {\n\treturn context.WithValue(ctx, statsdClient, client)\n}", "func SetHTTPClient(httpClient *http.Client) func(*Client) error {\n\treturn func(client *Client) error {\n\t\tclient.client = httpClient\n\n\t\treturn nil\n\t}\n}", "func withHTTPClient(target *http.Client) ClientOption {\n\treturn func(subject *client) {\n\t\tsubject.client = target\n\t}\n}", "func (clients *ClientSets) SetFakeClient() {\n\n\t// Load kubernetes client set by preloading with k8s objects.\n\tclients.KubeClient = fake.NewSimpleClientset([]runtime.Object{}...)\n\n\t// Load litmus client 
set by preloading with litmus objects.\n\tclients.LitmusClient = litmusFakeClientset.NewSimpleClientset([]runtime.Object{}...)\n}", "func WithHystrixClient(client *hystrix.Client) Option {\n\treturn func(c *Client) {\n\t\tc.client = client\n\t}\n}", "func (c *Cloud) setsshClient(client ssh.Clienter) {\n\tc.sshClient = client\n}", "func clientAfter(ctx context.Context, set *optionSet, err error) {\n\tevent := rkgrpcctx.GetEvent(ctx)\n\tevent.AddErr(err)\n\tcode := status.Code(err)\n\tendTime := time.Now()\n\n\t// Check whether context is cancelled from server\n\tselect {\n\tcase <-ctx.Done():\n\t\tevent.AddErr(ctx.Err())\n\tdefault:\n\t\tbreak\n\t}\n\n\t// Read X-Request-Id header sent from server if exists\n\tincomingMD := rkgrpcinter.GetIncomingHeadersOfClient(ctx)\n\tif v := incomingMD.Get(rkgrpcctx.RequestIdKey); len(v) > 0 {\n\t\trkgrpcinter.AddToClientContextPayload(ctx, rkgrpcctx.RequestIdKey, v[len(v)-1])\n\t}\n\n\t// Extract request id and log it\n\tincomingRequestId := rkgrpcctx.GetRequestId(ctx)\n\n\tif len(incomingRequestId) > 0 {\n\t\tevent.SetEventId(incomingRequestId)\n\t\tevent.SetRequestId(incomingRequestId)\n\t}\n\n\ttraceId := rkgrpcctx.GetTraceId(ctx)\n\tif len(traceId) > 0 {\n\t\tevent.SetTraceId(traceId)\n\t}\n\n\tevent.SetResCode(code.String())\n\tevent.SetEndTime(endTime)\n\tevent.Finish()\n}", "func init() {\n\tdefaultClient = New(\"\")\n}", "func SetHTTPClient(client *http.Client) {\n\thttpClient = client\n}", "func (m *Mock) UseClient(c *http.Client) {\n\tm._client = c\n}", "func WithClient(ctx context.Context, c *client.Client) context.Context {\n\treturn context.WithValue(ctx, clientKey{}, c)\n}", "func (c *Client) WithClient(hc *http.Client) *Client {\n\tc.c = hc\n\treturn c\n}", "func WithClient(c *http.Client) TransportOption {\n\treturn func(tr *Transport) {\n\t\ttr.client = c\n\t}\n}", "func WithHTTPClient(h HTTPClient) ClientOption {\n\treturn clientOptionFunc(func(c interface{}) {\n\t\tswitch c := c.(type) {\n\t\tcase *Client:\n\t\t\tc.httpClient = h\n\t\tdefault:\n\t\t\tpanic(\"unknown type\")\n\t\t}\n\t})\n}", "func (client Client) Helix() *helix.Client {\n\treturn helix.New(client.ID, client.bearer)\n}", "func (client *ClientImpl) HeadClient(ctx context.Context, args HeadClientArgs) error {\n\tlocationId, _ := uuid.Parse(\"79c83865-4de3-460c-8a16-01be238e0818\")\n\t_, err := client.Client.Send(ctx, http.MethodHead, locationId, \"6.0-preview.1\", nil, nil, nil, \"\", \"application/json\", nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (i *Inbound) setClientFactory(factory internal.ClientFactory) {\n\ti.clientFactory = factory\n}", "func (app *MgmtApp) defaultClientInitializer(client Client) {\n loge.Info(\"Default defaultClientInitializer: %v\", client.Username())\n}", "func Configure(c Client) {\n\tclient = c\n}", "func ConfigureClient(g *backpressure.Backpressure, c *autorest.Client) {\n\tc.SendDecorators = append([]autorest.SendDecorator{\n\t\t// NOTE: Order matters here since these decorators are executed in\n\t\t// order. 
See: https://godoc.org/github.com/Azure/go-autorest/autorest#Client\n\t\tRateLimitCircuitBreaker(g),\n\t}, c.SendDecorators...)\n}", "func withClient(c *http.Client) option {\n\treturn func(m *matcher) error {\n\t\tm.client = c\n\t\treturn nil\n\t}\n}", "func (m *Member) SetClient(newClient *Client) {\n\tm.client = newClient\n}", "func WithClient(cl client.Client) Option {\n\treturn func(r *Reconciler) error {\n\t\tr.client = cl\n\t\treturn nil\n\t}\n}", "func WithClient(cl client.Client) Option {\n\treturn func(r *Reconciler) error {\n\t\tr.client = cl\n\t\treturn nil\n\t}\n}", "func BrokerSetClient(client *pubsub.Client) BrokerOptionFunc {\n\treturn func(bk *Broker) {\n\t\tbk.client = client\n\t}\n}", "func SetClient(client *pubsub.Client) ClientOption {\n\treturn func(c *Client) {\n\t\tc.client = client\n\t}\n}", "func (obj *SObject) setClient(client *Client) {\n\t(*obj)[sobjectClientKey] = client\n}", "func (h *Handler) setupClient(client, worker string) (*Client, error) {\n\tvar err error\n\th.p.mu.Lock()\n\tlock, exists := h.p.clientSetupMutex[client]\n\tif !exists {\n\t\tlock = &deadlock.Mutex{}\n\t\th.p.clientSetupMutex[client] = lock\n\t}\n\th.p.mu.Unlock()\n\n\t// start := time.Now()\n\t// log.Println(\"lock: \", client, \" \", worker)\n\tlock.Lock()\n\tdefer func() {\n\t\tlock.Unlock()\n\t\t// log.Printf(\"unlock: %s %s %f\", client, worker, time.Now().Sub(start).Seconds())\n\t}()\n\tc, err := h.p.FindClientDB(client)\n\tif err == ErrNoUsernameInDatabase {\n\t\tc, err = newClient(h.p, client)\n\t\tif err != nil {\n\t\t\t//fmt.Println(\"Failed to create a new Client\")\n\t\t\th.p.log.Printf(\"Failed to create a new Client: %s\\n\", err)\n\t\t\treturn nil, err\n\t\t}\n\t\terr = h.p.AddClientDB(c)\n\t\tif err != nil {\n\t\t\th.p.log.Printf(\"Failed to add client to DB: %s\\n\", err)\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tif err != nil {\n\t\t\th.p.log.Printf(\"Failed to get client from DB: %s\\n\", err)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif h.p.Client(client) == nil {\n\t\th.p.log.Printf(\"Adding client in memory: %s\\n\", client)\n\t\th.p.AddClient(c)\n\t}\n\treturn c, nil\n}", "func setClient(client, testClient interface{}) interface{} {\n\tswitch cl := client.(type) {\n\tcase *networkintents.NetworkClient:\n\t\tif testClient != nil && reflect.TypeOf(testClient).Implements(reflect.TypeOf((*networkintents.NetworkManager)(nil)).Elem()) {\n\t\t\tc, ok := testClient.(networkintents.NetworkManager)\n\t\t\tif ok {\n\t\t\t\treturn c\n\t\t\t}\n\t\t}\n\tcase *networkintents.ProviderNetClient:\n\t\tif testClient != nil && reflect.TypeOf(testClient).Implements(reflect.TypeOf((*networkintents.ProviderNetManager)(nil)).Elem()) {\n\t\t\tc, ok := testClient.(networkintents.ProviderNetManager)\n\t\t\tif ok {\n\t\t\t\treturn c\n\t\t\t}\n\t\t}\n\tcase *scheduler.SchedulerClient:\n\t\tif testClient != nil && reflect.TypeOf(testClient).Implements(reflect.TypeOf((*scheduler.SchedulerManager)(nil)).Elem()) {\n\t\t\tc, ok := testClient.(scheduler.SchedulerManager)\n\t\t\tif ok {\n\t\t\t\treturn c\n\t\t\t}\n\t\t}\n\tdefault:\n\t\tfmt.Printf(\"unknown type %T\\n\", cl)\n\t}\n\treturn client\n}", "func (obj *ObjectBase) SetClient(c objectInterface) {\n\tobj.clientPtr = c\n}", "func SetClientGetRoutes(c echo.Context) error {\n\tvar m models.Message\n\tgetUserInterface(c, &m.User)\n\tvar cl models.Client\n\tid, err := getParams64(models.GetParams{C: c})\n\tif err != nil {\n\t\tm.Code = http.StatusBadRequest\n\t\tm.Message = \"identificador de cliente no valido\"\n\t\treturn 
commons.DisplayMessage(c, &m)\n\t}\n\tcl.ID = id\n\tcontrollers.ClientGet(cl, &m)\n\treturn commons.DisplayMessage(c, &m)\n}", "func (h *Handler) InjectClient(c client.Client) error {\n\th.Client = c\n\treturn nil\n}", "func (_m *MockDataCoord) SetEtcdClient(etcdClient *clientv3.Client) {\n\t_m.Called(etcdClient)\n}", "func NewClient(appID int, appHash string, opt Options) *Client {\n\topt.setDefaults()\n\n\tmode := manager.ConnModeUpdates\n\tif opt.NoUpdates {\n\t\tmode = manager.ConnModeData\n\t}\n\tclient := &Client{\n\t\trand: opt.Random,\n\t\tlog: opt.Logger,\n\t\tappID: appID,\n\t\tappHash: appHash,\n\t\tupdateHandler: opt.UpdateHandler,\n\t\tsession: pool.NewSyncSession(pool.Session{\n\t\t\tDC: opt.DC,\n\t\t}),\n\t\tdomains: opt.DCList.Domains,\n\t\ttestDC: opt.DCList.Test,\n\t\tcfg: manager.NewAtomicConfig(tg.Config{\n\t\t\tDCOptions: opt.DCList.Options,\n\t\t}),\n\t\tcreate: defaultConstructor(),\n\t\tresolver: opt.Resolver,\n\t\tdefaultMode: mode,\n\t\tconnBackoff: opt.ReconnectionBackoff,\n\t\tclock: opt.Clock,\n\t\tdevice: opt.Device,\n\t\tmigrationTimeout: opt.MigrationTimeout,\n\t\tnoUpdatesMode: opt.NoUpdates,\n\t\tmw: opt.Middlewares,\n\t}\n\tif opt.TracerProvider != nil {\n\t\tclient.tracer = opt.TracerProvider.Tracer(oteltg.Name)\n\t}\n\tclient.init()\n\n\t// Including version into client logger to help with debugging.\n\tif v := version.GetVersion(); v != \"\" {\n\t\tclient.log = client.log.With(zap.String(\"v\", v))\n\t}\n\n\tif opt.SessionStorage != nil {\n\t\tclient.storage = &session.Loader{\n\t\t\tStorage: opt.SessionStorage,\n\t\t}\n\t}\n\n\tclient.opts = mtproto.Options{\n\t\tPublicKeys: opt.PublicKeys,\n\t\tRandom: opt.Random,\n\t\tLogger: opt.Logger,\n\t\tAckBatchSize: opt.AckBatchSize,\n\t\tAckInterval: opt.AckInterval,\n\t\tRetryInterval: opt.RetryInterval,\n\t\tMaxRetries: opt.MaxRetries,\n\t\tCompressThreshold: opt.CompressThreshold,\n\t\tMessageID: opt.MessageID,\n\t\tExchangeTimeout: opt.ExchangeTimeout,\n\t\tDialTimeout: opt.DialTimeout,\n\t\tClock: opt.Clock,\n\n\t\tTypes: getTypesMapping(),\n\n\t\tTracer: client.tracer,\n\t}\n\tclient.conn = client.createPrimaryConn(nil)\n\n\treturn client\n}", "func WithHTTPClient(h *http.Client) Opts {\n\treturn func(r *retryable) {\n\t\tr.httpClient = h\n\t}\n}", "func (h *Handler) InjectClient(c client.Client) error {\n\th.client = c\n\treturn nil\n}", "func (sm *Manager) SetHTTPClient(c *http.Client) {\n\tsm.client = c\n}", "func (sm *Manager) SetHTTPClient(c *http.Client) {\n\tsm.client = c\n}", "func AdaptClient(c *storage.Client) Client {\n\treturn client{c}\n}", "func WrapClient(client redis.UniversalClient, opts ...ClientOption) {\n\tcfg := new(clientConfig)\n\tdefaults(cfg)\n\tfor _, fn := range opts {\n\t\tfn(cfg)\n\t}\n\n\thookParams := &params{\n\t\tadditionalTags: additionalTagOptions(client),\n\t\tconfig: cfg,\n\t}\n\n\tclient.AddHook(&datadogHook{params: hookParams})\n}", "func WithHTTPClient(hClient *http.Client) clientOption {\n\treturn func(c *client) {\n\t\tc.httpClient = hClient\n\t}\n}", "func SetHTTPClient(client *http.Client) error {\n\tif client == nil {\n\t\treturn errHTTPClientInvalid\n\t}\n\tm.Lock()\n\t_HTTPClient = client\n\tm.Unlock()\n\treturn nil\n}", "func WithClient(c client.Client) Option {\n\treturn func(o *Options) {\n\t\to.Client = c\n\t}\n}", "func setLogger(logClient logger.Interface) {\n\tlog = logClient\n}", "func (service *Manager) SetHeader(header interface{}) {\n\tservice.client.AddHeader(header)\n}", "func (r *Registry) client() Client {\n\tif r.Client == nil {\n\t\treturn 
auth.DefaultClient\n\t}\n\treturn r.Client\n}", "func (c *httpClient) SetHeader(header http.Header) HttpClient {\n\tclient := *c\n\tclient.headers = header\n\treturn &client\n}", "func WithClient(client *redis.Client) RedisClientOption {\n\treturn func(rs *RedisStorage) {\n\t\trs.redisClient = client\n\t}\n}", "func (s *StaticClients) Set(srvAddr string) {\n\ts.mu.Lock()\n\tcli := micro.NewClient(s.cfg, micro.NewStaticLinker(srvAddr))\n\tcli.SetProtoFunc(s.protoFunc)\n\ts.clients[srvAddr] = cli\n\ts.mu.Unlock()\n}", "func WithClient(ctx context.Context, clt client.Client) context.Context {\n\treturn context.WithValue(ctx, clientCtxKey{}, clt)\n}", "func (s *stor) UpdateClient(c osin.Client) error {\n\terr := s.Open()\n\tif err != nil {\n\t\treturn errors.Annotatef(err, \"Unable to open boldtb\")\n\t}\n\tdefer s.Close()\n\tcl := cl{\n\t\tId: c.GetId(),\n\t\tSecret: c.GetSecret(),\n\t\tRedirectUri: c.GetRedirectUri(),\n\t\tExtra: c.GetUserData(),\n\t}\n\traw, err := json.Marshal(cl)\n\tif err != nil {\n\t\treturn errors.Annotatef(err, \"Unable to marshal client object\")\n\t}\n\treturn s.d.Update(func(tx *bolt.Tx) error {\n\t\trb, err := tx.CreateBucketIfNotExists(s.root)\n\t\tif err != nil {\n\t\t\treturn errors.Annotatef(err, \"Invalid bucket %s\", s.root)\n\t\t}\n\t\tcb, err := rb.CreateBucketIfNotExists([]byte(clientsBucket))\n\t\tif err != nil {\n\t\t\treturn errors.Annotatef(err, \"Invalid bucket %s/%s\", s.root, clientsBucket)\n\t\t}\n\t\treturn cb.Put([]byte(cl.Id), raw)\n\t})\n}", "func (_m *MockQueryCoord) SetEtcdClient(etcdClient *clientv3.Client) {\n\t_m.Called(etcdClient)\n}", "func (mc *MockClient) SetOptions(opts piperhttp.ClientOptions) {}", "func (f *userFactory) Client(c *ent.Client) *userFactory {\n\tf.client = c\n\treturn f\n}", "func handleVarnishClient(conn net.Conn, publisher *Publisher, purgeOnStartup bool, secret *string) {\n\tdefer conn.Close()\n\n\twait := make(chan bool)\n\tclient := client.NewVarnishClient(&conn, wait)\n\n\terr := client.AuthenticateIfNeeded(secret)\n\tif err != nil {\n\t\tlogger.Crit(fmt.Sprintln(\"Varnish authentication failed:\", err))\n\t\treturn\n\t}\n\n\tif purgeOnStartup {\n\t\t// flush the whole cache of the new client\n\t\tclient.SendPurge([]byte(\".*\"))\n\t}\n\n\t// wait for purges\n\tpublisher.Sub(client)\n\t<-wait\n\n\t// client has quit, clean up\n\tpublisher.Unsub(client)\n\tlogger.Info(fmt.Sprintln(utils.ReverseName(conn), \"gone\"))\n}", "func HubClientInit(server, clientID, username, password string) *MQTT.ClientOptions {\n\topts := MQTT.NewClientOptions().AddBroker(server).SetClientID(clientID).SetCleanSession(true)\n\tif username != \"\" {\n\t\topts.SetUsername(username)\n\t\tif password != \"\" {\n\t\t\topts.SetPassword(password)\n\t\t}\n\t}\n\ttlsConfig := &tls.Config{InsecureSkipVerify: true, ClientAuth: tls.NoClientCert}\n\topts.SetTLSConfig(tlsConfig)\n\treturn opts\n}", "func ClientsetFromContext(ctx context.Context) (*Clientset, error) {\n\trestConfig, err := kubeutils.GetConfig(\"\", \"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkubeClient, err := kubernetes.NewForConfig(restConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcrdCache := kube.NewKubeCache(ctx)\n\tkubeCoreCache, err := cache.NewKubeCoreCache(ctx, kubeClient)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpromClient, err := promv1.NewPrometheusConfigClient(prometheus.ResourceClientFactory(kubeClient, kubeCoreCache))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t/*\n\t\tsupergloo config clients\n\t*/\n\tinstall, err := 
v1.NewInstallClient(clientForCrd(v1.InstallCrd, restConfig, crdCache))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := install.Register(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tmesh, err := v1.NewMeshClient(clientForCrd(v1.MeshCrd, restConfig, crdCache))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := mesh.Register(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tmeshIngress, err := v1.NewMeshIngressClient(clientForCrd(v1.MeshIngressCrd, restConfig, crdCache))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := meshIngress.Register(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tmeshGroup, err := v1.NewMeshGroupClient(clientForCrd(v1.MeshGroupCrd, restConfig, crdCache))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := meshGroup.Register(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tupstream, err := gloov1.NewUpstreamClient(clientForCrd(gloov1.UpstreamCrd, restConfig, crdCache))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := upstream.Register(); err != nil {\n\t\treturn nil, err\n\t}\n\n\troutingRule, err := v1.NewRoutingRuleClient(clientForCrd(v1.RoutingRuleCrd, restConfig, crdCache))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := routingRule.Register(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tsecurityRule, err := v1.NewSecurityRuleClient(clientForCrd(v1.SecurityRuleCrd, restConfig, crdCache))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := securityRule.Register(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// ilackarms: should we use Kube secret here? these secrets follow a different format (specific to istio)\n\ttlsSecret, err := v1.NewTlsSecretClient(&factory.KubeSecretClientFactory{\n\t\tClientset: kubeClient,\n\t\tPlainSecrets: true,\n\t\tCache: kubeCoreCache,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := tlsSecret.Register(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tsecret, err := gloov1.NewSecretClient(&factory.KubeSecretClientFactory{\n\t\tClientset: kubeClient,\n\t\tCache: kubeCoreCache,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := secret.Register(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tsettings, err := gloov1.NewSettingsClient(clientForCrd(gloov1.SettingsCrd, restConfig, crdCache))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := settings.Register(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// special resource client wired up to kubernetes pods\n\t// used by the istio policy syncer to watch pods for service account info\n\tpods := pod.NewPodClient(kubeClient, kubeCoreCache)\n\tservices := service.NewServiceClient(kubeClient, kubeCoreCache)\n\n\treturn newClientset(\n\t\trestConfig,\n\t\tkubeClient,\n\t\tpromClient,\n\t\tnewSuperglooClients(install, mesh, meshGroup, meshIngress, upstream,\n\t\t\troutingRule, securityRule, tlsSecret, secret, settings),\n\t\tnewDiscoveryClients(pods, services),\n\t), nil\n}", "func WrapClient(w ...client.Wrapper) error {\n\tfor i := len(w); i > 0; i-- {\n\t\tDefault.Client = w[i-1](Default.Client)\n\t}\n\treturn nil\n}", "func setupClient() *Client {\n\treturn &Client{\n\t\tlogger: NewNilLogger(),\n\t\tpredicateKey: predicateKeyDefault,\n\t\ttemplate: templateDefault,\n\t\tmaxWorkerCount: maxWorkers,\n\t}\n}", "func WithClient(hc *http.Client) Options {\n\treturn func(s *SPDX) { s.hc = hc }\n}", "func (n *Notifier) WithClient(client *http.Client) *Notifier {\n\tn.Client = client\n\treturn n\n}", "func WithHttpClient(client httpClient) func(rpc *EthRPC) {\n\treturn func(rpc *EthRPC) {\n\t\trpc.client = 
client\n\t}\n}", "func WithClient(x *xray.XRay) Option {\n\treturn func(o *Options) {\n\t\to.Client = x\n\t}\n}", "func (r *CSIPowerMaxRevProxyReconciler) SetClient(client client.Client) *CSIPowerMaxRevProxyReconciler {\n\tr.Client = client\n\treturn r\n}", "func init() {\n\tclients = map[string]*Client{}\n\theartbeatInterval = defaultHeartbeatInterval\n}", "func HubClientInit(server, clientID, username, password string) *MQTT.ClientOptions {\n\topts := MQTT.NewClientOptions().AddBroker(server).SetClientID(clientID).SetCleanSession(true)\n\tif username != \"\" {\n\t\topts.SetUsername(username)\n\t\tif password != \"\" {\n\t\t\topts.SetPassword(password)\n\t\t}\n\t}\n\n\tklog.V(4).Infof(\"Start to set TLS configuration for MQTT client\")\n\ttlsConfig := &tls.Config{}\n\tif eventconfig.Config.TLS.Enable {\n\t\tcert, err := tls.LoadX509KeyPair(eventconfig.Config.TLS.TLSMqttCertFile, eventconfig.Config.TLS.TLSMqttPrivateKeyFile)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Failed to load x509 key pair: %v\", err)\n\t\t\treturn nil\n\t\t}\n\n\t\tcaCert, err := os.ReadFile(eventconfig.Config.TLS.TLSMqttCAFile)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Failed to read TLSMqttCAFile\")\n\t\t\treturn nil\n\t\t}\n\n\t\tpool := x509.NewCertPool()\n\t\tif ok := pool.AppendCertsFromPEM(caCert); !ok {\n\t\t\tklog.Errorf(\"Cannot parse the certificates\")\n\t\t\treturn nil\n\t\t}\n\n\t\ttlsConfig = &tls.Config{\n\t\t\tRootCAs: pool,\n\t\t\tCertificates: []tls.Certificate{cert},\n\t\t\tInsecureSkipVerify: false,\n\t\t}\n\t} else {\n\t\ttlsConfig = &tls.Config{InsecureSkipVerify: true, ClientAuth: tls.NoClientCert}\n\t}\n\topts.SetTLSConfig(tlsConfig)\n\tklog.V(4).Infof(\"set TLS configuration for MQTT client successfully\")\n\n\treturn opts\n}", "func WithClient(client *client.Client) *Auth {\n\treturn &Auth{client: client}\n\n}", "func WithClient(client *http.Client) Option {\n\treturn func(o *Options) {\n\t\to.HTTPClient = client\n\t}\n}", "func Client(c *http.Client) func(*Attacker) {\n\treturn func(a *Attacker) { a.client = *c }\n}", "func WithClient(c *http.Client) Option {\n\treturn func(u *Updater) error {\n\t\tu.Fetcher.Client = c\n\t\treturn nil\n\t}\n}", "func WithHTTPClient(httpclient *http.Client) ClientOption {\n\treturn func(client *Client) {\n\t\tclient.httpClient = httpclient\n\t}\n}", "func setupClient(t *testing.T, serverResponseBody string) (*lokalise.Client, *fixture, func()) {\n\treturn setupServerAndClient(t, func(f *fixture) http.HandlerFunc {\n\t\treturn func(rw http.ResponseWriter, req *http.Request) {\n\t\t\tf.calledMethod = req.Method\n\t\t\tf.calledPath = req.URL.Path\n\t\t\trequestBody, err := ioutil.ReadAll(req.Body)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"read request body failed: %v\", err)\n\t\t\t}\n\t\t\tf.requestBody = string(requestBody)\n\t\t\trw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\trw.WriteHeader(http.StatusOK)\n\t\t\tfmt.Fprint(rw, serverResponseBody)\n\t\t}\n\t})\n}", "func (g *FakeClientFactory) Reset() {\n\tg.Caclient = &FakeConfigAgentClient{}\n}", "func closeClient() {\n\tfor id, _ := range lights {\n\t\thandleExpiredLight(lights[id].light)\n\t}\n\n\t_ = client.CloseSubscription(subscription)\n\t_ = client.Close()\n\n\tclient = nil\n\tsubscription = nil\n}", "func WithHTTPClient(c *http.Client) func(*Client) {\n\treturn func(mr *Client) {\n\t\tmr.client = c\n\t}\n}", "func wrappedClient(t *testing.T, testID string) (*Client, error) {\n\tctx := context.Background()\n\tbase := http.DefaultTransport\n\n\ttrans, err := 
htransport.NewTransport(ctx, base, option.WithoutAuthentication(), option.WithUserAgent(\"custom-user-agent\"))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create http client: %v\", err)\n\t}\n\n\tc := http.Client{Transport: trans}\n\n\t// Add RoundTripper to the created HTTP client\n\twrappedTrans := &retryTestRoundTripper{rt: c.Transport, testID: testID, T: t}\n\tc.Transport = wrappedTrans\n\n\t// Supply this client to storage.NewClient\n\t// STORAGE_EMULATOR_HOST takes care of setting the correct endpoint\n\tclient, err := NewClient(ctx, option.WithHTTPClient(&c))\n\treturn client, err\n}", "func RecordClient(ctx context.Context, c *http.Client, opts ...option.ClientOption) (*http.Client, error) {\n\ttrans, err := htransport.NewTransport(ctx, c.Transport, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &http.Client{Transport: trans}, nil\n}" ]
[ "0.66653216", "0.656386", "0.6513115", "0.63362336", "0.6135869", "0.6078363", "0.6058374", "0.6022997", "0.58994734", "0.5800192", "0.57922196", "0.5785561", "0.5777135", "0.57480276", "0.5738935", "0.5699551", "0.5697977", "0.56765866", "0.56661636", "0.5657892", "0.5656079", "0.56319517", "0.5594456", "0.5566782", "0.555586", "0.55326366", "0.55277056", "0.5525969", "0.55251837", "0.5518431", "0.55173826", "0.55140644", "0.5504275", "0.5443845", "0.5433823", "0.5417169", "0.5415717", "0.5410661", "0.5404725", "0.5395699", "0.53930205", "0.5378266", "0.5375024", "0.5370511", "0.5368408", "0.5357836", "0.5357836", "0.5357185", "0.53421134", "0.53392524", "0.53362364", "0.5333345", "0.53210306", "0.53099835", "0.5308108", "0.5303734", "0.5296582", "0.52754563", "0.5271648", "0.5266274", "0.5266274", "0.52631336", "0.52627957", "0.5262616", "0.5259193", "0.5251725", "0.52372307", "0.52364147", "0.5235329", "0.5229622", "0.522687", "0.5218796", "0.5206012", "0.5203136", "0.52020997", "0.51963085", "0.51952654", "0.5186248", "0.5185565", "0.5181402", "0.51803386", "0.51802206", "0.5180104", "0.5179137", "0.5174806", "0.51729333", "0.51722413", "0.5167396", "0.5156402", "0.51521116", "0.5151733", "0.51512307", "0.51495916", "0.5149068", "0.5135798", "0.5128371", "0.5126924", "0.5120617", "0.5119387", "0.51180124" ]
0.71384174
0
CreateAuthMiddleware creates the middleware for authentication
func CreateAuthMiddleware() (*jwt.Middleware, error) { err := variables.LoadTokenKeys() if err != nil { return nil, err } authMiddleware := &jwt.Middleware{ Realm: "numapp", SigningAlgorithm: variables.SigningAlgorithm, Key: variables.TokenSignKey, VerifyKey: &variables.TokenSignKey.PublicKey, Timeout: time.Hour, MaxRefresh: time.Hour * 24, Authenticator: func(username string, password string) error { // Log the user in err := login.Login(username, password) if err != nil { return err } return nil }, } return authMiddleware, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewAuthMiddleware(svc interfaces.Service, r interfaces.Repository) interfaces.Service {\n\treturn &authMiddleware{\n\t\tnext: svc,\n\t\trepository: r,\n\t}\n}", "func NewAuthMiddleware(l lib.LogI, ctx *lib.Context) *AuthMiddleware {\n\treturn &AuthMiddleware{\n\t\tLog: l,\n\t\tContext: ctx,\n\t}\n}", "func NewAuthMiddleware(up UserProvider) echo.MiddlewareFunc {\n\treturn func(next echo.HandlerFunc) echo.HandlerFunc {\n\t\treturn func(c echo.Context) error {\n\t\t\tctx := c.Request().Context()\n\n\t\t\taccountID := c.Param(\"account_id\")\n\t\t\tif accountID == \"\" || len(accountID) <= 10 {\n\t\t\t\treturn errors.New(\"account id missing or invalid\")\n\t\t\t}\n\n\t\t\tauth := c.Request().Header.Get(\"authorization\")\n\t\t\tif auth == \"\" || !strings.HasPrefix(auth, \"Bearer \") || len(auth) <= 10 {\n\t\t\t\treturn errors.New(\"token invalid\")\n\t\t\t}\n\n\t\t\ttoken := auth[7:]\n\n\t\t\tu, err := up.GetByToken(ctx, token)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"token invalid\")\n\t\t\t}\n\n\t\t\tif domain.ID(accountID) != u.AccountID {\n\t\t\t\treturn errors.New(\"account invalid\")\n\t\t\t}\n\n\t\t\tif u.TokenExpiresAt.Before(time.Now()) {\n\t\t\t\treturn errors.New(\"token expired\")\n\t\t\t}\n\n\t\t\t// Replace current request object\n\t\t\tc.SetRequest(c.Request().WithContext(domain.ContextWithSession(ctx, u)))\n\n\t\t\treturn next(c)\n\t\t}\n\t}\n}", "func AuthMiddleware() *Middleware {\n\tm := make([]*Middleware, 0, 1+len(extraAuthMiddlewares))\n\tm = append(m, RequireAuthMiddleware)\n\tm = append(m, extraAuthMiddlewares...)\n\treturn composeMiddleware(m...)\n}", "func (env *Env) AuthMiddleware(next http.Handler) http.Handler {\n\treturn jwtauth.Verifier(jwtAuth)(extractUser(env.userRepository)(next))\n}", "func AuthMiddleware() (*jwt.GinJWTMiddleware, error) {\n\treturn jwt.New(&jwt.GinJWTMiddleware{\n\t\tRealm: \"test zone\",\n\t\tKey: []byte(\"$2a$10$GCiJQcAqSaPV8.bU/mvGiOgdHV8GuMOdmW6.nUpCRisfUx9b.VGqy\"),\n\t\tTimeout: time.Hour,\n\t\tMaxRefresh: time.Hour,\n\t\tIdentityKey: identityKey,\n\t\tPayloadFunc: payloadFunc,\n\t\tIdentityHandler: identityHandler,\n\t\tAuthenticator: authenticator,\n\t\tAuthorizator: authorizator,\n\t\tTokenLookup: \"header: Authorization, query: token, cookie: jwt\",\n\t\tTokenHeadName: \"Bearer\",\n\t\tTimeFunc: time.Now,\n\t\tLoginResponse: LoginResponse,\n\t})\n}", "func AuthMiddleware(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tuser, _ := UserFromHTTPRequestgo(r)\n\t\tctx := r.Context()\n\n\t\t// put it in context\n\t\tctx = context.WithValue(ctx, ut.UserCtxKey, &user)\n\t\tr = r.WithContext(ctx)\n\t\tnext.ServeHTTP(w, r)\n\t})\n}", "func AuthMiddleware(auto401 bool) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tUpdateContextUser(c, 0)\n\t\ttoken, err := request.ParseFromRequest(c.Request, MyAuth2Extractor, func(token *jwt.Token) (interface{}, error) {\n\t\t\tb := ([]byte(common.NBSecretPassword))\n\t\t\treturn b, nil\n\t\t})\n\t\tif err != nil {\n\t\t\tif auto401 {\n\t\t\t\tc.AbortWithError(http.StatusUnauthorized, err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tif claims, ok := token.Claims.(jwt.MapClaims); ok && token.Valid {\n\t\t\tmy_user_id := uint(claims[\"id\"].(float64))\n\t\t\t//fmt.Println(my_user_id,claims[\"id\"])\n\t\t\tUpdateContextUser(c, my_user_id)\n\t\t}\n\t}\n}", "func AuthMiddleware() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tauthHeader := c.GetHeader(\"authorization\")\n\t\tif authHeader == \"\" || 
len(authHeader) < len(\"Token\")+1 {\n\t\t\trestErr := resterror.NewUnAuthorizedError()\n\t\t\tc.JSON(restErr.StatusCode, restErr)\n\t\t\tc.Abort()\n\t\t\treturn\n\t\t}\n\n\t\ttoken := authHeader[len(\"Token \"):]\n\n\t\tauthService := services.JWTAuthService()\n\t\tresult, err := authService.ValidateToken(token)\n\t\tif err != nil || !result.Valid {\n\t\t\trestErr := resterror.NewUnAuthorizedError()\n\t\t\tc.JSON(restErr.StatusCode, restErr)\n\t\t\tc.Abort()\n\t\t\treturn\n\t\t}\n\n\t\tclaims := result.Claims.(jwt.MapClaims)\n\t\tc.Set(\"user_id\", claims[\"user_id\"])\n\t\tc.Set(\"is_admin\", claims[\"is_admin\"])\n\n\t\tc.Next()\n\t}\n}", "func NewAuthMiddleware(providers []config.JWTProvider, UnauthenticatedRoutes []*regexp.Regexp) func(http.Handler) http.Handler {\n\tinitJWKs(providers)\n\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tif matches(UnauthenticatedRoutes, r.URL.Path) {\n\t\t\t\tnext.ServeHTTP(w, r)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttoken, err := request.ParseFromRequest(r, request.AuthorizationHeaderExtractor, jwkfetch.FromIssuerClaim())\n\t\t\tif err != nil {\n\t\t\t\tif err == jwt.ErrSignatureInvalid {\n\t\t\t\t\tlog.WithError(err).Error(\"Error while validating request JWT\")\n\t\t\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif err == request.ErrNoTokenInRequest {\n\t\t\t\t\tlog.WithError(err).Error(\"Error while validating request JWT\")\n\t\t\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tlog.WithError(err).Error(\"Error while validating request JWT\")\n\t\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !token.Valid {\n\t\t\t\tlog.Error(\"Error while validating request JWT: token is invalid.\")\n\t\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\terr = validate(token, providers)\n\t\t\tif err != nil {\n\t\t\t\tlog.WithError(err).Error(\"Error while validating request JWT\")\n\t\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tnext.ServeHTTP(w, r)\n\t\t})\n\n\t}\n}", "func AuthMiddleware(next http.HandlerFunc) http.HandlerFunc {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tbearerToken := r.Header.Get(\"Authorization\")\n\n\t\tif bearerToken == \"\" {\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\tw.Write([]byte(\"authorization header must be sent\"))\n\t\t\treturn\n\t\t}\n\n\t\tbearerToken = strings.Replace(bearerToken, \"Bearer \", \"\", 1)\n\n\t\ttoken, err := jwt.ParseWithClaims(bearerToken, &models.Claims{}, func(token *jwt.Token) (interface{}, error) {\n\t\t\treturn []byte(os.Getenv(\"JWT_KEY\")), nil\n\t\t})\n\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\tw.Write([]byte(err.Error()))\n\t\t\treturn\n\t\t}\n\n\t\tclaims, ok := token.Claims.(*models.Claims)\n\n\t\tif !ok || !token.Valid {\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\tw.Write([]byte(\"invalid token\"))\n\t\t\treturn\n\t\t}\n\n\t\tctx := context.WithValue(r.Context(), \"username\", claims.Username)\n\n\t\tnext.ServeHTTP(w, r.WithContext(ctx))\n\t})\n}", "func NewAuthMiddleware(headerKeyLabel string, headerSecretLabel string, config AuthConfig) *RESTGate {\n\tt := &RESTGate{headerKeyLabel: headerKeyLabel, headerSecretLabel: headerSecretLabel, config: config}\n\n\tif headerKeyLabel == \"\" { //headerKeyLabel must be defined\n\t\tif t.config.Debug == true {\n\t\t\tlog.Printf(\"RestGate: 
headerKeyLabel is not defined.\")\n\t\t}\n\t\treturn nil\n\t}\n\n\t//Default Error Messages\n\tif t.config.ErrorMessages == nil {\n\t\tt.config.ErrorMessages = map[int]map[string]string{\n\t\t\t1: e.New(1, \"No Key Or Secret\", \"\").Render(),\n\t\t\t2: e.New(2, \"Unauthorized Access\", \"\").Render(),\n\t\t}\n\t} else {\n\t\tif _, ok := t.config.ErrorMessages[1]; !ok {\n\t\t\tt.config.ErrorMessages[1] = e.New(1, \"No Key Or Secret\", \"\").Render()\n\t\t}\n\n\t\tif _, ok := t.config.ErrorMessages[2]; !ok {\n\t\t\tt.config.ErrorMessages[2] = e.New(2, \"Unauthorized Access\", \"\").Render()\n\t\t}\n\t}\n\n\treturn t\n}", "func authMiddleware(next echo.HandlerFunc) echo.HandlerFunc {\n\treturn func(c echo.Context) error {\n\t\tcookie, err := c.Cookie(model.CookieName)\n\t\tif err == nil {\n\t\t\tlogin := authcookie.Login(cookie.Value, []byte(model.SECRET))\n\t\t\tif login == \"\" {\n\t\t\t\treturn c.Redirect(http.StatusTemporaryRedirect, \"/login\")\n\t\t\t}\n\t\t\tc.Request().Header.Set(model.HEADER_AUTH_USER_ID, login)\n\t\t\tusername := strings.Split(login, \"@\")[0]\n\t\t\tdbName := username\n\t\t\terr := db.ConnectDB(dbName)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"cannot initialize db: %v\", err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn next(c)\n\t\t}\n\t\tlog.Println(err)\n\t\treturn c.Redirect(http.StatusTemporaryRedirect, \"/login\")\n\t}\n}", "func AuthMiddleware(repo mongodb.UserRepo) func(http.Handler) http.Handler {\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\ttoken, err := parseToken(r)\n\t\t\tif err != nil {\n\t\t\t\tnext.ServeHTTP(w, r)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tclaims, ok := token.Claims.(jwt.MapClaims)\n\n\t\t\tif !ok || !token.Valid {\n\t\t\t\tnext.ServeHTTP(w, r)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tuser, err := repo.GetUser(claims[\"jti\"].(string))\n\t\t\tif err != nil {\n\t\t\t\tnext.ServeHTTP(w, r)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tctx := context.WithValue(r.Context(), CurrentUserKey, user)\n\n\t\t\tnext.ServeHTTP(w, r.WithContext(ctx))\n\t\t})\n\t}\n}", "func SetMiddlewareAuth(next http.HandlerFunc, authReq bool) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tresponse, err := auth.ValidateToken(r)\n\t\tif !authReq && err != nil {\n\t\t\tctx := context.WithValue(r.Context(), contextkey.ContextKeyUsernameCaller, \"\")\n\t\t\tctx2 := context.WithValue(ctx, contextkey.ContextKeyUserIDCaller, \"\")\n\t\t\tnext(w, r.WithContext(ctx2))\n\t\t\treturn\n\t\t} else if err != nil {\n\t\t\tresponses.ERROR(w, http.StatusUnauthorized, err)\n\t\t\treturn\n\t\t}\n\t\tnext(w, response)\n\t}\n}", "func New() iris2.HandlerFunc {\n\tl := &authMiddleware{}\n\treturn l.Serve\n}", "func (m JWTAuthMiddleware) Setup() {}", "func AuthMiddleware(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer r.Body.Close()\n\n\tvar UserCred Credentials\n\tjson.Unmarshal(body, &UserCred)\n\n\ttoken, err := jwt.Parse(UserCred.Token, func(token *jwt.Token) (interface{}, error) {\n\t\treturn Secret, nil\n\t})\n\tif err == nil && token.Valid {\n\t\t// Create context to send username to other handlers after authentictaion\n\t\tcontext.Set(r, \"username\", UserCred.Username)\n\t\tnext(w, r)\n\t} else {\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\tfmt.Fprint(w, \"Unauthorized, You should sign in First\")\n\t}\n}", "func (route routerConfig) authMiddleware(deviceDB 
*devices.Database) func(next http.Handler) http.Handler {\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {\n\t\t\tif route.routeAuth[request.URL.Path] {\n\t\t\t\tnext.ServeHTTP(writer, request)\n\t\t\t} else {\n\t\t\t\tdev, err := auth.VerifyAccessToken(request, deviceDB)\n\t\t\t\tif err != nil {\n\t\t\t\t\terr.Encode(&writer)\n\t\t\t\t\treturn\n\t\t\t\t} else {\n\t\t\t\t\tcontext.Set(request, \"localpart\", dev.UserID)\n\t\t\t\t\tnext.ServeHTTP(writer, request)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}", "func GetAuthMiddleware(cfg *types.Config) gin.HandlerFunc {\n\tif !cfg.OIDCEnable {\n\t\treturn gin.BasicAuth(gin.Accounts{\n\t\t\t// Use the config's username and password for basic auth\n\t\t\tcfg.Username: cfg.Password,\n\t\t})\n\t}\n\treturn CustomAuth(cfg)\n}", "func AuthMiddleware(c *fiber.Ctx) error {\n\ttoken := c.Get(\"Authorization\")\n\tuser, err := auth.GetUserFromToken(token)\n\tif err != nil {\n\t\tc.Status(401)\n\t\tc.JSON(types.Error{\n\t\t\tMessage: \"Not Authorized\",\n\t\t})\n\t\treturn err\n\t}\n\tc.Locals(\"user\", user)\n\treturn c.Next()\n}", "func (m *middlewareCreator) CreateGinMiddleware() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tauthHeader := c.GetHeader(\"Authorization\")\n\t\trx := regexp.MustCompile(\"^[B|b]earer\\\\s*\")\n\t\tjwt := rx.ReplaceAllString(authHeader, \"\")\n\t\tresult := m.Validator.ValidateToken(jwt)\n\n\t\tif !result.Valid || result.Expired {\n\t\t\tif m.FailureHook != nil {\n\t\t\t\tm.FailureHook(c)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Per RFC responding with a 401 whether invalid or expired\n\t\t\tc.AbortWithStatusJSON(http.StatusUnauthorized, m.notAuthorizedError())\n\t\t\treturn\n\t\t}\n\n\t\tc.Set(\"UserPayload\", result)\n\n\t\tif m.SuccessHook != nil {\n\t\t\tm.SuccessHook(c)\n\t\t}\n\t}\n}", "func (a *AuthMiddleware) GetAuthMiddleware() (*jwt.GinJWTMiddleware, error) {\n\tmiddleware, err := jwt.New(&jwt.GinJWTMiddleware{\n\t\tRealm: a.Realm,\n\t\tKey: a.Key,\n\t\tTimeout: a.Timeout,\n\t\tMaxRefresh: a.MaxRefresh,\n\t\tIdentityKey: a.IdentityKey,\n\t\tPayloadFunc: func(data interface{}) jwt.MapClaims {\n\t\t\tif v, ok := data.(drepository.Manager); ok {\n\t\t\t\treturn jwt.MapClaims{\n\t\t\t\t\ta.IdentityKey: v.ID,\n\t\t\t\t\t\"email\": v.Email,\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn jwt.MapClaims{}\n\t\t},\n\t\tIdentityHandler: func(c *gin.Context) interface{} {\n\t\t\tclaims := jwt.ExtractClaims(c)\n\n\t\t\tid, _ := primitive.ObjectIDFromHex(claims[a.IdentityKey].(string))\n\n\t\t\tc.Set(\"managerID\", claims[a.IdentityKey].(string))\n\n\t\t\treturn drepository.Manager{\n\t\t\t\tID: id,\n\t\t\t\tEmail: claims[\"email\"].(string),\n\t\t\t}\n\t\t},\n\t\tAuthenticator: func(c *gin.Context) (interface{}, error) {\n\t\t\tvar loginValues login\n\n\t\t\tif err := c.ShouldBind(&loginValues); err != nil {\n\t\t\t\treturn \"\", jwt.ErrMissingLoginValues\n\t\t\t}\n\n\t\t\tfind := &dto.FindManagers{Email: loginValues.Email}\n\t\t\tmanager := drepository.Manager{}\n\t\t\t_ = manager.FindOne(find)\n\n\t\t\tif err := bcrypt.CompareHashAndPassword(\n\t\t\t\t[]byte(manager.Password), []byte(loginValues.Password)); err == nil {\n\t\t\t\treturn manager, nil\n\t\t\t}\n\n\t\t\treturn nil, jwt.ErrFailedAuthentication\n\t\t},\n\t\tAuthorizator: func(data interface{}, c *gin.Context) bool {\n\t\t\tif v, ok := data.(drepository.Manager); ok {\n\t\t\t\tc.Set(a.IdentityKey, v.ID)\n\t\t\t\tc.Set(\"email\", v.Email)\n\t\t\t\treturn true\n\t\t\t}\n\t\t\treturn 
false\n\t\t},\n\t\tUnauthorized: func(c *gin.Context, code int, message string) {\n\t\t\tc.JSON(code, gin.H{\n\t\t\t\t\"code\": code,\n\t\t\t\t\"message\": message,\n\t\t\t})\n\t\t},\n\n\t\tTokenLookup: \"header: Authorization, query: token, cookie: token\",\n\t\tTokenHeadName: \"Bearer\",\n\t\tTimeFunc: time.Now,\n\t})\n\n\tif err != nil {\n\t\tlog.Fatal(\"JWT Error:\" + err.Error())\n\t}\n\n\treturn middleware, err\n}", "func authMiddleware(authService auth.Service, userService user.Service) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tauthHeader := c.GetHeader(\"Authorization\")\n\n\t\tif !strings.Contains(authHeader, \"Bearer\") {\n\t\t\tresponse := helper.APIResponse(\"Unauthorized\", http.StatusUnauthorized, \"error\", nil)\n\t\t\tc.AbortWithStatusJSON(http.StatusUnauthorized, response)\n\t\t\treturn\n\t\t}\n\n\t\t// Bearer tokentokentoken\n\t\ttokenString := \"\"\n\t\tarrayToken := strings.Split(authHeader, \" \")\n\t\tif len(arrayToken) == 2 {\n\t\t\ttokenString = arrayToken[1]\n\t\t}\n\n\t\ttoken, err := authService.ValidateToken(tokenString)\n\t\tif err != nil {\n\t\t\tresponse := helper.APIResponse(\"Unauthorized\", http.StatusUnauthorized, \"error\", nil)\n\t\t\tc.AbortWithStatusJSON(http.StatusUnauthorized, response)\n\t\t\treturn\n\t\t}\n\n\t\tclaim, ok := token.Claims.(jwt.MapClaims)\n\t\tif !ok || !token.Valid {\n\t\t\tresponse := helper.APIResponse(\"Unauthorized\", http.StatusUnauthorized, \"error\", nil)\n\t\t\tc.AbortWithStatusJSON(http.StatusUnauthorized, response)\n\t\t\treturn\n\t\t}\n\n\t\tuserID := int(claim[\"user_id\"].(float64))\n\n\t\tuser, err := userService.GetUserById(userID)\n\t\tif err != nil {\n\t\t\tresponse := helper.APIResponse(\"Unauthorized\", http.StatusUnauthorized, \"error\", nil)\n\t\t\tc.AbortWithStatusJSON(http.StatusUnauthorized, response)\n\t\t\treturn\n\t\t}\n\n\t\tc.Set(\"currentUser\", user)\n\t}\n}", "func SetupAuth(router *mux.Router) {\n\n\trouter.Use(authMiddleware)\n}", "func Auth() Middleware {\n\treturn func(h http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\t// Parse takes the token string and a function for looking up the key. The latter is especially\n\t\t\t// useful if you use multiple keys for your application. The standard is to use 'kid' in the\n\t\t\t// head of the token to identify which key to use, but the parsed token (head and claims) is provided\n\t\t\t// to the callback, providing flexibility.\n\t\t\ttoken, err := jwt.Parse(r.Header.Get(\"Token\"), func(token *jwt.Token) (interface{}, error) {\n\t\t\t\t// Don't forget to validate the alg is what you expect:\n\t\t\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\t\t\treturn nil, fmt.Errorf(\"Unexpected signing method: %v\", token.Header[\"alg\"])\n\t\t\t\t}\n\n\t\t\t\t// hmacSampleSecret is a []byte containing your secret, e.g. 
[]byte(\"my_secret_key\")\n\t\t\t\treturn []byte(\"asndbu1vh23b12v31298yxcmnbas\"), nil\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// ctx := context.WithValue(r.Context(), domain.ContextKeyUser, <user>)\n\n\t\t\t// if claims, ok := token.Claims.(jwt.MapClaims); ok && token.Valid {\n\t\t\t// return\n\t\t\t// } else {\n\t\t\t// return\n\t\t\t// }\n\n\t\t\t// h.ServeHTTP(w, r.WithContext(merchant.NewContext(r.Context())))\n\t\t})\n\t}\n}", "func NewAuthMiddlewareHandler(handler http.Handler, auth influxdb.Authorizer) http.Handler {\n\treturn &authMiddlewareHandler{\n\t\thandler: handler,\n\t\tauth: auth,\n\t}\n}", "func (s *Setup) AuthMiddleware(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\n\t\t// Capturing Authorizathion header.\n\t\ttokenHeader := r.Header.Get(\"Authorization\")\n\n\t\t// Checking if the value is empty.\n\t\tif tokenHeader == \"\" {\n\t\t\terrhandler.DecodeError(w, r, s.logger, errEmptyToken, http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\t// Checking if the header contains Bearer string and if the token exists.\n\t\tif !strings.Contains(tokenHeader, \"Bearer\") || len(strings.Split(tokenHeader, \"Bearer \")) == 1 {\n\t\t\terrhandler.DecodeError(w, r, s.logger, errMalformedToken, http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\n\t\t// Capturing the token.\n\t\tjwtString := strings.Split(tokenHeader, \"Bearer \")[1]\n\n\t\t// Parsing the token to verify its authenticity.\n\t\ttoken, err := jwt.Parse(jwtString, func(token *jwt.Token) (interface{}, error) {\n\t\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"Unexpected signing method: %v\", token.Header[\"alg\"])\n\t\t\t}\n\t\t\tcfg, err := config.Load()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn []byte(cfg.JWTSecret), nil\n\t\t})\n\n\t\t// Returning parsing errors.\n\t\tif err != nil {\n\t\t\terrhandler.DecodeError(w, r, s.logger, err.Error(), http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\n\t\t// If the token is valid.\n\t\tif token.Valid {\n\t\t\tnext.ServeHTTP(w, r)\n\t\t} else {\n\t\t\terrhandler.DecodeError(w, r, s.logger, errInvalidJWTToken, http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t})\n}", "func (s Server) AuthMiddleware(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t// Log request\n\t\tglog.Infof(\"%s %v: %+v\", r.RemoteAddr, r.Method, r.URL)\n\n\t\t// Check for none API calls\n\t\thttpPath := r.URL.Path\n\t\tif len(httpPath) <= len(s.APIPath) || httpPath[:len(s.APIPath)] != s.APIPath ||\n\t\t\thttpPath[len(s.APIPath):] == \".well-known/oauth-authorization-server\" {\n\t\t\tnext.ServeHTTP(w, r)\n\n\t\t\treturn\n\t\t}\n\n\t\t// If we are using OAuth2 server, try to use the bearer code cookie\n\t\tif s.OAuthServer != nil {\n\t\t\ts.AddCodeAuthentication(next, w, r)\n\t\t\treturn\n\t\t}\n\n\t\t// If we are using OAuth2 server, try to use the bearer code cookie\n\t\tif s.JWTTokenRSAKey != nil {\n\t\t\ts.AddJWTAuthentication(next, w, r)\n\t\t\treturn\n\t\t}\n\n\t\thandleError(w, fmt.Errorf(\"missing JWT public key and/or OAuth2, can't access k8s API server\"))\n\t})\n}", "func AuthMiddleware(auto401 bool) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tusers.UpdateContextUserModel(c, 0)\n\t\tbytes, _ := ioutil.ReadFile(\"./sample_key.pub\")\n\t\trsaPublic, _ := crypto.ParseRSAPublicKeyFromPEM(bytes)\n\n\t\tauth := 
c.Request.Header.Get(\"Authorization\")\n\t\tif len(auth) == 0 {\n\t\t\tc.AbortWithError(http.StatusUnauthorized, errors.New(\"Authorization not found\"))\n\t\t}\n\t\taccessToken := auth[7:len(auth)]\n\t\tjwt, err := jws.ParseJWT([]byte(accessToken))\n\t\tif err != nil {\n\t\t\tif auto401 {\n\t\t\t\tc.AbortWithError(http.StatusUnauthorized, err)\n\t\t\t}\n\t\t}\n\t\t// Validate token\n\t\tif err = jwt.Validate(rsaPublic, crypto.SigningMethodRS256); err != nil {\n\t\t\tc.AbortWithError(http.StatusUnauthorized, err)\n\t\t}\n\t\t//j, erro := jws.ParseFromHeader(c.Request, jws.General)\n\t\tj, erro := jws.ParseFromRequest(c.Request, jws.General)\n\t\tif erro != nil {\n\t\t\tif auto401 {\n\t\t\t\tc.AbortWithError(http.StatusUnauthorized, erro)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(j)\n\t\t//fmt.Println(auth)\n\t\t//fmt.Println(token)\n\t\t/*\n\t\t\t\ttoken, err := request.ParseFromRequest(c.Request, request.OAuth2Extractor, func(token *jwt.Token) (interface{}, error) {\n\t\t\t\t\tb := ([]byte(common.NBSecretPassword))\n\t\t\t\t\treturn b, nil\n\t\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tif auto401 {\n\t\t\t\t\tc.AbortWithError(http.StatusUnauthorized, err)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t*/\n\t\t//_, err := session.FindSession(&session.SessionModel{Token: token.Raw})\n\t\t/*\n\t\t\tif claims, ok := token.Claims.(jwt.MapClaims); ok && token.Valid {\n\t\t\t\tmyUserID := uint(claims[\"id\"].(float64))\n\t\t\t\tusers.UpdateContextUserModel(c, myUserID)\n\t\t\t}*/\n\t}\n}", "func authMiddleware() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\t// Get the client secret key\n\t\terr := jwtMiddleWare.CheckJWT(c.Writer, c.Request)\n\t\tif err != nil {\n\t\t\t// Token not found\n\t\t\tfmt.Println(err)\n\t\t\tc.Abort()\n\t\t\tc.Writer.WriteHeader(http.StatusUnauthorized)\n\t\t\tc.Writer.Write([]byte(\"Unauthorized\"))\n\t\t\treturn\n\t\t}\n\t}\n}", "func jwtAuth(r *ghttp.Request) {\n\tapi.Auth.MiddlewareFunc()(r)\n\tr.Middleware.Next()\n}", "func New(authenticator auth.Authenticator) clevergo.MiddlewareFunc {\n\treturn func(next clevergo.Handle) clevergo.Handle {\n\t\treturn func(c *clevergo.Context) error {\n\t\t\tidentity, err := authenticator.Authenticate(c.Request, c.Response)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\tauthenticator.Challenge(c.Request, c.Response)\n\t\t\t} else {\n\t\t\t\tc.WithValue(auth.IdentityKey, identity)\n\t\t\t}\n\t\t\treturn next(c)\n\t\t}\n\t}\n}", "func AuthMiddleware(authURL string, insecure bool, skipPath []string) echo.MiddlewareFunc {\n\tauth := newKeystoneAuth(authURL, insecure)\n\n\treturn func(next echo.HandlerFunc) echo.HandlerFunc {\n\t\treturn func(c echo.Context) error {\n\t\t\tfor _, pathQuery := range skipPath {\n\t\t\t\tswitch c.Request().URL.Path {\n\t\t\t\tcase \"/\":\n\t\t\t\t\treturn next(c)\n\t\t\t\tdefault:\n\t\t\t\t\tif strings.Contains(pathQuery, \"?\") {\n\t\t\t\t\t\tpaths := strings.Split(pathQuery, \"?\")\n\t\t\t\t\t\tif strings.Contains(c.Request().URL.Path, paths[0]) &&\n\t\t\t\t\t\t\tstrings.Compare(c.Request().URL.RawQuery, paths[1]) == 0 {\n\t\t\t\t\t\t\treturn next(c)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else if strings.Contains(c.Request().URL.Path, pathQuery) {\n\t\t\t\t\t\treturn next(c)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tr := c.Request()\n\t\t\tif r.ProtoMajor == 2 && strings.Contains(r.Header.Get(\"Content-Type\"), \"application/grpc\") {\n\t\t\t\t// Skip grpc\n\t\t\t\treturn next(c)\n\t\t\t}\n\t\t\ttokenString := r.Header.Get(\"X-Auth-Token\")\n\t\t\tif tokenString == \"\" {\n\t\t\t\tcookie, _ := 
r.Cookie(\"x-auth-token\") // nolint: errcheck\n\t\t\t\tif cookie != nil {\n\t\t\t\t\ttokenString = cookie.Value\n\t\t\t\t}\n\t\t\t\tif tokenString == \"\" {\n\t\t\t\t\ttokenString = c.QueryParam(\"auth_token\")\n\t\t\t\t}\n\t\t\t}\n\t\t\tctx, err := authenticate(r.Context(), auth, tokenString)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Errorf(\"Authentication failure: %s\", err)\n\t\t\t\treturn errutil.ToHTTPError(err)\n\t\t\t}\n\t\t\tnewRequest := r.WithContext(ctx)\n\t\t\tc.SetRequest(newRequest)\n\t\t\treturn next(c)\n\t\t}\n\t}\n}", "func (t *Controller) AuthMiddleware() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\n\t\t// Set access token and start the auth process\n\t\tvar access_token = \"\"\n\n\t\t// Make sure we have a Bearer token.\n\t\tauth := strings.SplitN(c.Request.Header.Get(\"Authorization\"), \" \", 2)\n\n\t\tif len(auth) != 2 || auth[0] != \"Bearer\" {\n\n\t\t\t// We allow access token from the command line\n\t\t\tif os.Getenv(\"APP_ENV\") == \"local\" {\n\n\t\t\t\taccess_token = c.Query(\"access_token\")\n\n\t\t\t\tif len(access_token) <= 0 {\n\t\t\t\t\tc.JSON(http.StatusUnauthorized, gin.H{\"error\": \"Authorization Failed (#101)\"})\n\t\t\t\t\tc.AbortWithStatus(401)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\tc.JSON(http.StatusUnauthorized, gin.H{\"error\": \"Authorization Failed (#001)\"})\n\t\t\t\tc.AbortWithStatus(401)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t} else {\n\t\t\taccess_token = auth[1]\n\t\t}\n\n\t\t// See if this session is in our db.\n\t\tsession, err := t.DB.GetByAccessToken(access_token)\n\n\t\tif err != nil {\n\t\t\tservices.Critical(\"Access Token Not Found - Unable to Authenticate via HTTP (#002)\")\n\t\t\tc.JSON(http.StatusUnauthorized, gin.H{\"error\": \"Authorization Failed (#002)\"})\n\t\t\tc.AbortWithStatus(401)\n\t\t\treturn\n\t\t}\n\n\t\t// Get this user is in our db.\n\t\tuser, err := t.DB.GetUserById(session.UserId)\n\n\t\tif err != nil {\n\t\t\tservices.Critical(\"User Not Found - Unable to Authenticate - UserId (HTTP) : \" + fmt.Sprint(session.UserId) + \" - Session Id : \" + fmt.Sprint(session.Id))\n\t\t\tc.JSON(http.StatusUnauthorized, gin.H{\"error\": \"Authorization Failed (#003)\"})\n\t\t\tc.AbortWithStatus(401)\n\t\t\treturn\n\t\t}\n\n\t\t// Log this request into the last_activity col.\n\t\tsession.LastActivity = time.Now()\n\t\tsession.LastIpAddress = realip.RealIP(c.Request)\n\t\tt.DB.UpdateSession(&session)\n\n\t\t// Add this user to the context\n\t\tc.Set(\"userId\", user.Id)\n\n\t\t// CORS for local development.\n\t\tif os.Getenv(\"APP_ENV\") == \"local\" {\n\t\t\tc.Writer.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\t\t}\n\n\t\t// On to next request in the Middleware chain.\n\t\tc.Next()\n\t}\n}", "func (svc *Service) createMiddleware(cfg *config.Configuration) alice.Chain {\n\tidentityHandler := dphandlers.IdentityWithHTTPClient(svc.identityClient)\n\treturn alice.New(\n\t\tmiddleware.Whitelist(middleware.HealthcheckFilter(svc.healthCheck.Handler)),\n\t\tdprequest.HandlerRequestID(16),\n\t\tidentityHandler,\n\t)\n}", "func (a *JWTAuth) AuthMiddleware() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\t// just handle the simplest case, authorization is not provided.\n\t\th := c.GetHeader(\"Authorization\")\n\t\tif h == \"\" {\n\t\t\tc.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{\"message\": \"empty header was sent\",\n\t\t\t\t\"code\": \"unauthorized\"})\n\t\t\treturn\n\t\t}\n\t\tclaims, err := a.VerifyJWT(h)\n\t\tlog.Printf(\"They key is: %v\", a.Key)\n\t\tif e, ok := 
err.(*jwt.ValidationError); ok {\n\t\t\tif e.Errors&jwt.ValidationErrorExpired != 0 {\n\t\t\t\t// in this case you might need to give it another spin\n\t\t\t\tc.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{\"message\": \"Token has expired\", \"code\": \"jwt_expired\"})\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tc.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{\"message\": \"Malformed token\", \"code\": \"jwt_malformed\"})\n\t\t\t\treturn\n\t\t\t}\n\t\t} else if err == nil {\n\t\t\t// FIXME it is better to let the endpoint explicitly Get the claim off the user\n\t\t\t// as we will assume the auth server will reside in a different domain!\n\t\t\tc.Set(\"mobile\", claims.Mobile)\n\t\t\tlog.Printf(\"the username is: %s\", claims.Mobile)\n\t\t\tc.Next()\n\t\t}\n\t}\n\n}", "func AuthMiddleware(next http.HandlerFunc) http.HandlerFunc {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tauthHeader := r.Header.Get(\"Authorization\")\n\t\tif authHeader != \"\" {\n\t\t\tbearer := strings.Split(authHeader, \" \")\n\t\t\tif len(bearer) == 2 {\n\t\t\t\ttoken, err := jwt.Parse(bearer[1], func(token *jwt.Token) (interface{}, error) {\n\t\t\t\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\t\t\t\treturn nil, fmt.Errorf(\"Error\")\n\t\t\t\t\t}\n\t\t\t\t\treturn []byte(\"secret\"), nil\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\tjson.NewEncoder(w).Encode(err.Error())\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif token.Valid {\n\t\t\t\t\tnext(w, r)\n\t\t\t\t} else {\n\t\t\t\t\tresMap := make(map[string]string)\n\t\t\t\t\tresMap[\"message\"] = \"Failed\"\n\n\t\t\t\t\tres, err := json.Marshal(resMap)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\t\t}\n\t\t\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\t\t\tw.Write(res)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\thttp.Error(w, \"Invalid authorization header\", http.StatusBadRequest)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tresMap := make(map[string]string)\n\t\t\tresMap[\"message\"] = \"Failed\"\n\n\t\t\tres, err := json.Marshal(resMap)\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t}\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\tw.Write(res)\n\t\t}\n\t})\n}", "func AuthMiddlware() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tclaim, ok := auth.TokenValid(c)\n\t\tif ok != nil {\n\t\t\t// c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{\"error\": \"User needs to be signed in to access this service\"})\n\t\t\treturn\n\t\t}\n\n\t\tctx := context.WithValue(\n\t\t\tc.Request.Context(),\n\t\t\ttypes.UserKey(\"user\"),\n\t\t\tclaim)\n\n\t\tc.Request = c.Request.WithContext(ctx)\n\t\tc.Next()\n\t}\n}", "func NewMiddleWare(\n\tcustomClaimsFactory CustomClaimsFactory,\n\tvalidFunction CustomClaimsValidateFunction,\n) *Middleware {\n\treturn &Middleware{\n\t\t// todo: customize signing algorithm\n\t\tSigningAlgorithm: \"HS256\",\n\t\tJWTHeaderKey: \"Authorization\",\n\t\tJWTHeaderPrefixWithSplitChar: \"Bearer \",\n\t\tSigningKeyString: GetSignKey(),\n\t\tSigningKey: []byte(GetSignKey()),\n\t\tcustomClaimsFactory: customClaimsFactory,\n\t\tvalidFunction: validFunction,\n\t\t// MaxRefresh: default zero\n\t}\n}", "func NewMiddleware(creds *secrets.BasicAuthCredentials) es.CommandHandlerMiddleware {\n\treturn func(handler es.CommandHandler) es.CommandHandler {\n\t\treturn es.CommandHandlerFunc(func(ctx context.Context, cmd es.Command) error {\n\t\t\tcur, err := 
secrets.AuthFromContext(ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif !creds.Equals(cur) {\n\t\t\t\treturn ErrInvalidCredentials\n\t\t\t}\n\n\t\t\treturn handler.HandleCommand(ctx, cmd)\n\t\t})\n\t}\n}", "func (routes *Routes) AuthMiddleware(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tauthHeader := r.Header[\"Authorization\"]\n\t\tif len(authHeader) == 0 {\n\t\t\twriteErrorMessage(w, \"Unauthorized\", http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\tauthString := authHeader[0]\n\n\t\tauthParts := strings.Split(authString, \" \")\n\t\tif len(authParts) != 2 {\n\t\t\twriteErrorMessage(w, \"Unauthorized\", http.StatusUnauthorized)\n\t\t\tlog.Println(\"Unexpected auth header:\", authString)\n\t\t\treturn\n\t\t}\n\n\t\ttoken := authParts[1]\n\n\t\temail, err := routes.extractEmailFromJWT(token)\n\t\tif err != nil {\n\t\t\twriteErrorMessage(w, \"Unauthorized\", http.StatusUnauthorized)\n\t\t\tlog.Printf(\"Error parsing jwt %s error: %s\\n\", token, err.Error())\n\t\t\treturn\n\t\t}\n\n\t\t// set the email back on the context\n\t\tc := context.WithValue(r.Context(), emailContextKey, email)\n\t\tr = r.WithContext(c)\n\n\t\tnext.ServeHTTP(w, r)\n\t})\n}", "func GetAuthMiddleware(signingKey []byte) *jwtmiddleware.JWTMiddleware {\n\treturn jwtmiddleware.New(jwtmiddleware.Options{\n\t\tValidationKeyGetter: func(token *jwt.Token) (interface{}, error) {\n\t\t\treturn signingKey, nil\n\t\t},\n\t\tSigningMethod: jwt.SigningMethodHS256,\n\t})\n}", "func (amw *AuthMiddleware) Middleware(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\n\t\t// r.Host\n\n\t\tglobal.SetHttpHome(\"http://\" + r.Host)\n\t\tglobal.SetHttpSolicitado(\"http://\" + r.Host + r.URL.RequestURI())\n\t\t// global.Logger.Rosa(global.HttpSolicitado())\n\n\t\t//fmt.Printf(\"%C \\n\", r.Header)\n\t\tnext.ServeHTTP(w, r)\n\t\t// if user, found := amw.TokenUsers[token]; found {\n\t\t// \t// We found the token in our map\n\t\t// \tlog.Printf(\"Authenticated user %s\\n\", user)\n\t\t// \t// Pass down the request to the next middleware (or final handler)\n\t\t// \tnext.ServeHTTP(w, r)\n\t\t// } else {\n\t\t// \t// Write an error and stop the handler chain\n\t\t// \thttp.Error(w, \"Forbidden\", http.StatusForbidden)\n\t\t// }\n\t})\n}", "func AuthMiddlewareHandlerFunc(authenticator *Authenticator, handleFunc http.HandlerFunc) func(w http.ResponseWriter, r *http.Request) {\n\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\n\t\tuser := authenticator.AuthenticateHttpRequest(r)\n\t\tif user != nil {\n\t\t\thandleFunc(w, r.WithContext(context.WithValue(r.Context(), \"User\", user)))\n\t\t} else {\n\t\t\tw.Header().Add(\"WWW-Authenticate\", \"Basic realm=restricted\")\n\t\t\thttp.Error(w, http.StatusText(http.StatusUnauthorized),\n\t\t\t\thttp.StatusUnauthorized)\n\t\t}\n\n\t}\n\n}", "func AuthMiddleware(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tvar loggedIn bool\n\t\tsessionStore, err := sessionManager.cookieStore.Get(r, \"auth\")\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tif _, ok := sessionStore.Values[\"token\"]; ok {\n\t\t\ttoken := sessionStore.Values[\"token\"].(string)\n\t\t\tuser := User{}\n\t\t\tsession := &Session{Token: token}\n\n\t\t\tdb.Where(session).First(session)\n\t\t\tdb.Model(session).Related(&user)\n\t\t\tif user != (User{}) 
{\n\t\t\t\tsession.LastSeenTime = time.Now()\n\t\t\t\tgo db.Save(session)\n\t\t\t\tcontext.Set(r, \"user\", &user)\n\t\t\t\tloggedIn = true\n\t\t\t}\n\t\t}\n\t\tcontext.Set(r, \"loggedIn\", loggedIn)\n\n\t\th.ServeHTTP(w, r)\n\t})\n}", "func AuthenticationMiddleware(repo *storage.Repositories) echo.MiddlewareFunc {\n\treturn func(next echo.HandlerFunc) echo.HandlerFunc {\n\t\treturn func(c echo.Context) error {\n\t\t\tuser, err := auth.CheckAuth(repo, c)\n\t\t\tif err != nil {\n\t\t\t\tctx := graph.AddAnonymousUserToContext(c.Request().Context())\n\t\t\t\tc.SetRequest(c.Request().WithContext(ctx))\n\t\t\t\treturn next(c)\n\t\t\t}\n\t\t\tctx := graph.AddUserToContext(c.Request().Context(), user)\n\t\t\tc.SetRequest(c.Request().WithContext(ctx))\n\t\t\treturn next(c)\n\n\t\t}\n\t}\n}", "func (h *Handler) oAuthMiddleware(c *gin.Context) {\n\tauthError := func() error {\n\t\twt := c.GetHeader(\"Authorization\")\n\t\tif wt == \"\" {\n\t\t\treturn errors.New(\"Authorization header not set\")\n\t\t}\n\t\tclaims, err := h.parseJWT(wt)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"could not parse JWT\")\n\t\t}\n\t\tc.Set(\"user\", claims)\n\t\treturn nil\n\t}()\n\tif authError != nil {\n\t\tc.AbortWithStatusJSON(http.StatusForbidden, gin.H{\n\t\t\t\"error\": \"authentication failed\",\n\t\t})\n\t\tlogrus.Debugf(\"Authentication middleware check failed: %v\\n\", authError)\n\t\treturn\n\t}\n\tc.Next()\n}", "func NewEnsureAuth(handlerToWrap http.Handler, permission string) http.Handler {\n\n\treturn &EnsureAuth{\n\t\tpermission: permission,\n\t\thandler: handlerToWrap,\n\t}\n}", "func (c *AuthMiddleware) Middleware() echo.MiddlewareFunc {\n\treturn func(next echo.HandlerFunc) echo.HandlerFunc {\n\t\treturn func(ce echo.Context) error {\n\t\t\tcfg := middleware.DefaultJWTConfig\n\t\t\tcfg.SigningKey = []byte(c.Auth.JWTSecret)\n\t\t\tcfg.TokenLookup = \"cookie:secure_token\"\n\n\t\t\tif err := middleware.JWTWithConfig(cfg)(next)(ce); err != nil {\n\t\t\t\tcleanCookie(ce)\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\t}\n}", "func AuthJWTMiddleware(iss, userPoolID, region string) (*AuthMiddleware, error) {\n\n\t// Download the public json web key for the given user pool ID at the start of the plugin\n\tjwk, err := getJWK(fmt.Sprintf(\"https://cognito-idp.%v.amazonaws.com/%v/.well-known/jwks.json\", region, userPoolID))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tauthMiddleware := &AuthMiddleware{\n\t\tTimeout: time.Hour,\n\n\t\tUnauthorized: func(c *gin.Context, code int, message string) {\n\t\t\tc.JSON(code, AuthError{Code: code, Message: message})\n\t\t},\n\n\t\t// Token header\n\t\tTokenLookup: \"header:\" + AuthorizationHeader,\n\t\tTimeFunc: time.Now,\n\t\tJWK: jwk,\n\t\tIss: iss,\n\t\tRegion: region,\n\t\tUserPoolID: userPoolID,\n\t}\n\treturn authMiddleware, nil\n}", "func AuthMiddleware(handler http.HandlerFunc, session *r.Session) http.HandlerFunc {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tvar token string\n\t\tvar username string\n\t\tfmt.Println(r.Header.Get(\"Content-Type\"))\n\t\tif r.Header.Get(\"Content-Type\") == \"application/x-www-form-urlencoded\" {\n\t\t\t// fmt.Println(\"Multipart\")\n\t\t\tr.ParseForm()\n\t\t\ttoken = r.Form.Get(\"token\")\n\t\t\tusername = r.Form.Get(\"username\")\n\t\t} else if strings.Contains(r.Header.Get(\"Content-Type\"), \"multipart/form-data\") {\n\t\t\t// fmt.Println(\"Multipart\")\n\t\t\tr.ParseMultipartForm(10 << 20)\n\t\t\ttoken = r.FormValue(\"token\")\n\t\t\tusername = 
r.FormValue(\"username\")\n\t\t\tfmt.Println(token + \":\" + username)\n\t\t}\n\t\tif token != \"\" && username != \"\" && ValidateJWT(token, session) == username {\n\t\t\t// fmt.Println(\"Authorized\")\n\t\t\thandler.ServeHTTP(w, r)\n\t\t} else {\n\t\t\tfmt.Fprint(w, `{ \"error\" : \"Not Authorized\"}`)\n\t\t}\n\t})\n}", "func authWrap(h http.Handler) http.Handler {\n\treturn negroni.New(&AuthMiddleware{}, negroni.Wrap(h))\n}", "func (cx *Context) NewEnsureAuth(handler http.Handler) http.Handler {\n\treturn &EnsureAuth{handler, cx}\n}", "func authMiddleware(handler func(*int, http.ResponseWriter, *http.Request)) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t// given auth token, finds user info\n\t\tc, statusCode, err := fetchCookie(r)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(statusCode)\n\t\t\t_, _ = fmt.Fprintf(w, err.Error())\n\t\t\treturn\n\t\t}\n\t\tuserId, err := wardrobe.VerifyCookie(c.Value)\n\t\tif err != nil {\n\t\t\tif err == redis.Nil {\n\t\t\t\t// This means there wasn't a valid user id mapped by the cookie\n\t\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\t\t_, _ = fmt.Fprintf(w, \"Invalid session_token cookie: %s\", c.Value)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// If there is an error fetching from cache, return an internal server error status\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t_, _ = fmt.Fprintf(w, err.Error())\n\t\t\treturn\n\t\t}\n\t\tif userId == nil {\n\t\t\t// If the session token is not present in cache, return an unauthorized error\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\thandler(userId, w, r)\n\t}\n}", "func Auth() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tfmt.Println(\"Auth middleware\")\n\t}\n}", "func AuthenticationMiddleware(next http.HandlerFunc) http.HandlerFunc {\n return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n authorizationHeader := req.Header.Get(\"authorization\")\n if authorizationHeader != \"\" {\n bearerToken := strings.Split(authorizationHeader, \" \")\n if len(bearerToken) == 2 {\n token, error := jwt.Parse(bearerToken[1], func(token *jwt.Token) (interface{}, error) {\n if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n return nil, fmt.Errorf(\"There was an error\")\n }\n return []byte(\"secret\"), nil\n })\n if error != nil {\n json.NewEncoder(w).Encode(Exception{Message: error.Error()})\n return\n }\n if token.Valid {\n log.Println(\"TOKEN WAS VALID\")\n context.Set(req, \"decoded\", token.Claims)\n next(w, req)\n } else {\n json.NewEncoder(w).Encode(Exception{Message: \"Invalid authorization token\"})\n }\n }\n } else {\n json.NewEncoder(w).Encode(Exception{Message: \"An authorization header is required\"})\n }\n })\n}", "func MakeAuth(v *jwt.Verifier) wrappedHandlerFunc {\n\treturn func() api.HandlerFunc {\n\t\treturn func(ctx *api.CTX, next func()) {\n\t\t\theader := ctx.R.Header.Get(\"Authorization\")\n\t\t\tparts := strings.Split(header, \" \")\n\t\t\tif len(parts) != 2 {\n\t\t\t\tctx.SendResponse(http.StatusUnauthorized, nil, \"Must be authenticated\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif parts[0] != \"Bearer\" {\n\t\t\t\tctx.SendResponse(http.StatusUnauthorized, nil, \"Authorization method not supported\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttoken, err := v.Verify(parts[1])\n\t\t\tif err != nil {\n\t\t\t\tctx.SendResponse(http.StatusUnauthorized, nil, \"Provided JWT is invalid\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tuser := &api.User{}\n\t\t\tuser.Token = token.Body\n\n\t\t\tid := 
token.Body[\"sub\"]\n\t\t\tuser.ID = &id\n\t\t\tctx.User = user\n\t\t\tnext()\n\t\t}\n\t}\n}", "func Middleware(option BasicAuthOption) *BasicAuthMiddleware {\n\treturn &BasicAuthMiddleware{option: option}\n}", "func SetMiddlewareAuthentication(next http.HandlerFunc) http.HandlerFunc {\r\n\treturn func(w http.ResponseWriter, r *http.Request) {\r\n\r\n\t\terr := auth.IsTokenValid(r)\r\n\t\tif err != nil {\r\n\t\t\tresponses.ERROR(w, http.StatusUnauthorized, errors.New(\"Unauthorized\"))\r\n\t\t\treturn\r\n\t\t}\r\n\r\n\t\tnext(w, r)\r\n\t}\r\n}", "func Auth() func(http.Handler) http.Handler {\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tgivenAPIKey := r.Header.Get(\"API-KEY\")\n\n\t\t\t// Pass through unauthenticated requests.\n\t\t\tif givenAPIKey == \"\" {\n\t\t\t\tnext.ServeHTTP(w, r)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Inject admin user.\n\t\t\tif givenAPIKey == adminAPIKey {\n\t\t\t\tuser := &User{Name: \"Eric\", IsAdmin: true}\n\t\t\t\tctx := context.WithValue(r.Context(), userKey, user)\n\t\t\t\tr = r.WithContext(ctx)\n\t\t\t\tnext.ServeHTTP(w, r)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Inject non admin user.\n\t\t\tuser := &User{Name: \"Greg\", IsAdmin: false}\n\t\t\tctx := context.WithValue(r.Context(), userKey, user)\n\t\t\tr = r.WithContext(ctx)\n\t\t\tnext.ServeHTTP(w, r)\n\t\t})\n\t}\n}", "func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\ttoken := r.Header.Get(\"X-WebAuth-User\")\n\n\t\tuserExists, databaseErr := userExists(token)\n\n\t\tif DEBUG {\n\t\t\tuserExists = true\n\t\t}\n\n\t\tif databaseErr != nil {\n\t\t\thttp.Error(w, \"Internal Server Error\", http.StatusInternalServerError)\n\t\t}\n\n\t\tif userExists {\n\t\t\tlog.Printf(\"Authenticated user %s\\n\", token)\n\t\t\tnext.ServeHTTP(w, r)\n\t\t} else {\n\t\t\thttp.Error(w, \"Forbidden\", http.StatusUnauthorized)\n\t\t}\n\t})\n}", "func authWrapFunc(f func(http.ResponseWriter, *http.Request)) http.Handler {\n\treturn negroni.New(&AuthMiddleware{}, negroni.Wrap(http.HandlerFunc(f)))\n}", "func (amw *jwtMiddleware) Middleware(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t// TODO: Log error details when authentication fails\n\t\tctx := r.Context()\n\t\tauthorizationHeader := r.Header.Get(\"authorization\")\n\t\tif authorizationHeader == \"\" {\n\t\t\tinvalidAuthResponse(w)\n\t\t\treturn\n\t\t}\n\n\t\tbearerToken := strings.Split(authorizationHeader, \" \")\n\t\tif len(bearerToken) != 2 {\n\t\t\tinvalidAuthResponse(w)\n\t\t\treturn\n\t\t}\n\n\t\tclaims := &JWTClaims{}\n\t\ttoken, err := jwt.ParseWithClaims(bearerToken[1], claims, func(token *jwt.Token) (interface{}, error) {\n\t\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid signing method\")\n\t\t\t}\n\t\t\treturn []byte(amw.cfg.Secret), nil\n\t\t})\n\n\t\tif err != nil {\n\t\t\tinvalidAuthResponse(w)\n\t\t\treturn\n\t\t}\n\n\t\tif !token.Valid {\n\t\t\tinvalidAuthResponse(w)\n\t\t\treturn\n\t\t}\n\n\t\tctx, err = amw.claimsToContext(ctx, claims)\n\t\tif err != nil {\n\t\t\tinvalidAuthResponse(w)\n\t\t\treturn\n\t\t}\n\t\tif !IsEnabled(ctx) || IsAnonymous(ctx) {\n\t\t\tinvalidAuthResponse(w)\n\t\t\treturn\n\t\t}\n\n\t\tif err := amw.manager.ValidateToken(claims.TokenID); err != nil {\n\t\t\tinvalidAuthResponse(w)\n\t\t\treturn\n\t\t}\n\t\tctx = 
SetJWTClaim(ctx, *claims)\n\t\tnext.ServeHTTP(w, r.WithContext(ctx))\n\t})\n}", "func AuthenticationMiddleware(nextHandler http.HandlerFunc) http.HandlerFunc {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tauthorizationHeader := r.Header.Get(\"Authorization\")\n\t\tif authorizationHeader == \"\" {\n\t\t\tRespondWithError(w, http.StatusUnauthorized, \"You are not authorized\")\n\t\t\treturn\n\t\t}\n\t\ttoken, err := jwt.Parse(authorizationHeader, func(token *jwt.Token) (interface{}, error) {\n\t\t\t// Don't forget to validate the alg is what you expect:\n\t\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"Unexpected signing method: %v\", token.Header[\"alg\"])\n\t\t\t}\n\n\t\t\t// hmacSampleSecret is a []byte containing your secret, e.g. []byte(\"my_secret_key\")\n\t\t\treturn []byte(SecreteKey), nil\n\t\t})\n\n\t\tif claims, ok := token.Claims.(jwt.MapClaims); ok && token.Valid {\n\t\t\tvar id string\n\t\t\tid, ok = claims[\"id\"].(string)\n\t\t\tif !ok {\n\t\t\t\tRespondWithError(w, http.StatusUnauthorized, \"Error converting claim to string\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx := context.WithValue(r.Context(), \"user_id\", id)\n\t\t\tnextHandler.ServeHTTP(w, r.WithContext(ctx))\n\t\t} else {\n\t\t\tfmt.Println(err)\n\t\t\tRespondWithError(w, http.StatusUnauthorized, \"An authorized error occurred\")\n\t\t}\n\t})\n}", "func NewMiddleware(backend TokenIntrospecter) *Middleware {\n\treturn &Middleware{Backend: backend}\n}", "func (a Auth) Auth() func(http.Handler) http.Handler {\n\treturn func(next http.Handler) http.Handler {\n\t\tif a.UnauthHandler == nil {\n\t\t\ta.UnauthHandler = DefaultUnauthHandler\n\t\t}\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tclaims, ok := jwtauth.GetClaimsFromContext(r.Context())\n\t\t\tif !ok {\n\t\t\t\tvar err error\n\t\t\t\tclaims, err = a.AuthenticateRequest(r)\n\t\t\t\tif err != nil {\n\t\t\t\t\ta.UnauthHandler(w, r, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tr = r.WithContext(jwtauth.AddClaimsToContext(r.Context(), claims))\n\t\t\t}\n\t\t\tif err := a.Authorise(claims); err != nil {\n\t\t\t\ta.UnauthHandler(w, r, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tnext.ServeHTTP(w, r)\n\t\t})\n\t}\n}", "func NewLocalAuthenticationMiddleware() func(http.Handler) http.Handler {\n\treturn func(next http.Handler) http.Handler {\n\t\treturn authenticateMiddleware(next)\n\t}\n}", "func (mw *AuthMiddleware) MiddlewareInit() {\n\n\tif mw.TokenLookup == \"\" {\n\t\tmw.TokenLookup = \"header:\" + AuthorizationHeader\n\t}\n\n\tif mw.Timeout == 0 {\n\t\tmw.Timeout = time.Hour\n\t}\n\n\tif mw.TimeFunc == nil {\n\t\tmw.TimeFunc = time.Now\n\t}\n\n\tif mw.Unauthorized == nil {\n\t\tmw.Unauthorized = func(c *gin.Context, code int, message string) {\n\t\t\tc.JSON(code, AuthError{Code: code, Message: message})\n\t\t}\n\t}\n\n\tif mw.Realm == \"\" {\n\t\tmw.Realm = \"gin jwt\"\n\t}\n}", "func Setup(app *buffalo.App, provider Provider, config Config) *Authentic {\n\tconfig.applyDefault()\n\n\tmanager := &Authentic{\n\t\tapp: app,\n\t\tprovider: provider,\n\t\tConfig: config,\n\t}\n\n\tapp.Use(manager.AuthorizeMW, manager.CurrentUserMW)\n\n\tapp.GET(config.LoginPath, manager.login)\n\tapp.POST(config.LoginPath, manager.loginHandler)\n\tapp.DELETE(config.LogoutPath, manager.logoutHandler)\n\n\tfor _, mw := range []buffalo.MiddlewareFunc{manager.CurrentUserMW, manager.AuthorizeMW} {\n\t\tapp.Middleware.Skip(mw, manager.login, manager.loginHandler, 
manager.logoutHandler)\n\t\tapp.Middleware.Skip(mw, manager.Config.PublicHandlers...)\n\t}\n\n\treturn manager\n}", "func AuthMiddleware(handler http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tusername, password, _ := r.BasicAuth()\n\t\tfmt.Println(r.BasicAuth())\n\t\tif username==\"\" || !checkUsernameAndPassword(username, password) {\n\t\t\t//w.Header().Set(\"WWW-Authenticate\", `Basic realm=\"Please enter your username and password for this site\"`)\n\t\t\t//w.WriteHeader(401)\n\t\t\t//w.Write([]byte(\"Unauthorised.\\n\"))\n\t\t\t//w.Write([]byte(\"checking session instead.\\n\"))\n\t\t\tsession, err := store.Get(r, \"cookie-name\")\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tuser := getUser(session)\n\t\t\tfmt.Println(user)\n\t\t\tif auth := user.Authenticated; !auth {\n\t\t\t\tsession.AddFlash(\"You don't have access!\")\n\t\t\t\terr = session.Save(r, w)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"You don't have access!\")\n\t\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tfmt.Printf(\"You don't have access!\")\n\t\t\t\thttp.Redirect(w, r, \"/forbidden\", http.StatusFound)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfmt.Println(\"authenticated via user session\")\n\t\t\thandler(w, r)\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(\"authenticated via basic auth\")\n\t\thandler(w, r)\n\t}\n}", "func AuthMiddlewareHandler(authenticator *Authenticator, h http.Handler) http.Handler {\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\n\t\tuser := authenticator.AuthenticateHttpRequest(r)\n\t\tif user != nil {\n\t\t\th.ServeHTTP(w, r.WithContext(context.WithValue(r.Context(), \"User\", user)))\n\t\t} else {\n\t\t\tw.Header().Add(\"WWW-Authenticate\", \"Basic realm=restricted\")\n\t\t\thttp.Error(w, http.StatusText(http.StatusUnauthorized),\n\t\t\t\thttp.StatusUnauthorized)\n\t\t}\n\n\t})\n\n}", "func NewMiddleware(config *Config) (*Middleware, error) {\n\tif err := config.Valid(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tmux := http.NewServeMux()\n\tconfig.mux = mux\n\n\tm := &Middleware{\n\t\tConfig: config,\n\t}\n\tmux.HandleFunc(m.CallbackPath, m.authorizeCallback)\n\tmux.HandleFunc(\"/\", m.defaultHandler)\n\n\treturn m, nil\n}", "func AuthenticationMiddleware() echo.MiddlewareFunc {\n\treturn middleware.JWTWithConfig(middleware.JWTConfig{\n\t\tSigningMethod: \"HS256\",\n\t\tSigningKey: []byte(config.Config.JWTSecret),\n\t})\n}", "func MustAuth(handler http.Handler) http.Handler {\n return &authHandler{next: handler}\n}", "func (s *Server) AuthMiddleware(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tcookie, err := r.Cookie(\"password\")\n\t\tif err != nil || cookie.Value != s.Password {\n\t\t\thttp.Redirect(w, r, \"/login?redirect=\"+url.QueryEscape(r.URL.String()), http.StatusFound)\n\t\t\treturn\n\t\t}\n\t\tr = ctxAppendTemplateVars(r, map[string]interface{}{\n\t\t\t\"loggedin\": true,\n\t\t\t\"redirect\": r.URL.String(),\n\t\t})\n\t\tnext.ServeHTTP(w, r)\n\t})\n}", "func handlerAuthCheck(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tswitch adminConfig.Auth {\n\t\tcase settings.AuthDB:\n\t\t\t// Check if user is already authenticated\n\t\t\tauthenticated, session := sessionsmgr.CheckAuth(r)\n\t\t\tif !authenticated {\n\t\t\t\thttp.Redirect(w, r, \"/login\", 
http.StatusFound)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// Set middleware values\n\t\t\ts := make(sessions.ContextValue)\n\t\t\ts[ctxUser] = session.Username\n\t\t\ts[ctxCSRF] = session.Values[ctxCSRF].(string)\n\t\t\tctx := context.WithValue(r.Context(), sessions.ContextKey(\"session\"), s)\n\t\t\t// Update metadata for the user\n\t\t\tif err := adminUsers.UpdateMetadata(session.IPAddress, session.UserAgent, session.Username, s[\"csrftoken\"]); err != nil {\n\t\t\t\tlog.Printf(\"error updating metadata for user %s: %v\", session.Username, err)\n\t\t\t}\n\t\t\t// Access granted\n\t\t\th.ServeHTTP(w, r.WithContext(ctx))\n\t\tcase settings.AuthSAML:\n\t\t\t_, err := samlMiddleware.Session.GetSession(r)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"GetSession %v\", err)\n\t\t\t}\n\t\t\tcookiev, err := r.Cookie(samlConfig.TokenName)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"error extracting JWT data: %v\", err)\n\t\t\t\thttp.Redirect(w, r, samlConfig.LoginURL, http.StatusFound)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tjwtdata, err := parseJWTFromCookie(samlData.KeyPair, cookiev.Value)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"error parsing JWT: %v\", err)\n\t\t\t\thttp.Redirect(w, r, samlConfig.LoginURL, http.StatusFound)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// Check if user is already authenticated\n\t\t\tauthenticated, session := sessionsmgr.CheckAuth(r)\n\t\t\tif !authenticated {\n\t\t\t\t// Create user if it does not exist\n\t\t\t\tif !adminUsers.Exists(jwtdata.Username) {\n\t\t\t\t\tlog.Printf(\"user not found: %s\", jwtdata.Username)\n\t\t\t\t\thttp.Redirect(w, r, forbiddenPath, http.StatusFound)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tu, err := adminUsers.Get(jwtdata.Username)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"error getting user %s: %v\", jwtdata.Username, err)\n\t\t\t\t\thttp.Redirect(w, r, forbiddenPath, http.StatusFound)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\taccess, err := adminUsers.GetEnvAccess(u.Username, u.DefaultEnv)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"error getting access for %s: %v\", jwtdata.Username, err)\n\t\t\t\t\thttp.Redirect(w, r, forbiddenPath, http.StatusFound)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\t// Create new session\n\t\t\t\tsession, err = sessionsmgr.Save(r, w, u, access)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"session error: %v\", err)\n\t\t\t\t\thttp.Redirect(w, r, samlConfig.LoginURL, http.StatusFound)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Set middleware values\n\t\t\ts := make(sessions.ContextValue)\n\t\t\ts[ctxUser] = session.Username\n\t\t\ts[ctxCSRF] = session.Values[ctxCSRF].(string)\n\t\t\tctx := context.WithValue(r.Context(), sessions.ContextKey(\"session\"), s)\n\t\t\t// Update metadata for the user\n\t\t\terr = adminUsers.UpdateMetadata(session.IPAddress, session.UserAgent, session.Username, s[\"csrftoken\"])\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"error updating metadata for user %s: %v\", session.Username, err)\n\t\t\t}\n\t\t\t// Access granted\n\t\t\tsamlMiddleware.RequireAccount(h).ServeHTTP(w, r.WithContext(ctx))\n\t\t}\n\t})\n}", "func authMiddleware(handler http.HandlerFunc) http.HandlerFunc {\n\t// return auth middleware if configuration settings are present\n\tif cfg.HttpAuthUsername != \"\" && cfg.HttpAuthPassword != \"\" {\n\t\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t\t// poor man's logging:\n\t\t\tfmt.Println(r.Method, r.URL.Path, time.Now())\n\n\t\t\t// If this server is operating behind a proxy, but we still want to force\n\t\t\t// users to use https, cfg.ProxyForceHttps == true will 
listen for the common\n\t\t\t// X-Forward-Proto & redirect to https\n\t\t\tif cfg.ProxyForceHttps {\n\t\t\t\tif r.Header.Get(\"X-Forwarded-Proto\") == \"http\" {\n\t\t\t\t\tw.Header().Set(\"Connection\", \"close\")\n\t\t\t\t\turl := \"https://\" + r.Host + r.URL.String()\n\t\t\t\t\thttp.Redirect(w, r, url, http.StatusMovedPermanently)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tuser, pass, ok := r.BasicAuth()\n\t\t\tif !ok || subtle.ConstantTimeCompare([]byte(user), []byte(cfg.HttpAuthUsername)) != 1 || subtle.ConstantTimeCompare([]byte(pass), []byte(cfg.HttpAuthPassword)) != 1 {\n\t\t\t\tw.Header().Set(\"WWW-Authenticate\", `Basic realm=\"Please enter your username and password for this site\"`)\n\t\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\t\tw.Write([]byte(\"access denied \\n\"))\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// TODO - Strict Transport config?\n\t\t\t// if cfg.TLS {\n\t\t\t// \t// If TLS is enabled, set 1 week strict TLS, 1 week for now to prevent catastrophic mess-ups\n\t\t\t// \tw.Header().Add(\"Strict-Transport-Security\", \"max-age=604800\")\n\t\t\t// }\n\t\t\thandler(w, r)\n\t\t}\n\t}\n\n\t// no-auth middware func\n\treturn middleware(handler)\n}", "func CheckForAuthMiddleware(s *Setup) func(next http.Handler) http.Handler {\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tif isAuthenticated(r) {\n\t\t\t\tnext.ServeHTTP(w, r)\n\t\t\t\treturn\n\t\t\t}\n\t\t\ts.Logger.Printf(\"unauthenticated access attempt\")\n\t\t\taddCors(w)\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t})\n\t}\n}", "func Middleware() func(http.Handler) http.Handler {\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\t// for _, cookie := range r.Cookies() {\n\t\t\t// \tfmt.Fprint(w, cookie.Name)\n\t\t\t// \tlog.Println(cookie.Name)\n\t\t\t// }\n\t\t\t// log.Println(formatRequest(r))\n\t\t\t// log.Println(r.Cookies())\n\t\t\t// c, err := r.Cookie(\"auth-cookie\")\n\n\t\t\t// Allow unauthenticated users in\n\t\t\t// if err != nil || c == nil {\n\t\t\t// \tnext.ServeHTTP(w, r)\n\t\t\t// \treturn\n\t\t\t// }\n\n\t\t\t_, claims, err := jwtauth.FromContext(r.Context())\n\t\t\t// Allow unauthenticated users in\n\t\t\tif claims == nil || err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\tnext.ServeHTTP(w, r)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tidentity := claims[\"session\"].(map[string]interface{})[\"identity\"]\n\t\t\tlog.Println(identity)\n\t\t\t// parsed, err := gabs.ParseJSON(claims)\n\t\t\t// log.Println(parsed)\n\t\t\t// log.Println(err)\n\n\t\t\tnext.ServeHTTP(w, r)\n\t\t})\n\t}\n}", "func (m *Middleware) AuthMiddleware(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\tvar middlewareUser middlewareUser\n\tvar session *sessions.Session\n\tvar err error\n\n\tif m.SessionKeys == nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tsession, err = m.SessionStore.Get(r, m.SessionKeys.SessionName)\n\n\tif err != nil {\n\t\tfmt.Printf(\"no session err: %s\\n\", err.Error())\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// If session is considered new, that means\n\t// either current user is truly not logged in or cache was/is down\n\tif session.IsNew {\n\t\t// fmt.Printf(\"new session\\n\")\n\n\t\t// First we determine if user is sending a cookie with our user cookie key\n\t\t// If they are, try retrieving from db if Middleware#QueryDB is set\n\t\tif _, err := 
r.Cookie(m.SessionKeys.SessionName); err == nil {\n\t\t\tfmt.Printf(\"has cookie but not found in store\\n\")\n\t\t\tif m.DB != nil && m.QueryDB != nil {\n\t\t\t\tfmt.Printf(\"auth middleware db\\n\")\n\t\t\t\tuserBytes, err := m.QueryDB(r, m.DB, UserQuery)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\tswitch err.(type) {\n\t\t\t\t\tcase securecookie.Error:\n\t\t\t\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tif err == sql.ErrNoRows {\n\t\t\t\t\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\terr = json.Unmarshal(userBytes, &middlewareUser)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\t\tw.Write([]byte(err.Error()))\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// Here we test to see if our session backend is responsive\n\t\t\t\t// If it is, that means current user logged in while cache was down\n\t\t\t\t// and was using the database to grab their sessions but since session\n\t\t\t\t// backend is back up, we can grab current user's session from\n\t\t\t\t// database and set it to session backend and use that instead of database\n\t\t\t\t// for future requests\n\t\t\t\tif _, err = m.SessionStore.Ping(); err == nil {\n\t\t\t\t\tfmt.Printf(\"ping successful\\n\")\n\t\t\t\t\tsessionIDBytes, err := m.QueryDB(r, m.DB, SessionQuery)\n\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tif err == sql.ErrNoRows {\n\t\t\t\t\t\t\tfmt.Printf(\"auth middleware db no row found\\n\")\n\t\t\t\t\t\t\tnext(w, r)\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\tfmt.Printf(\"session bytes: %s\\n\", sessionIDBytes)\n\n\t\t\t\t\tsession, _ = m.SessionStore.New(r, m.SessionKeys.SessionName)\n\t\t\t\t\tsession.ID = string(sessionIDBytes)\n\t\t\t\t\tfmt.Printf(\"session id: %s\\n\", session.ID)\n\t\t\t\t\tsession.Values[m.SessionKeys.Keys.UserKey] = userBytes\n\t\t\t\t\tsession.Save(r, w)\n\t\t\t\t\tfmt.Printf(\"set session into store \\n\")\n\t\t\t\t}\n\n\t\t\t\tctx := context.WithValue(r.Context(), UserCtxKey, userBytes)\n\t\t\t\tctxWithEmail := context.WithValue(ctx, MiddlewareUserCtxKey, middlewareUser)\n\t\t\t\tnext(w, r.WithContext(ctxWithEmail))\n\t\t\t} else {\n\t\t\t\tnext(w, r)\n\t\t\t}\n\t\t} else {\n\t\t\t// fmt.Printf(\"new session, no cookie\\n\")\n\t\t\tnext(w, r)\n\t\t}\n\t} else {\n\t\tif val, ok := session.Values[m.SessionKeys.Keys.UserKey]; ok {\n\t\t\tuserBytes := val.([]byte)\n\n\t\t\terr := json.Unmarshal(val.([]byte), &middlewareUser)\n\n\t\t\tif err != nil {\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\tw.Write([]byte(err.Error()))\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tctx := context.WithValue(r.Context(), UserCtxKey, userBytes)\n\t\t\tctxWithEmail := context.WithValue(ctx, MiddlewareUserCtxKey, middlewareUser)\n\t\t\tnext(w, r.WithContext(ctxWithEmail))\n\t\t} else {\n\t\t\tnext(w, r)\n\t\t}\n\t}\n}", "func NewAuth(svc *auth.Service, e *echo.Echo, mw echo.MiddlewareFunc) {\n\ta := Auth{svc}\n\t// swagger:route POST /login auth login\n\t// Logs in user by username and password.\n\t// responses:\n\t// 200: loginResp\n\t// 400: errMsg\n\t// 401: errMsg\n\t// \t403: err\n\t// 404: errMsg\n\t// 500: err\n\te.POST(\"/login\", a.login)\n\t// swagger:operation GET /refresh/{token} auth refresh\n\t// ---\n\t// summary: Refreshes jwt token.\n\t// description: Refreshes jwt token by checking at database 
whether refresh token exists.\n\t// parameters:\n\t// - name: token\n\t// in: path\n\t// description: refresh token\n\t// type: string\n\t// required: true\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/refreshResp\"\n\t// \"400\":\n\t// \"$ref\": \"#/responses/errMsg\"\n\t// \"401\":\n\t// \"$ref\": \"#/responses/err\"\n\t// \"500\":\n\t// \"$ref\": \"#/responses/err\"\n\te.GET(\"/refresh/:token\", a.refresh)\n\n\t// swagger:route GET /me auth meReq\n\t// Gets user's info from session\n\t// responses:\n\t// 200: userResp\n\t// 500: err\n\te.GET(\"/me\", a.me, mw)\n}", "func Middleware(next httprouter.Handle) httprouter.Handle {\n\treturn func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\t\ttoken := r.Header.Get(\"Authorization\")\n\n\t\tsessionID := getSessionByToken(token)\n\t\tuserID, status := checkSession(sessionID)\n\t\tif !status {\n\t\t\tresponse := Response{\n\t\t\t\tStatus: statusFail,\n\t\t\t\tData: ResponseError{\n\t\t\t\t\tError: \"Authorization failed\",\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\tjson.NewEncoder(w).Encode(response)\n\n\t\t\treturn\n\t\t}\n\t\tr.ParseForm()\n\t\tr.Form.Set(\"user_id\", userID)\n\n\t\tnext(w, r, ps)\n\t\treturn\n\t}\n}", "func (h *Handler) proxyAuthMiddleware(c *gin.Context) {\n\tauthError := func() error {\n\t\tclaims, err := h.fakeClaimsForProxy(c)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.Set(\"user\", claims)\n\t\treturn nil\n\t}()\n\tif authError != nil {\n\t\tc.AbortWithStatusJSON(http.StatusForbidden, gin.H{\n\t\t\t\"error\": \"authentication failed\",\n\t\t})\n\t\tlogrus.Errorf(\"Authentication middleware check failed: %v\\n\", authError)\n\t\treturn\n\t}\n\tc.Next()\n}", "func NewAuthenticationMiddleware() (mux.MiddlewareFunc, AuthenticationContext) {\n\tcontext := &authenticationContext{make(map[string]bool), make(map[string]string), make(map[string]PasswordInformation)}\n\tmiddlewareFunc := func(next http.Handler) http.Handler {\n\t\treturn authenticationMiddleware{next, context}\n\t}\n\treturn middlewareFunc, context\n}", "func NewGinMiddleware(fetchService fetcher.Service) (*jwt.GinJWTMiddleware, error) {\n\tkey := []byte(authKey)\n\n\treturn jwt.New(&jwt.GinJWTMiddleware{\n\t\tRealm: realmName,\n\t\tKey: key,\n\t\tTimeout: time.Hour,\n\t\tMaxRefresh: time.Hour,\n\t\tIdentityKey: IdentityKey,\n\t\tPayloadFunc: payloadHandler,\n\t\tIdentityHandler: identityHandler,\n\t\tAuthenticator: authHandlerBuilder(fetchService),\n\t\tUnauthorized: unauthorizedHandler,\n\t})\n}", "func (mw *AuthMiddleware) MiddlewareFunc(handler rest.HandlerFunc) rest.HandlerFunc {\n\tif mw.Authenticator == nil {\n\t\tmw.Authenticator = auth.DefaultAuthenticator()\n\t}\n\n\treturn func(writer rest.ResponseWriter, request *rest.Request) { mw.handler(writer, request, handler) }\n}", "func NewAuthRouter(routerServices *Services) *AuthRouter {\n router := new(AuthRouter)\n\n router.Services = routerServices\n\n return router\n}", "func UserMiddleware(next http.Handler) http.Handler {\r\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\r\n\t\ts := r.Header.Get(\"Authorization\")\r\n\t\ts1 := strings.Replace(s, \"Basic \", \"\", 1)\r\n\t\tfmt.Println(s, s1)\r\n\t\tue, err := base64.StdEncoding.DecodeString(s1)\r\n\t\tif err == nil {\r\n\t\t\t//TODO\r\n\t\t\tfmt.Println(\"User Middleware :\", string(ue))\r\n\t\t} else {\r\n\t\t\t//TODO\r\n\t\t\tfmt.Println(err)\r\n\t\t}\r\n\t\tnext.ServeHTTP(w, 
r)\r\n\t})\r\n}", "func Middleware(bnetAuth *Auth) func(http.Handler) http.Handler {\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\n\t\t\t// return new context with previous context values and added value\n\t\t\tctx := context.WithValue(r.Context(), bnetAuthCtxKey, bnetAuth)\n\n\t\t\t// and call the next with our new context\n\t\t\tr = r.WithContext(ctx)\n\t\t\tnext.ServeHTTP(w, r)\n\t\t})\n\t}\n}", "func MustAuth(next http.Handler) http.Handler {\n\treturn &authHandler{\n\t\tnext: next,\n\t}\n}", "func (conf Config) Middleware() (mux.MiddlewareFunc, error) {\n\tp, err := oidc.NewProvider(context.Background(), conf.Issuer)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error initializing oidc provider: %v\", err)\n\t}\n\n\tcfg := oidc.Config{\n\t\tClientID: conf.ClientID,\n\t\tSupportedSigningAlgs: conf.Algorithms,\n\t\tSkipClientIDCheck: conf.ClientID == \"\",\n\t\tSkipExpiryCheck: conf.SkipExpiryCheck,\n\t\tSkipIssuerCheck: conf.SkipIssuerCheck,\n\t}\n\n\tv := p.Verifier(&cfg)\n\n\treturn mux.MiddlewareFunc(func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\th := r.Header.Get(\"Authorization\")\n\t\t\tif h == \"\" {\n\t\t\t\ttwirp.WriteError(w, twirp.NewError(twirp.Unauthenticated, \"missing required header: Authorization\"))\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif !strings.HasPrefix(h, \"Bearer \") {\n\t\t\t\ttwirp.WriteError(w, twirp.NewError(twirp.Unauthenticated, \"invalid header: Authorization\"))\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t_, err := v.Verify(r.Context(), strings.TrimSpace(strings.TrimPrefix(h, \"Bearer \")))\n\t\t\tif err != nil {\n\t\t\t\ttwirp.WriteError(w, twirp.NewError(twirp.Unauthenticated, err.Error()))\n\t\t\t}\n\t\t\tnext.ServeHTTP(w, r)\n\t\t})\n\t}), nil\n}", "func (s *Server) WithAuth(next http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\taccessToken := r.Header.Get(\"Authorization\")\n\t\tif accessToken == \"\" {\n\t\t\tlogrus.Warnf(\"[%v][handler.WithAuth] %s is not authenticated\", ctx_value.GetString(r.Context(), \"tracingID\"), r.Host)\n\t\t\ts.onErr(w, http.StatusForbidden, \"missing accesss token\")\n\t\t\treturn\n\t\t}\n\t\tauthedUser, err := s.userauthService.Authenticate(r.Context(), accessToken)\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"[%v][Server.WithAuth] could not authenticate user: %v\", ctx_value.GetString(r.Context(), \"tracingID\"), err)\n\t\t\ts.onErr(w, err.Code(), err.Info())\n\t\t\treturn\n\t\t}\n\t\t// add JWT claims of user in r.Context()\n\t\tctxAuthedUser := ctx_value.AddValue(r.Context(), \"user\", authedUser)\n\t\tctxJwtToken := ctx_value.AddValue(ctxAuthedUser, \"authorization\", accessToken)\n\t\t// serve request with user claims in context\n\t\tnext(w, r.WithContext(ctxJwtToken))\n\t}\n}", "func (mw *AuthMiddleware) MiddlewareFunc() gin.HandlerFunc {\n\t// initialise\n\tmw.MiddlewareInit()\n\treturn func(c *gin.Context) {\n\t\tmw.middlewareImpl(c)\n\t\treturn\n\t}\n}", "func (p *OIDCProvider) Middleware() Middleware {\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tvar token string\n\n\t\t\tauthorizationHeader := r.Header.Get(\"Authorization\")\n\t\t\tif authorizationHeader != \"\" {\n\t\t\t\tauthorization := strings.Split(authorizationHeader, \" \")\n\t\t\t\tif len(authorization) != 2 {\n\t\t\t\t\tconst msg = \"invalid Authorization 
header\"\n\t\t\t\t\tlevel.Debug(p.logger).Log(\"msg\", msg)\n\t\t\t\t\thttp.Error(w, msg, http.StatusUnauthorized)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\ttoken = authorization[1]\n\t\t\t} else {\n\t\t\t\tcookie, err := r.Cookie(p.cookieName)\n\t\t\t\tif err != nil {\n\t\t\t\t\ttenant, ok := GetTenant(r.Context())\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tconst msg = \"error finding tenant\"\n\t\t\t\t\t\tlevel.Warn(p.logger).Log(\"msg\", msg)\n\t\t\t\t\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\t// Redirect users to the OIDC login\n\t\t\t\t\tw.Header().Set(\"Location\", path.Join(\"/oidc\", tenant, \"/login\"))\n\t\t\t\t\thttp.Error(w, \"failed to find token\", http.StatusFound)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\ttoken = cookie.Value\n\t\t\t}\n\n\t\t\tidToken, err := p.verifier.Verify(oidc.ClientContext(r.Context(), p.client), token)\n\t\t\tif err != nil {\n\t\t\t\tconst msg = \"failed to authenticate\"\n\t\t\t\tlevel.Debug(p.logger).Log(\"msg\", msg, \"err\", err)\n\t\t\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tsub := idToken.Subject\n\t\t\tif p.config.UsernameClaim != \"\" {\n\t\t\t\tclaims := map[string]interface{}{}\n\t\t\t\tif err := idToken.Claims(&claims); err != nil {\n\t\t\t\t\tconst msg = \"failed to read claims\"\n\t\t\t\t\tlevel.Warn(p.logger).Log(\"msg\", msg, \"err\", err)\n\t\t\t\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\trawUsername, ok := claims[p.config.UsernameClaim]\n\t\t\t\tif !ok {\n\t\t\t\t\tconst msg = \"username cannot be empty\"\n\t\t\t\t\tlevel.Debug(p.logger).Log(\"msg\", msg)\n\t\t\t\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tusername, ok := rawUsername.(string)\n\t\t\t\tif !ok || username == \"\" {\n\t\t\t\t\tconst msg = \"invalid username claim value\"\n\t\t\t\t\tlevel.Debug(p.logger).Log(\"msg\", msg)\n\t\t\t\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tsub = username\n\t\t\t}\n\t\t\tctx := context.WithValue(r.Context(), subjectKey, sub)\n\n\t\t\tif p.config.GroupClaim != \"\" {\n\t\t\t\tvar groups []string\n\t\t\t\tclaims := map[string]interface{}{}\n\t\t\t\tif err := idToken.Claims(&claims); err != nil {\n\t\t\t\t\tconst msg = \"failed to read claims\"\n\t\t\t\t\tlevel.Warn(p.logger).Log(\"msg\", msg, \"err\", err)\n\t\t\t\t\thttp.Error(w, msg, http.StatusInternalServerError)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\trawGroup, ok := claims[p.config.GroupClaim]\n\t\t\t\tif !ok {\n\t\t\t\t\tconst msg = \"group cannot be empty\"\n\t\t\t\t\tlevel.Debug(p.logger).Log(\"msg\", msg)\n\t\t\t\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tswitch v := rawGroup.(type) {\n\t\t\t\tcase string:\n\t\t\t\t\tgroups = append(groups, v)\n\t\t\t\tcase []string:\n\t\t\t\t\tgroups = v\n\t\t\t\tcase []interface{}:\n\t\t\t\t\tgroups = make([]string, 0, len(v))\n\t\t\t\t\tfor i := range v {\n\t\t\t\t\t\tgroups = append(groups, fmt.Sprintf(\"%v\", v[i]))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tctx = context.WithValue(ctx, groupsKey, groups)\n\t\t\t}\n\n\t\t\tnext.ServeHTTP(w, r.WithContext(ctx))\n\t\t})\n\t}\n}", "func Middleware() func(http.Handler) http.Handler {\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\n\t\t\t// Put incoming request auth headers into the context or use later\n\t\t\t// to verify signature\n\t\t\treqTs := r.Header.Get(reqTsHeader)\n\t\t\tsignature 
:= r.Header.Get(signatureHeader)\n\t\t\tdid := r.Header.Get(didHeader)\n\t\t\t// didKey is optional\n\t\t\tif reqTs == \"\" && signature == \"\" {\n\t\t\t\tlog.Infof(\"No auth headers found\")\n\t\t\t\tnext.ServeHTTP(w, r)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Store values into context\n\t\t\tctx := context.WithValue(r.Context(), DidCtxKey, did)\n\t\t\tctx = context.WithValue(ctx, ReqTsCtxKey, reqTs)\n\t\t\tctx = context.WithValue(ctx, SignatureCtxKey, signature)\n\n\t\t\t// and call the next with our new context\n\t\t\tr = r.WithContext(ctx)\n\t\t\tnext.ServeHTTP(w, r)\n\n\t\t})\n\t}\n}", "func NewAuth(jwt jwts.JWTer, userRepo repository.UserRepository) auth.Service {\n\treturn &authsrvc{jwt, userRepo}\n}", "func RequiredAuth(appCtx appctx.AppContext, authStore AuthenStore) func(c *gin.Context) {\n\ttokenProvider := jwt.NewTokenJWTProvider(appCtx.SecretKey())\n\n\treturn func(c *gin.Context) {\n\t\ttoken, err := extractTokenFromHeaderString(c.GetHeader(\"Authorization\"))\n\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\t//db := appCtx.GetMaiDBConnection()\n\t\t//store := userstore.NewSQLStore(db)\n\t\t//\n\t\tpayload, err := tokenProvider.Validate(token)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\t//\n\t\t//user, err := store.FindUser(c.Request.Context(), map[string]interface{}{\"id\": payload.UserId})\n\n\t\tuser, err := authStore.FindUser(c.Request.Context(), map[string]interface{}{\"id\": payload.UserId})\n\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tif user.Status == 0 {\n\t\t\tpanic(common.ErrNoPermission(errors.New(\"user has been deleted or banned\")))\n\t\t}\n\n\t\tuser.Mask(false)\n\n\t\tc.Set(common.CurrentUser, user)\n\t\tc.Next()\n\t}\n}", "func Middleware(conn *storage.Conn, key []byte) func(http.Handler) http.Handler {\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tauth := r.Header.Get(\"Authorization\")\n\n\t\t\t// Allow unauthenticated users in\n\t\t\tif auth == \"\" {\n\t\t\t\tnext.ServeHTTP(w, r)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tuserID, err := validateAndGetUserID(key, auth)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error().Err(err).Msg(\"failed validate user for ID\")\n\t\t\t\thttp.Error(w, \"Invalid auth\", http.StatusForbidden)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Get the user from the database\n\t\t\tadmin, err := getUserByID(r.Context(), conn, userID)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error().Err(err).Msg(\"failed get user for ID\")\n\t\t\t\thttp.Error(w, \"Invalid auth\", http.StatusForbidden)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Put it in context\n\t\t\tctx := context.WithValue(r.Context(), adminCtxKey, admin)\n\n\t\t\t// And call the next with our new context\n\t\t\tr = r.WithContext(ctx)\n\t\t\tnext.ServeHTTP(w, r)\n\t\t})\n\t}\n}", "func AuthenticationMiddleware() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tbearer := c.Request.Header.Get(\"Authorization\")\n\n\t\tif len(bearer) != 0 {\n\t\t\tc.Next()\n\t\t} else {\n\t\t\tc.JSON(400, \"Not Authenticated\")\n\t\t\tc.Abort()\n\t\t}\n\t}\n}" ]
[ "0.749641", "0.71149826", "0.7067203", "0.6840958", "0.684059", "0.6691852", "0.66247874", "0.6615308", "0.6607044", "0.6568086", "0.65581614", "0.65564984", "0.65474516", "0.65150654", "0.65064883", "0.6489449", "0.6460329", "0.6460041", "0.6445028", "0.64227384", "0.63991654", "0.6393123", "0.6387601", "0.63622534", "0.63493705", "0.634857", "0.6342221", "0.632595", "0.6323652", "0.6271473", "0.6193297", "0.6166434", "0.61659014", "0.61655796", "0.61653626", "0.615654", "0.615491", "0.6129169", "0.6126313", "0.6113717", "0.6092077", "0.6088468", "0.6012645", "0.6008815", "0.6002436", "0.5986161", "0.59712195", "0.5943596", "0.59406316", "0.5925486", "0.591337", "0.59093255", "0.58883464", "0.58825344", "0.5855093", "0.58542436", "0.5843808", "0.5841374", "0.5814033", "0.5813012", "0.58098835", "0.57998574", "0.57909244", "0.57852954", "0.57788295", "0.57774305", "0.57430816", "0.57410854", "0.57375085", "0.57167065", "0.5707025", "0.5706496", "0.5688992", "0.5673089", "0.56730354", "0.5667717", "0.5647315", "0.56314516", "0.5631332", "0.5613158", "0.55778545", "0.55552983", "0.55364156", "0.5535272", "0.5523877", "0.5521609", "0.5509551", "0.55073124", "0.5487106", "0.54854363", "0.5483703", "0.5479164", "0.5473845", "0.5465266", "0.5462864", "0.5457692", "0.5438125", "0.5436655", "0.5420659", "0.5408507" ]
0.76894057
0
MakeHandler creates the API request handler
func MakeHandler() *http.Handler { api := rest.NewApi() authMiddleware, err := CreateAuthMiddleware() if err != nil { panic(err) } api.Use(&rest.IfMiddleware{ // Only authenticate non login or register requests Condition: func(request *rest.Request) bool { return (request.URL.Path != variables.APIPathLoginUserServer) && (request.URL.Path != variables.APIPathRegisterUserServer) }, IfTrue: authMiddleware, }) api.Use(rest.DefaultProdStack...) router, err := rest.MakeRouter( rest.Post(variables.APIPathLoginUserServer, authMiddleware.LoginHandler), rest.Get(variables.APIPathRefreshUserServer, authMiddleware.RefreshHandler), rest.Post(variables.APIPathRegisterUserServer, PostRegister), rest.Get(variables.APIPathUserServer, GetUser), rest.Post(variables.APIPathUserServer, PostUser), ) if err != nil { log.Fatal(err) } api.SetApp(router) handler := api.MakeHandler() return &handler }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func MakeHandler(svc manager.Service) http.Handler {\n\topts := []kithttp.ServerOption{\n\t\tkithttp.ServerErrorEncoder(encodeError),\n\t}\n\n\tregistration := kithttp.NewServer(\n\t\tregistrationEndpoint(svc),\n\t\tdecodeCredentials,\n\t\tencodeResponse,\n\t\topts...,\n\t)\n\n\tlogin := kithttp.NewServer(\n\t\tloginEndpoint(svc),\n\t\tdecodeCredentials,\n\t\tencodeResponse,\n\t\topts...,\n\t)\n\n\taddClient := kithttp.NewServer(\n\t\taddClientEndpoint(svc),\n\t\tdecodeAddClient,\n\t\tencodeResponse,\n\t\topts...,\n\t)\n\n\tviewClient := kithttp.NewServer(\n\t\tviewClientEndpoint(svc),\n\t\tdecodeViewClient,\n\t\tencodeResponse,\n\t\topts...,\n\t)\n\n\tremoveClient := kithttp.NewServer(\n\t\tremoveClientEndpoint(svc),\n\t\tdecodeViewClient,\n\t\tencodeResponse,\n\t\topts...,\n\t)\n\n\tr := bone.New()\n\n\tr.Post(\"/users\", registration)\n\tr.Post(\"/tokens\", login)\n\tr.Post(\"/clients\", addClient)\n\tr.Get(\"/clients/:id\", viewClient)\n\tr.Delete(\"/clients/:id\", removeClient)\n\tr.Handle(\"/metrics\", promhttp.Handler())\n\n\treturn r\n}", "func MakeHandler(svc notifiers.Service, tracer opentracing.Tracer, logger logger.Logger) http.Handler {\n\topts := []kithttp.ServerOption{\n\t\tkithttp.ServerErrorEncoder(apiutil.LoggingErrorEncoder(logger, encodeError)),\n\t}\n\n\tmux := bone.New()\n\n\tmux.Post(\"/subscriptions\", kithttp.NewServer(\n\t\tkitot.TraceServer(tracer, \"create_subscription\")(createSubscriptionEndpoint(svc)),\n\t\tdecodeCreate,\n\t\tencodeResponse,\n\t\topts...,\n\t))\n\n\tmux.Get(\"/subscriptions/:id\", kithttp.NewServer(\n\t\tkitot.TraceServer(tracer, \"view_subscription\")(viewSubscriptionEndpint(svc)),\n\t\tdecodeSubscription,\n\t\tencodeResponse,\n\t\topts...,\n\t))\n\n\tmux.Get(\"/subscriptions\", kithttp.NewServer(\n\t\tkitot.TraceServer(tracer, \"list_subscriptions\")(listSubscriptionsEndpoint(svc)),\n\t\tdecodeList,\n\t\tencodeResponse,\n\t\topts...,\n\t))\n\n\tmux.Delete(\"/subscriptions/:id\", kithttp.NewServer(\n\t\tkitot.TraceServer(tracer, \"delete_subscription\")(deleteSubscriptionEndpint(svc)),\n\t\tdecodeSubscription,\n\t\tencodeResponse,\n\t\topts...,\n\t))\n\n\tmux.GetFunc(\"/health\", mainflux.Health(\"notifier\"))\n\tmux.Handle(\"/metrics\", promhttp.Handler())\n\n\treturn mux\n}", "func MakeHttpHandler(ctx context.Context, endpoints endpoint.Endpoints, logger log.Logger, zipkinTracer *gozipkin.Tracer,) http.Handler {\n\tr := mux.NewRouter()\n\t// 链路追踪\n\tzipkinServer := zipkin.HTTPServerTrace(zipkinTracer, zipkin.Name(\"http-transport\"))\n\toptions := []kithttp.ServerOption{\n\t\tkithttp.ServerErrorLogger(logger),\n\t\tkithttp.ServerErrorEncoder(kithttp.DefaultErrorEncoder),\n\t\tkithttp.ServerErrorEncoder(func(ctx context.Context, err error, w http.ResponseWriter) {\n\t\t\tlogger.Log(fmt.Sprint(ctx.Value(ContextReqUUid)))\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\tjson.NewEncoder(w).Encode(err)\n\t\t}),\n\t\tkithttp.ServerBefore(func(ctx context.Context, request *http.Request) context.Context {\n\t\t\tUUID := uuid.NewV5(uuid.Must(uuid.NewV4(),nil), \"req_uuid\").String()\n\t\t\tlogger.Log(\"给请求添加uuid\", zap.Any(\"UUID\", UUID))\n\t\t\tctx = context.WithValue(ctx, ContextReqUUid, UUID)\n\t\t\treturn ctx\n\t\t}),\n\t\tzipkinServer,\n\t}\n\tr.Methods(\"POST\").Path(\"/movie-tags\").Handler(kithttp.NewServer(\n\t\tendpoints.MovieTags,\n\t\tdecodeMoviesTagsRequest, // 请求参数\n\t\tencode.JsonResponse,\n\t\toptions...,\n\t))\n // 暴露具体的 
endpoint\n\tr.Methods(\"POST\").Path(\"/movie-list\").Handler(kithttp.NewServer(\n\t\tendpoints.MoviesList,\n\t\tdecodeHotPlayMoviesrRequest, // 请求参数\n\t\tencode.JsonResponse,\n\t\toptions...,\n\t))\n\n\tr.Methods(\"POST\").Path(\"/movie-detail\").Handler(kithttp.NewServer(\n\t\tendpoints.MovieDetail,\n\t\tdecodMovieDetailRequest, // 请求参数\n\t\tencode.JsonResponse,\n\t\toptions...,\n\t))\n\tr.Methods(\"POST\").Path(\"/credits\").Handler(kithttp.NewServer(\n\t\tendpoints.MovieCreditsWithTypes,\n\t\tdecodeMovieCreditsWithTypes, // 请求参数\n\t\tencode.JsonResponse,\n\t\toptions...,\n\t))\n\treturn r\n}", "func MakeHandler(fn func(http.ResponseWriter, *http.Request)) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\n\t\tif attmpt := apiAttempt(w, r); !attmpt {\n\t\t\tfn(w, r)\n\t\t}\n\t\tcontext.Clear(r)\n\n\t}\n}", "func MakeHandler(svc users.Service, tracer opentracing.Tracer, l log.Logger) http.Handler {\n\tlogger = l\n\n\topts := []kithttp.ServerOption{\n\t\tkithttp.ServerErrorEncoder(encodeError),\n\t}\n\n\tmux := bone.New()\n\n\tmux.Post(\"/users\", kithttp.NewServer(\n\t\tkitot.TraceServer(tracer, \"register\")(registrationEndpoint(svc)),\n\t\tdecodeCredentials,\n\t\tencodeResponse,\n\t\topts...,\n\t))\n\n\tmux.Get(\"/users\", kithttp.NewServer(\n\t\tkitot.TraceServer(tracer, \"user_info\")(userInfoEndpoint(svc)),\n\t\tdecodeViewInfo,\n\t\tencodeResponse,\n\t\topts...,\n\t))\n\n\tmux.Put(\"/users\", kithttp.NewServer(\n\t\tkitot.TraceServer(tracer, \"update_user\")(updateUserEndpoint(svc)),\n\t\tdecodeUpdateUser,\n\t\tencodeResponse,\n\t\topts...,\n\t))\n\n\tmux.Post(\"/password/reset-request\", kithttp.NewServer(\n\t\tkitot.TraceServer(tracer, \"res-req\")(passwordResetRequestEndpoint(svc)),\n\t\tdecodePasswordResetRequest,\n\t\tencodeResponse,\n\t\topts...,\n\t))\n\n\tmux.Put(\"/password/reset\", kithttp.NewServer(\n\t\tkitot.TraceServer(tracer, \"reset\")(passwordResetEndpoint(svc)),\n\t\tdecodePasswordReset,\n\t\tencodeResponse,\n\t\topts...,\n\t))\n\n\tmux.Patch(\"/password\", kithttp.NewServer(\n\t\tkitot.TraceServer(tracer, \"reset\")(passwordChangeEndpoint(svc)),\n\t\tdecodePasswordChange,\n\t\tencodeResponse,\n\t\topts...,\n\t))\n\n\tmux.Post(\"/tokens\", kithttp.NewServer(\n\t\tkitot.TraceServer(tracer, \"login\")(loginEndpoint(svc)),\n\t\tdecodeCredentials,\n\t\tencodeResponse,\n\t\topts...,\n\t))\n\n\tmux.GetFunc(\"/version\", mainflux.Version(\"users\"))\n\tmux.Handle(\"/metrics\", promhttp.Handler())\n\n\treturn mux\n}", "func makeHandler() (http.Handler, *serializerLog, *mapStorage) {\n\treturn makeHandlerSubPath(\"\")\n}", "func MakeHandler(svc Service, opts []kithttp.ServerOption, responseEncoder kithttp.EncodeResponseFunc) http.Handler {\n\tauthMid := middleware.AuthMiddleware(svc.(*service).authenticator, \"\", \"\")\n\tlistHandler := kithttp.NewServer(\n\t\tauthMid(makeListEndpoint(svc)),\n\t\tdecodeListRequest,\n\t\tresponseEncoder, opts...)\n\tauthMid = middleware.AuthMiddleware(svc.(*service).authenticator, \"\", permission.StartCampaign)\n\tstartHandler := kithttp.NewServer(\n\t\tauthMid(makeStartEndpoint(svc)),\n\t\tdecodeStartRequest,\n\t\tresponseEncoder, opts...)\n\tprogressHandler := kithttp.NewServer(\n\t\tauthMid(makeProgressEndpoint(svc)),\n\t\tdecodeProgressRequest,\n\t\tresponseEncoder, opts...)\n\treportHandler := kithttp.NewServer(\n\t\tauthMid(makeReportEndpoint(svc)),\n\t\tdecodeReportRequest,\n\t\tresponseEncoder, opts...)\n\tauthMid = middleware.AuthMiddleware(svc.(*service).authenticator, \"\", 
permission.StopCampaign)\n\tstopHandler := kithttp.NewServer(\n\t\tauthMid(makeStopEndpoint(svc)),\n\t\tdecodeStopRequest,\n\t\tresponseEncoder, opts...)\n\tr := mux.NewRouter()\n\n\tr.Handle(\"/campaign/v1/list\", listHandler).Methods(\"GET\", \"POST\")\n\tr.Handle(\"/campaign/v1/start\", startHandler).Methods(\"POST\")\n\tr.Handle(\"/campaign/v1/progress\", progressHandler).Methods(\"GET\", \"POST\")\n\tr.Handle(\"/campaign/v1/stop\", stopHandler).Methods(\"POST\")\n\tr.Handle(\"/campaign/v1/report\", reportHandler).Methods(\"GET\", \"POST\")\n\treturn r\n}", "func MakeHandler(bs Service, logger kitlog.Logger) http.Handler {\n\topts := []kithttp.ServerOption{\n\t\tkithttp.ServerErrorLogger(logger),\n\t\tkithttp.ServerErrorEncoder(encodeError),\n\t}\n\n\tgetBaseDataHandler := kithttp.NewServer(\n\t\tmakeGetBaseDataEndpoint(bs),\n\t\tdecodeGetBaseDataRequest,\n\t\tencodeResponse,\n\t\topts...,\n\t)\n\n\taddBaseDataHandler := kithttp.NewServer(\n\t\tmakePostBaseDataEndpoint(bs),\n\t\tdecodePostBaseDataRequest,\n\t\tencodeResponse,\n\t\topts...,\n\t)\n\n\tgetAllBaseDataHandler := kithttp.NewServer(\n\t\tmakeGetAllBaseDataEndpoint(bs),\n\t\tdecodeGetAllBaseDataRequest,\n\t\tencodeResponse,\n\t\topts...,\n\t)\n\n\tupdateMultiBaseDataHandler := kithttp.NewServer(\n\t\tmakePutMultiBaseDataEndpoint(bs),\n\t\tdecodePutMultiBaseDataRequest,\n\t\tencodeResponse,\n\t\topts...,\n\t)\n\n\tdeleteMultiBaseDataHandler := kithttp.NewServer(\n\t\tmakeDeleteMultiBaseDataEndpoint(bs),\n\t\tdecodeDeleteMultiBaseDataRequest,\n\t\tencodeResponse,\n\t\topts...,\n\t)\n\n\tdeleteMultiLabelHandler := kithttp.NewServer(\n\t\tmakeDeleteMultiLabelEndpoint(bs),\n\t\tdecodeDeleteMultiLabelRequest,\n\t\tencodeResponse,\n\t\topts...,\n\t)\n\n\tr := mux.NewRouter()\n\n\tr.Handle(\"/baseing/v1/data/{id}\", getBaseDataHandler).Methods(\"GET\")\n\tr.Handle(\"/baseing/v1/data\", getAllBaseDataHandler).Methods(\"GET\")\n\tr.Handle(\"/baseing/v1/data\", addBaseDataHandler).Methods(\"POST\")\n\tr.Handle(\"/baseing/v1/data\", updateMultiBaseDataHandler).Methods(\"PUT\")\n\tr.Handle(\"/baseing/v1/data\", deleteMultiBaseDataHandler).Methods(\"DELETE\")\n\tr.Handle(\"/baseing/v1/data/label\", deleteMultiLabelHandler).Methods(\"DELETE\")\n\treturn r\n}", "func MakeHandler(csvc clients.Service, psvc policies.Service, mux *bone.Mux, logger logger.Logger) http.Handler {\n\topts := []kithttp.ServerOption{\n\t\tkithttp.ServerErrorEncoder(apiutil.LoggingErrorEncoder(logger, api.EncodeError)),\n\t}\n\tmux.Post(\"/connect\", kithttp.NewServer(\n\t\totelkit.EndpointMiddleware(otelkit.WithOperation(\"connect\"))(connectThingsEndpoint(psvc)),\n\t\tdecodeConnectList,\n\t\tapi.EncodeResponse,\n\t\topts...,\n\t))\n\n\tmux.Post(\"/disconnect\", kithttp.NewServer(\n\t\totelkit.EndpointMiddleware(otelkit.WithOperation(\"disconnect\"))(disconnectThingsEndpoint(psvc)),\n\t\tdecodeConnectList,\n\t\tapi.EncodeResponse,\n\t\topts...,\n\t))\n\n\tmux.Post(\"/channels/:chanID/things/:thingID\", kithttp.NewServer(\n\t\totelkit.EndpointMiddleware(otelkit.WithOperation(\"connect_thing\"))(connectEndpoint(psvc)),\n\t\tdecodeConnectThing,\n\t\tapi.EncodeResponse,\n\t\topts...,\n\t))\n\n\tmux.Delete(\"/channels/:chanID/things/:thingID\", kithttp.NewServer(\n\t\totelkit.EndpointMiddleware(otelkit.WithOperation(\"disconnect_thing\"))(disconnectEndpoint(psvc)),\n\t\tdecodeDisconnectThing,\n\t\tapi.EncodeResponse,\n\t\topts...,\n\t))\n\n\tmux.Post(\"/identify\", 
kithttp.NewServer(\n\t\totelkit.EndpointMiddleware(otelkit.WithOperation(\"identify\"))(identifyEndpoint(csvc)),\n\t\tdecodeIdentify,\n\t\tapi.EncodeResponse,\n\t\topts...,\n\t))\n\n\tmux.Put(\"/things/policies\", kithttp.NewServer(\n\t\totelkit.EndpointMiddleware(otelkit.WithOperation(\"update_policy\"))(updatePolicyEndpoint(psvc)),\n\t\tdecodeUpdatePolicy,\n\t\tapi.EncodeResponse,\n\t\topts...,\n\t))\n\n\tmux.Get(\"/things/policies\", kithttp.NewServer(\n\t\totelkit.EndpointMiddleware(otelkit.WithOperation(\"list_policies\"))(listPoliciesEndpoint(psvc)),\n\t\tdecodeListPolicies,\n\t\tapi.EncodeResponse,\n\t\topts...,\n\t))\n\n\tmux.Post(\"/channels/:chanID/access\", kithttp.NewServer(\n\t\totelkit.EndpointMiddleware(otelkit.WithOperation(\"authorize\"))(authorizeEndpoint(psvc)),\n\t\tdecodeCanAccess,\n\t\tapi.EncodeResponse,\n\t\topts...,\n\t))\n\treturn mux\n\n}", "func MakeHandler(fn func(http.ResponseWriter, *http.Request)) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\n\t\tif attmpt := apiAttempt(w, r); !attmpt {\n\t\t\tfn(w, r)\n\t\t}\n\n\t}\n}", "func makeHandler(server *ServerContext, privs handlerPrivs, method handlerMethod) http.Handler {\n\treturn http.HandlerFunc(func(r http.ResponseWriter, rq *http.Request) {\n\t\th := newHandler(server, privs, r, rq)\n\t\terr := h.invoke(method)\n\t\th.writeError(err)\n\t\th.logDuration(true) \n\t})\n}", "func MakeHTTPHandler(s Service) http.Handler {\n\tr := chi.NewRouter()\n\n\tListPostsHandler := kithttp.NewServer(\n\t\tmakeListPostsEndpoint(s),\n\t\tlistPostsRequestDecoder,\n\t\tresthttp.EncodeJSONResponse,\n\t)\n\tr.Method(http.MethodGet, \"/\", ListPostsHandler)\n\n\tGetPostHandler := kithttp.NewServer(\n\t\tmakeGetPostEndpoint(s),\n\t\tgetPostRequestDecoder,\n\t\tresthttp.EncodeJSONResponse,\n\t\t[]kithttp.ServerOption{\n\t\t\tkithttp.ServerErrorEncoder(resthttp.EncodeError),\n\t\t}...,\n\t)\n\tr.Method(http.MethodGet, \"/{id}\", GetPostHandler)\n\n\treturn r\n}", "func MakeHandler(s Service, logger kitlog.Logger) http.Handler {\n opts := []kithttp.ServerOption{\n kithttp.ServerErrorLogger(logger),\n kithttp.ServerErrorEncoder(encodeError),\n }\n\n\te := MakeServerEndpoints(s)\n\n tickersHandler := kithttp.NewServer(\n e.TickersEndpoint,\n decodeTickerRequest,\n kithttp.EncodeJSONResponse,\n opts...,\n )\n\n r := mux.NewRouter()\n\n r.Handle(\"/ex/v1/tickers/{base}\", tickersHandler).Methods(\"GET\")\n /*\n getTickersHandler := kithttp.NewServer(\n makeGetTickersEndpoint(es),\n decodeEmptyRequest,\n kithttp.EncodeJSONResponse,\n opts...,\n )\n */\n\n //r.Handle(\"/pub/ex/v1/getTicker/{pair}\", getTickerHandler).Methods(\"GET\")\n //r.Handle(\"/pub/ex/v1/getTickers\", getTickersHandler).Methods(\"GET\")\n\n\treturn r\n}", "func MakeHandler(svc Service, opts ...kithttp.ServerOption) http.Handler {\n\tr := 
mux.NewRouter()\n\tr.StrictSlash(true)\n\n\tr.Methods(\"GET\").Path(`/`).Name(\"ruleList\").Handler(\n\t\tkithttp.NewServer(\n\t\t\tlistEndpoint(svc),\n\t\t\tdecodeListRequest,\n\t\t\tkithttp.EncodeJSONResponse,\n\t\t\topts...,\n\t\t),\n\t)\n\n\tr.Methods(\"GET\").Path(`/{id:[a-zA-Z0-9]+}`).Name(\"ruleGet\").Handler(\n\t\tkithttp.NewServer(\n\t\t\tgetEndpoint(svc),\n\t\t\tdecodeGetRequest,\n\t\t\tkithttp.EncodeJSONResponse,\n\t\t\tappend(\n\t\t\t\topts,\n\t\t\t\tkithttp.ServerBefore(extractMuxVars(varID)),\n\t\t\t)...,\n\t\t),\n\t)\n\n\tr.Methods(\"PUT\").Path(`/{id:[a-zA-Z0-9]+}/activate`).Name(\"ruleActivate\").Handler(\n\t\tkithttp.NewServer(\n\t\t\tactivateEndpoint(svc),\n\t\t\tdecodeActivateRequest,\n\t\t\tkithttp.EncodeJSONResponse,\n\t\t\tappend(\n\t\t\t\topts,\n\t\t\t\tkithttp.ServerBefore(extractMuxVars(varID)),\n\t\t\t)...,\n\t\t),\n\t)\n\n\tr.Methods(\"PUT\").Path(`/{id:[a-zA-Z0-9]+}/deactivate`).Name(\"ruleDeactivate\").Handler(\n\t\tkithttp.NewServer(\n\t\t\tdeactivateEndpoint(svc),\n\t\t\tdecodeDeactivateRequest,\n\t\t\tkithttp.EncodeJSONResponse,\n\t\t\tappend(\n\t\t\t\topts,\n\t\t\t\tkithttp.ServerBefore(extractMuxVars(varID)),\n\t\t\t)...,\n\t\t),\n\t)\n\n\tr.Methods(\"PUT\").Path(`/{id:[a-zA-Z0-9]+}/rollout`).Name(\"ruleUpdateRollout\").Handler(\n\t\tkithttp.NewServer(\n\t\t\tupdateRolloutEndpoint(svc),\n\t\t\tdecodeUpdateRolloutRequest,\n\t\t\tkithttp.EncodeJSONResponse,\n\t\t\tappend(\n\t\t\t\topts,\n\t\t\t\tkithttp.ServerBefore(extractMuxVars(varID)),\n\t\t\t)...,\n\t\t),\n\t)\n\n\treturn r\n}", "func CreateHTTPAPIHandler(iManager integration.IntegrationManager, cManager clientapi.DevOpsClientManager,\n\tsManager settings.SettingsManager,\n\tsbManager systembanner.SystemBannerManager,\n\ttpManager thirdpartyapi.ThirdPartyManager) (http.Handler, error) {\n\n\tmw := NewLicenseMiddlewareFactory(false)\n\tapiHandler := APIHandler{iManager: iManager, cManager: cManager, sManager: &sManager, groupValidator: mw}\n\twsContainer := restful.NewContainer()\n\twsContainer.EnableContentEncoding(true)\n\n\tapiV1Ws := new(restful.WebService)\n\n\tInstallFilters(apiV1Ws, cManager, mw)\n\n\tapiV1Ws.Path(\"/api/v1\").\n\t\tConsumes(restful.MIME_JSON).\n\t\tProduces(restful.MIME_JSON).\n\t\tParam(restful.HeaderParameter(\"Authorization\", \"Given Bearer token will use this as authorization for the API\"))\n\n\twsContainer.Add(apiV1Ws)\n\n\tapiV2Ws := new(restful.WebService)\n\tapiV2Ws.Path(\"\").\n\t\tConsumes(restful.MIME_JSON).\n\t\tProduces(restful.MIME_JSON).\n\t\tParam(restful.HeaderParameter(\"Authorization\", \"Given Bearer token will use this as authorization for the API\"))\n\twsContainer.Add(apiV2Ws)\n\n\tintegrationHandler := integration.NewIntegrationHandler(iManager)\n\tintegrationHandler.Install(apiV1Ws)\n\tintegrationHandler.Install(apiV2Ws)\n\n\tsettingsHandler := settings.NewSettingsHandler(sManager)\n\tsettingsHandler.Install(apiV1Ws)\n\tsettingsHandler.Install(apiV2Ws)\n\n\tsystemBannerHandler := systembanner.NewSystemBannerHandler(sbManager)\n\tsystemBannerHandler.Install(apiV1Ws)\n\tsystemBannerHandler.Install(apiV2Ws)\n\n\tthirPartyHandler := thirdparty.NewThirdPartyHandler(&sManager, cManager, tpManager)\n\tthirPartyHandler.Install(apiV1Ws)\n\tthirPartyHandler.Install(apiV2Ws)\n\n\tconfigurationHandler := 
thandler.NewAPIHandler(\"configuration\")\n\tconfigurationHandler.Install(apiV1Ws)\n\tconfigurationHandler.Install(apiV2Ws)\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/namespaces\").\n\t\t\tTo(apiHandler.handleGetNamespaces).\n\t\t\tWrites(ns.NamespaceList{}).\n\t\t\tDoc(\"get namespaces list\").\n\t\t\tReturns(200, \"OK\", ns.NamespaceList{}))\n\n\tapiV2Ws.Route(\n\t\tapiV2Ws.GET(\"/apis/v1/projects/{name}/clusters/{cluster}/namespaces\").\n\t\t\tTo(apiHandler.handleNewGetNamespaces).\n\t\t\tWrites(v1.NamespaceList{}).\n\t\t\tDoc(\"new get project list\").\n\t\t\tReturns(200, \"OK\", v1.NamespaceList{}))\n\n\tapiV2Ws.Route(\n\t\tapiV2Ws.GET(\"/project/v1/projects/{name}/clusters/{cluster}/namespaces\").\n\t\t\tTo(apiHandler.handleNewGetNamespaces).\n\t\t\tWrites(v1.NamespaceList{}).\n\t\t\tDoc(\"new get project list\").\n\t\t\tReturns(200, \"OK\", v1.NamespaceList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/appdeployment\").\n\t\t\tTo(apiHandler.handleDeploy).\n\t\t\tReads(deployment.AppDeploymentSpec{}).\n\t\t\tWrites(deployment.AppDeploymentSpec{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/configuration\").\n\t\t\tTo(apiHandler.handleGetPlatformConfiguration).\n\t\t\tWrites(configmap.ConfigMapDetail{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/cani\").\n\t\t\tTo(apiHandler.handleCanI).\n\t\t\tReads(authv1.SelfSubjectAccessReviewSpec{}).\n\t\t\tWrites(common.CanIResponse{}).\n\t\t\tDoc(\"Validates access for user\").\n\t\t\tReturns(200, \"OK\", common.CanIResponse{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/caniadmin\").\n\t\t\tTo(apiHandler.handleCanIAdmin).\n\t\t\tWrites(common.CanIResponse{}).\n\t\t\tDoc(\"Validates access for admin user\").\n\t\t\tReturns(200, \"OK\", common.CanIResponse{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/apis\").\n\t\t\tTo(apiHandler.handleGetAPIGroups).\n\t\t\tWrites(metav1.APIGroupList{}).\n\t\t\tDoc(\"Fetches a list of API groups available\").\n\t\t\tReturns(200, \"OK\", 
metav1.APIGroupList{}))\n\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.POST(\"/appdeployment/validate/name\").\n\t//\t\tTo(apiHandler.handleNameValidity).\n\t//\t\tReads(validation.AppNameAppNameValiditySpecValiditySpec{}).\n\t//\t\tWrites(validation.AppNameValidity{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.POST(\"/appdeployment/validate/imagereference\").\n\t//\t\tTo(apiHandler.handleImageReferenceValidity).\n\t//\t\tReads(validation.ImageReferenceValiditySpec{}).\n\t//\t\tWrites(validation.ImageReferenceValidity{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.POST(\"/appdeployment/validate/protocol\").\n\t//\t\tTo(apiHandler.handleProtocolValidity).\n\t//\t\tReads(validation.ProtocolValiditySpec{}).\n\t//\t\tWrites(validation.ProtocolValidity{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/appdeployment/protocols\").\n\t//\t\tTo(apiHandler.handleGetAvailableProcotols).\n\t//\t\tWrites(deployment.Protocols{}))\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.POST(\"/appdeploymentfromfile\").\n\t//\t\tTo(apiHandler.handleDeployFromFile).\n\t//\t\tReads(deployment.AppDeploymentFromFileSpec{}).\n\t//\t\tWrites(deployment.AppDeploymentFromFileResponse{}))\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/replicationcontroller\").\n\t//\t\tTo(apiHandler.handleGetReplicationControllerList).\n\t//\t\tWrites(replicationcontroller.ReplicationControllerList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/replicationcontroller/{namespace}\").\n\t//\t\tTo(apiHandler.handleGetReplicationControllerList).\n\t//\t\tWrites(replicationcontroller.ReplicationControllerList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/replicationcontroller/{namespace}/{replicationController}\").\n\t//\t\tTo(apiHandler.handleGetReplicationControllerDetail).\n\t//\t\tWrites(replicationcontroller.ReplicationControllerDetail{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.POST(\"/replicationcontroller/{namespace}/{replicationController}/update/pod\").\n\t//\t\tTo(apiHandler.handleUpdateReplicasCount).\n\t//\t\tReads(replicationcontroller.ReplicationControllerSpec{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/replicationcontroller/{namespace}/{replicationController}/pod\").\n\t//\t\tTo(apiHandler.handleGetReplicationControllerPods).\n\t//\t\tWrites(pod.PodList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/replicationcontroller/{namespace}/{replicationController}/event\").\n\t//\t\tTo(apiHandler.handleGetReplicationControllerEvents).\n\t//\t\tWrites(common.EventList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/replicationcontroller/{namespace}/{replicationController}/service\").\n\t//\t\tTo(apiHandler.handleGetReplicationControllerServices).\n\t//\t\tWrites(resourceService.ServiceList{}))\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/workload\").\n\t//\t\tTo(apiHandler.handleGetWorkloads).\n\t//\t\tWrites(workload.Workloads{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/workload/{namespace}\").\n\t//\t\tTo(apiHandler.handleGetWorkloads).\n\t//\t\tWrites(workload.Workloads{}))\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/cluster\").\n\t//\t\tTo(apiHandler.handleGetCluster).\n\t//\t\tWrites(cluster.Cluster{}))\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/discovery\").\n\t//\t\tTo(apiHandler.handleGetDiscovery).\n\t//\t\tWrites(discovery.Discovery{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/discovery/{namespace}\").\n\t//\t\tTo(apiHandler.handleGetDiscovery).\n\t//\t\tWrites(discovery.Discovery{}))\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/config\").\n\t//\t\tTo(apiHandler.handleGetConfig).\n\t//\t\tWrites(config.Config{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(
\"/config/{namespace}\").\n\t//\t\tTo(apiHandler.handleGetConfig).\n\t//\t\tWrites(config.Config{}))\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/replicaset\").\n\t//\t\tTo(apiHandler.handleGetReplicaSets).\n\t//\t\tWrites(replicaset.ReplicaSetList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/replicaset/{namespace}\").\n\t//\t\tTo(apiHandler.handleGetReplicaSets).\n\t//\t\tWrites(replicaset.ReplicaSetList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/replicaset/{namespace}/{replicaSet}\").\n\t//\t\tTo(apiHandler.handleGetReplicaSetDetail).\n\t//\t\tWrites(replicaset.ReplicaSetDetail{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/replicaset/{namespace}/{replicaSet}/pod\").\n\t//\t\tTo(apiHandler.handleGetReplicaSetPods).\n\t//\t\tWrites(pod.PodList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/replicaset/{namespace}/{replicaSet}/event\").\n\t//\t\tTo(apiHandler.handleGetReplicaSetEvents).\n\t//\t\tWrites(common.EventList{}))\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/pod\").\n\t//\t\tTo(apiHandler.handleGetPods).\n\t//\t\tWrites(pod.PodList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/pod/{namespace}\").\n\t//\t\tTo(apiHandler.handleGetPods).\n\t//\t\tWrites(pod.PodList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/pod/{namespace}/{pod}\").\n\t//\t\tTo(apiHandler.handleGetPodDetail).\n\t//\t\tWrites(pod.PodDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pod/{namespace}/{pod}/container\").\n\t\t\tTo(apiHandler.handleGetPodContainers).\n\t\t\tWrites(pod.PodDetail{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/pod/{namespace}/{pod}/event\").\n\t//\t\tTo(apiHandler.handleGetPodEvents).\n\t//\t\tWrites(common.EventList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pod/{namespace}/{pod}/shell/{container}\").\n\t\t\tTo(apiHandler.handleExecShell).\n\t\t\tWrites(TerminalResponse{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/pod/{namespace}/{pod}/persistentvolumeclaim\").\n\t//\t\tTo(apiHandler.handleGetPodPersistentVolumeClaims).\n\t//\t\tWrites(persistentvolumeclaim.PersistentVolumeClaimList{}))\n\t//\n\n\t// region Deployment\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/deployment\").\n\t\t\tTo(apiHandler.handleGetDeployments).\n\t\t\tWrites(deployment.DeploymentList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/deployment/{namespace}\").\n\t\t\tTo(apiHandler.handleGetDeployments).\n\t\t\tWrites(deployment.DeploymentList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/deployment/{namespace}/{deployment}\").\n\t\t\tTo(apiHandler.handleGetDeploymentDetail).\n\t\t\tWrites(deployment.DeploymentDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/deployment/{namespace}/{deployment}\").\n\t\t\tTo(apiHandler.handleUpdateDeploymentDetail).\n\t\t\tWrites(appsv1.Deployment{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/deployment/{namespace}/{deployment}/start\").\n\t\t\tTo(apiHandler.handleStartStopDeployment).\n\t\t\tDoc(\"start deployment\").\n\t\t\tReturns(http.StatusNoContent, \"OK\", struct{}{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/deployment/{namespace}/{deployment}/stop\").\n\t\t\tTo(apiHandler.handleStartStopDeployment).\n\t\t\tDoc(\"stop deployment\").\n\t\t\tReturns(http.StatusNoContent, \"OK\", 
struct{}{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/deployment/{namespace}/{deployment}/yaml\").\n\t\t\tTo(apiHandler.handleUpdateDeploymentDetailYaml).\n\t\t\tWrites(appsv1.Deployment{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/deployment/{namespace}/{deployment}/replicas\").\n\t\t\tTo(apiHandler.handleUpdateDeploymentReplicas).\n\t\t\tWrites(deployment.DeploymentReplica{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/deployment/{namespace}/{deployment}/network\").\n\t\t\tTo(apiHandler.handleUpdateDeploymentNetwork).\n\t\t\tWrites(appsv1.Deployment{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/deployment/{namespace}/{deployment}/container/{container}/\").\n\t\t\tTo(apiHandler.handlePutDeploymentContainer).\n\t\t\tWrites(appsv1.Deployment{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/deployment/{namespace}/{deployment}/container/{container}/image\").\n\t\t\tTo(apiHandler.handleUpdateDeploymentContainerImage).\n\t\t\tWrites(appsv1.Deployment{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/deployment/{namespace}/{deployment}/container/{container}/resources\").\n\t\t\tTo(apiHandler.handleUpdateDeploymentContainerResources).\n\t\t\tWrites(appsv1.Deployment{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/deployment/{namespace}/{deployment}/container/{container}/env\").\n\t\t\tTo(apiHandler.handleUpdateDeploymentContainerEnv).\n\t\t\tWrites(appsv1.Deployment{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/deployment/{namespace}/{deployment}/container/{container}/volumeMount/\").\n\t\t\tTo(apiHandler.handleCreateDeploymentVolumeMount).\n\t\t\tWrites(appsv1.Deployment{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/deployment/{namespace}/{deployment}/event\").\n\t\t\tTo(apiHandler.handleGetDeploymentEvents).\n\t\t\tWrites(common.EventList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/deployment/{namespace}/{deployment}/pods\").\n\t\t\tTo(apiHandler.handleGetDeploymentPods).\n\t\t\tWrites(pod.PodList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/deployment/{namespace}/{deployment}/oldreplicaset\").\n\t\t\tTo(apiHandler.handleGetDeploymentOldReplicaSets).\n\t\t\tWrites(replicaset.ReplicaSetList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/deployment/{namespace}/{deployment}/actions/rollback\").\n\t\t\tTo(apiHandler.handleRollBackDeploymentToRevision).\n\t\t\tReads(common.RevisionDetail{}).\n\t\t\tWrites(appsv1.Deployment{}).\n\t\t\tDoc(\"rollback deployment to special revision\").\n\t\t\tReturns(200, \"OK\", appsv1.Deployment{}))\n\n\t// endregion\n\n\t// region Scale\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.PUT(\"/scale/{kind}/{namespace}/{name}/\").\n\t//\t\tTo(apiHandler.handleScaleResource).\n\t//\t\tWrites(scaling.ReplicaCounts{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/scale/{kind}/{namespace}/{name}\").\n\t//\t\tTo(apiHandler.handleGetReplicaCount).\n\t//\t\tWrites(scaling.ReplicaCounts{}))\n\t// endregion\n\n\t// region 
Deamonset\n\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/daemonset\").\n\t//\t\tTo(apiHandler.handleGetDaemonSetList).\n\t//\t\tWrites(daemonset.DaemonSetList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/daemonset/{namespace}\").\n\t//\t\tTo(apiHandler.handleGetDaemonSetList).\n\t//\t\tWrites(daemonset.DaemonSetList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/daemonset/{namespace}/{daemonset}\").\n\t\t\tTo(apiHandler.handleGetDaemonSetDetail).\n\t\t\tWrites(appsv1.DaemonSet{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/daemonset/{namespace}/{daemonset}\").\n\t\t\tTo(apiHandler.handleUpdateDaemonSetDetail).\n\t\t\tWrites(appsv1.DaemonSet{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/daemonset/{namespace}/{daemonset}/yaml\").\n\t\t\tTo(apiHandler.handleUpdateDaemonSetDetail).\n\t\t\tWrites(appsv1.Deployment{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/daemonset/{namespace}/{daemonset}/pods\").\n\t\t\tTo(apiHandler.handleGetDaemonSetPods).\n\t\t\tWrites(pod.PodList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/daemonset/{namespace}/{daemonset}/container/{container}/\").\n\t\t\tTo(apiHandler.handlePutDaemonSetContainer).\n\t\t\tWrites(appsv1.DaemonSet{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/daemonset/{namespace}/{daemonset}/container/{container}/image\").\n\t\t\tTo(apiHandler.handleUpdateDaemonSetContainerImage).\n\t\t\tWrites(appsv1.DaemonSet{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/daemonset/{namespace}/{daemonset}/container/{container}/env\").\n\t\t\tTo(apiHandler.handleUpdateDaemonSetContainerEnv).\n\t\t\tWrites(appsv1.DaemonSet{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/daemonset/{namespace}/{daemonset}/container/{container}/resources\").\n\t\t\tTo(apiHandler.handleUpdateDaemonSetContainerResource).\n\t\t\tWrites(appsv1.DaemonSet{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/daemonset/{namespace}/{daemonset}/container/{container}/volumeMount/\").\n\t\t\tTo(apiHandler.handleCreateDaemonSetVolumeMount).\n\t\t\tWrites(appsv1.DaemonSet{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/daemonset/{namespace}/{daemonSet}/service\").\n\t//\t\tTo(apiHandler.handleGetDaemonSetServices).\n\t//\t\tWrites(resourceService.ServiceList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/daemonset/{namespace}/{daemonSet}/event\").\n\t//\t\tTo(apiHandler.handleGetDaemonSetEvents).\n\t//\t\tWrites(common.EventList{}))\n\n\t// 
endregion\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/horizontalpodautoscaler\").\n\t\t\tTo(apiHandler.handleGetHorizontalPodAutoscalerList).\n\t\t\tWrites(horizontalpodautoscaler.HorizontalPodAutoscalerList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/horizontalpodautoscaler/{namespace}\").\n\t\t\tTo(apiHandler.handleGetHorizontalPodAutoscalerList).\n\t\t\tWrites(horizontalpodautoscaler.HorizontalPodAutoscalerList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/horizontalpodautoscaler/{namespace}/{horizontalpodautoscaler}\").\n\t\t\tTo(apiHandler.handleGetHorizontalPodAutoscalerDetail).\n\t\t\tWrites(horizontalpodautoscaler.HorizontalPodAutoscalerDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/horizontalpodautoscaler/{namespace}\").\n\t\t\tTo(apiHandler.handleCreateHorizontalPodAutoscaler).\n\t\t\tReads(horizontalpodautoscaler.HorizontalPodAutoscalerDetail{}).\n\t\t\tWrites(horizontalpodautoscaler.HorizontalPodAutoscalerDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/horizontalpodautoscaler/{namespace}/{horizontalpodautoscaler}\").\n\t\t\tTo(apiHandler.handleUpdateHorizontalPodAutoscaler).\n\t\t\tWrites(horizontalpodautoscaler.HorizontalPodAutoscalerDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/horizontalpodautoscaler/{namespace}/{horizontalpodautoscaler}\").\n\t\t\tTo(apiHandler.handleDeleteHorizontalPodAutoscaler).\n\t\t\tWrites(horizontalpodautoscaler.HorizontalPodAutoscalerDetail{}))\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/job\").\n\t//\t\tTo(apiHandler.handleGetJobList).\n\t//\t\tWrites(job.JobList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/job/{namespace}\").\n\t//\t\tTo(apiHandler.handleGetJobList).\n\t//\t\tWrites(job.JobList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/job/{namespace}/{name}\").\n\t//\t\tTo(apiHandler.handleGetJobDetail).\n\t//\t\tWrites(job.JobDetail{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/job/{namespace}/{name}/pod\").\n\t//\t\tTo(apiHandler.handleGetJobPods).\n\t//\t\tWrites(pod.PodList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/job/{namespace}/{name}/event\").\n\t//\t\tTo(apiHandler.handleGetJobEvents).\n\t//\t\tWrites(common.EventList{}))\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/cronjob\").\n\t//\t\tTo(apiHandler.handleGetCronJobList).\n\t//\t\tWrites(cronjob.CronJobList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/cronjob/{namespace}\").\n\t//\t\tTo(apiHandler.handleGetCronJobList).\n\t//\t\tWrites(cronjob.CronJobList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/cronjob/{namespace}/{name}\").\n\t//\t\tTo(apiHandler.handleGetCronJobDetail).\n\t//\t\tWrites(cronjob.CronJobDetail{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/cronjob/{namespace}/{name}/job\").\n\t//\t\tTo(apiHandler.handleGetCronJobJobs).\n\t//\t\tWrites(job.JobList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/cronjob/{namespace}/{name}/event\").\n\t//\t\tTo(apiHandler.handleGetCronJobEvents).\n\t//\t\tWrites(common.EventList{}))\n\t//\n\n\t// region Namespace\n\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.POST(\"/namespace\").\n\t//\t\tTo(apiHandler.handleCreateNamespace).\n\t//\t\tReads(ns.NamespaceSpec{}).\n\t//\t\tWrites(ns.NamespaceSpec{}))\n\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/namespace/{name}\").\n\t//\t\tTo(apiHandler.handleGetNamespaceDetail).\n\t//\t\tWrites(ns.NamespaceDetail{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/namespace/{name}/event\").\n\t//\t\tTo(apiHandler.handleGetNamespaceEvents).\n\t//\t\tWrites(common.EventList{}))\n\t//\n\t// endregion\n\n\t// region 
Secret\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/secret\").\n\t\t\tTo(apiHandler.handleGetSecretList).\n\t\t\tWrites(secret.SecretList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/secret/{namespace}\").\n\t\t\tTo(apiHandler.handleGetSecretList).\n\t\t\tWrites(secret.SecretList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/secret/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetSecretDetail).\n\t\t\tWrites(secret.SecretDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/secret/{name}/resources\").\n\t\t\tTo(apiHandler.handleGetSecretRelatedResources).\n\t\t\tWrites(secret.ResourceList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/secret/{namespace}/{name}/resources\").\n\t\t\tTo(apiHandler.handleGetSecretRelatedResources).\n\t\t\tWrites(secret.ResourceList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/secret/{name}\").\n\t\t\tTo(apiHandler.handleUpdateSecret).\n\t\t\tWrites(secret.SecretDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/secret/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleUpdateSecret).\n\t\t\tWrites(secret.SecretDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/secret\").\n\t\t\tTo(apiHandler.handleCreateSecret).\n\t\t\tReads(secret.SecretDetail{}).\n\t\t\tWrites(secret.Secret{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/secret/{namespace}\").\n\t\t\tTo(apiHandler.handleCreateSecret).\n\t\t\tReads(secret.SecretDetail{}).\n\t\t\tWrites(secret.Secret{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/secret/{name}\").\n\t\t\tTo(apiHandler.handleDeleteSecret).\n\t\t\tWrites(secret.SecretDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/secret/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleDeleteSecret).\n\t\t\tWrites(secret.SecretDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/secret/{namespace}/{name}/actions/tradeapp\").\n\t\t\tTo(apiHandler.handleUpdateSecretBelongApp).\n\t\t\tReads(common.AppNameDetail{}).\n\t\t\tWrites(secret.SecretDetail{}).\n\t\t\tDoc(\"update secret belongs app\").\n\t\t\tReturns(200, \"OK\", secret.SecretDetail{}))\n\t// endregion\n\n\t// region Configmap\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/configmap\").\n\t\t\tTo(apiHandler.handleGetConfigMapList).\n\t\t\tWrites(configmap.ConfigMapList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/configmap/{namespace}\").\n\t\t\tTo(apiHandler.handleGetConfigMapList).\n\t\t\tWrites(configmap.ConfigMapList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/configmap/{namespace}/{configmap}\").\n\t\t\tTo(apiHandler.handleGetConfigMapDetail).\n\t\t\tWrites(configmap.ConfigMapDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/configmap/{namespace}\").\n\t\t\tTo(apiHandler.handleCreateConfigMap).\n\t\t\tReads(configmap.ConfigMapDetail{}).\n\t\t\tWrites(configmap.ConfigMapDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/configmap/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleUpdateConfigMap).\n\t\t\tWrites(configmap.ConfigMapDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/configmap/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleDeleteConfigMap).\n\t\t\tWrites(configmap.ConfigMapDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/configmap/{namespace}/{name}/actions/tradeapp\").\n\t\t\tTo(apiHandler.handleUpdateConfigMapBelongApp).\n\t\t\tReads(common.AppNameDetail{}).\n\t\t\tWrites(configmap.ConfigMapDetail{}).\n\t\t\tDoc(\"update configmap belongs app\").\n\t\t\tReturns(200, \"OK\", configmap.ConfigMapDetail{}))\n\t// 
endregion\n\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/service\").\n\t//\t\tTo(apiHandler.handleGetServiceList).\n\t//\t\tWrites(resourceService.ServiceList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/service/{namespace}\").\n\t//\t\tTo(apiHandler.handleGetServiceList).\n\t//\t\tWrites(resourceService.ServiceList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/service/{namespace}/{service}\").\n\t\t\tTo(apiHandler.handleGetServiceDetail))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/service/{namespace}/{service}/pod\").\n\t//\t\tTo(apiHandler.handleGetServicePods).\n\t//\t\tWrites(pod.PodList{}))\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/ingress\").\n\t//\t\tTo(apiHandler.handleGetIngressList).\n\t//\t\tWrites(ingress.IngressList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/ingress/{namespace}\").\n\t//\t\tTo(apiHandler.handleGetIngressList).\n\t//\t\tWrites(ingress.IngressList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/ingress/{namespace}/{name}\").\n\t//\t\tTo(apiHandler.handleGetIngressDetail).\n\t//\t\tWrites(ingress.IngressDetail{}))\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/statefulset\").\n\t//\t\tTo(apiHandler.handleGetStatefulSetList).\n\t//\t\tWrites(statefulset.StatefulSetList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/statefulset/{namespace}\").\n\t//\t\tTo(apiHandler.handleGetStatefulSetList).\n\t//\t\tWrites(statefulset.StatefulSetList{}))\n\n\t// region Statefulset\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/statefulset/{namespace}/{statefulset}\").\n\t\t\tTo(apiHandler.handleGetStatefulSetDetail).\n\t\t\tWrites(appsv1.StatefulSet{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/statefulset/{namespace}/{statefulset}\").\n\t\t\tTo(apiHandler.handleUpdateStatefulSetDetail).\n\t\t\tWrites(appsv1.StatefulSet{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/statefulset/{namespace}/{statefulset}/start\").\n\t\t\tTo(apiHandler.handleStartStopStatefulSet).\n\t\t\tDoc(\"start statefulset\").\n\t\t\tReturns(http.StatusNoContent, \"OK\", struct{}{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/statefulset/{namespace}/{statefulset}/stop\").\n\t\t\tTo(apiHandler.handleStartStopStatefulSet).\n\t\t\tDoc(\"stop statefulset\").\n\t\t\tReturns(http.StatusNoContent, \"OK\", 
struct{}{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/statefulset/{namespace}/{statefulset}/yaml\").\n\t\t\tTo(apiHandler.handleUpdateStatefulSetDetail).\n\t\t\tWrites(appsv1.StatefulSet{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/statefulset/{namespace}/{statefulset}/replicas\").\n\t\t\tTo(apiHandler.handleUpdateStatefulSetReplicas).\n\t\t\tWrites(appsv1.StatefulSet{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/statefulset/{namespace}/{statefulset}/pods\").\n\t\t\tTo(apiHandler.handleGetStatefulSetPods).\n\t\t\tWrites(pod.PodList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/statefulset/{namespace}/{statefulset}/container/{container}/\").\n\t\t\tTo(apiHandler.handlePutStatefulSetContainer).\n\t\t\tWrites(appsv1.StatefulSet{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/statefulset/{namespace}/{statefulset}/container/{container}/image\").\n\t\t\tTo(apiHandler.handleUpdateStatefulSetContainerImage).\n\t\t\tWrites(appsv1.StatefulSet{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/statefulset/{namespace}/{statefulset}/container/{container}/env\").\n\t\t\tTo(apiHandler.handleUpdateStatefulSetContainerEnv).\n\t\t\tWrites(appsv1.StatefulSet{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/statefulset/{namespace}/{statefulset}/container/{container}/resources\").\n\t\t\tTo(apiHandler.handleUpdateStatefulSetContainerResource).\n\t\t\tWrites(appsv1.StatefulSet{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/statefulset/{namespace}/{statefulset}/container/{container}/volumeMount/\").\n\t\t\tTo(apiHandler.handleCreateStatefulSetVolumeMount).\n\t\t\tWrites(appsv1.StatefulSet{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/statefulset/{namespace}/{statefulset}/event\").\n\t//\t\tTo(apiHandler.handleGetStatefulSetEvents).\n\t//\t\tWrites(common.EventList{}))\n\n\t// endregion\n\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/node\").\n\t//\t\tTo(apiHandler.handleGetNodeList).\n\t//\t\tWrites(node.NodeList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/node/{name}\").\n\t//\t\tTo(apiHandler.handleGetNodeDetail).\n\t//\t\tWrites(node.NodeDetail{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/node/{name}/event\").\n\t//\t\tTo(apiHandler.handleGetNodeEvents).\n\t//\t\tWrites(common.EventList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/node/{name}/pod\").\n\t//\t\tTo(apiHandler.handleGetNodePods).\n\t//\t\tWrites(pod.PodList{}))\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.DELETE(\"/_raw/{kind}/namespace/{namespace}/name/{name}\").\n\t//\t\tTo(apiHandler.handleDeleteResource))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/_raw/{kind}/namespace/{namespace}/name/{name}\").\n\t//\t\tTo(apiHandler.handleGetResource))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.PUT(\"/_raw/{kind}/namespace/{namespace}/name/{name}\").\n\t//\t\tTo(apiHandler.handlePutResource))\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.DELETE(\"/_raw/{kind}/name/{name}\").\n\t//\t\tTo(apiHandler.handleDeleteResource))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/_raw/{kind}/name/{name}\").\n\t//\t\tTo(apiHandler.handleGetResource))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.PUT(\"/_raw/{kind}/name/{name}\").\n\t//\t\tTo(apiHandler.handlePutResource))\n\t//\n\n\t// region 
RBAC\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/rbac/role\").\n\t\t\tTo(apiHandler.handleGetRbacRoleList).\n\t\t\tWrites(rbacroles.RbacRoleList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/rbac/rolebinding\").\n\t\t\tTo(apiHandler.handleGetRbacRoleBindingList).\n\t\t\tWrites(rbacrolebindings.RbacRoleBindingList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/rolebinding/{namespace}\").\n\t\t\tTo(apiHandler.handleListRoleBindingsOriginal).\n\t\t\tWrites(rolebinding.RoleBindingList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/rolebinding/{namespace}\").\n\t\t\tTo(apiHandler.handleCreateRoleBinding).\n\t\t\tDoc(\"creates a rolebinding\").\n\t\t\tWrites(rbacv1.RoleBinding{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/rolebinding/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleDeleteRoleBindingsOriginal).\n\t\t\tDoc(\"delete a rolebinding\").\n\t\t\tWrites(rbacv1.RoleBinding{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/rbac/status\").\n\t//\t\tTo(apiHandler.handleRbacStatus).\n\t//\t\tWrites(validation.RbacStatus{}))\n\n\t// endregion\n\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/persistentvolume\").\n\t//\t\tTo(apiHandler.handleGetPersistentVolumeList).\n\t//\t\tWrites(persistentvolume.PersistentVolumeList{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/persistentvolume/{persistentvolume}\").\n\t//\t\tTo(apiHandler.handleGetPersistentVolumeDetail).\n\t//\t\tWrites(persistentvolume.PersistentVolumeDetail{}))\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/persistentvolume/namespace/{namespace}/name/{persistentvolume}\").\n\t//\t\tTo(apiHandler.handleGetPersistentVolumeDetail).\n\t//\t\tWrites(persistentvolume.PersistentVolumeDetail{}))\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/persistentvolumeclaim/\").\n\t//\t\tTo(apiHandler.handleGetPersistentVolumeClaimList).\n\t//\t\tWrites(persistentvolumeclaim.PersistentVolumeClaimList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/persistentvolumeclaim/{namespace}\").\n\t\t\tTo(apiHandler.handleGetPersistentVolumeClaimList).\n\t\t\tWrites(persistentvolumeclaim.PersistentVolumeClaimList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/persistentvolumeclaim/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetPersistentVolumeClaimDetail).\n\t\t\tWrites(persistentvolumeclaim.PersistentVolumeClaimDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/persistentvolumeclaim/{namespace}\").\n\t\t\tTo(apiHandler.handleCreatePersistentVolumeClaim).\n\t\t\tReads(persistentvolumeclaim.PersistentVolumeClaimDetail{}).\n\t\t\tWrites(persistentvolumeclaim.PersistentVolumeClaimDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/persistentvolumeclaim/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleUpdatePersistentVolumeClaim).\n\t\t\tWrites(persistentvolumeclaim.PersistentVolumeClaimDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/persistentvolumeclaim/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleDeletePersistentVolumeClaim).\n\t\t\tWrites(persistentvolumeclaim.PersistentVolumeClaimDetail{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/persistentvolumeclaim/{namespace}/{name}/actions/tradeapp\").\n\t\t\tTo(apiHandler.handleUpdatePersistentVolumeClaimBelongApp).\n\t\t\tReads(common.AppNameDetail{}).\n\t\t\tWrites(persistentvolumeclaim.PersistentVolumeClaimDetail{}).\n\t\t\tDoc(\"update persistentvolumeclaim belongs app\").\n\t\t\tReturns(200, \"OK\", 
persistentvolumeclaim.PersistentVolumeClaimDetail{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/storageclass\").\n\t\t\tTo(apiHandler.handleGetStorageClassList).\n\t\t\tWrites(storageclass.StorageClassList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/storageclass/{storageclass}\").\n\t\t\tTo(apiHandler.handleGetStorageClass).\n\t\t\tWrites(storageclass.StorageClass{}))\n\n\t// apiV1Ws.Route(\n\t// \tapiV1Ws.GET(\"/storageclass/{storageclass}/persistentvolume\").\n\t// \t\tTo(apiHandler.handleGetStorageClassPersistentVolumes).\n\t// \t\tWrites(persistentvolume.PersistentVolumeList{}))\n\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/log/source/{namespace}/{resourceName}/{resourceType}\").\n\t//\t\tTo(apiHandler.handleLogSource).\n\t//\t\tWrites(controller.LogSources{}))\n\n\t// region log\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/log/{namespace}/{pod}\").\n\t\t\tTo(apiHandler.handleLogs).\n\t\t\tWrites(logs.LogDetails{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/log/{namespace}/{pod}/{container}\").\n\t\t\tTo(apiHandler.handleLogs).\n\t\t\tWrites(logs.LogDetails{}))\n\t//\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/log/file/{namespace}/{pod}/{container}\").\n\t\t\tTo(apiHandler.handleLogFile).\n\t\t\tWrites(logs.LogDetails{}))\n\t// endregion\n\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/overview/\").\n\t//\t\tTo(apiHandler.handleOverview).\n\t//\t\tWrites(overview.Overview{}))\n\t//\n\t//apiV1Ws.Route(\n\t//\tapiV1Ws.GET(\"/overview/{namespace}\").\n\t//\t\tTo(apiHandler.handleOverview).\n\t//\t\tWrites(overview.Overview{}))\n\t//\n\n\t// region others\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/others\").\n\t\t\tTo(apiHandler.handleOtherResourcesList).\n\t\t\tWrites(other.ResourceList{}).\n\t\t\tDoc(\"get all resources\").\n\t\t\tParam(restful.QueryParameter(\"filterBy\", \"filter option separated by comma. For example parameter1,value1,parameter2,value2 - means that the data should be filtered by parameter1 equals value1 and parameter2 equals value2\").\n\t\t\t\tDataType(\"string\").\n\t\t\t\tAllowableValues(map[string]string{\n\t\t\t\t\t\"name\": \"search by name partial match\",\n\t\t\t\t\t\"namespace\": \"filter by namespace\",\n\t\t\t\t\t\"kind\": \"filter by kind\",\n\t\t\t\t\t\"scope\": \"allowed value `namespaced` and `clustered` filter by if a resource is namespaced\",\n\t\t\t\t})).\n\t\t\tParam(restful.QueryParameter(\"sortBy\", \"sort option separated by comma. 
For example a,parameter1,d,parameter2 - means that the data should be sorted by parameter1 (ascending) and later sort by parameter2 (descending)\").\n\t\t\t\tDataType(\"string\").\n\t\t\t\tAllowableValues(map[string]string{\n\t\t\t\t\t\"name\": \"\",\n\t\t\t\t\t\"namespace\": \"\",\n\t\t\t\t\t\"kind\": \"\",\n\t\t\t\t\t\"creationTimestamp\": \"\",\n\t\t\t\t})).\n\t\t\tParam(restful.QueryParameter(\"itemsPerPage\", \"items per page\").\n\t\t\t\tDataType(\"integer\")).\n\t\t\tParam(restful.QueryParameter(\"page\", \"page number\").DataType(\"integer\")).\n\t\t\tReturns(200, \"OK\", other.ResourceList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/others\").\n\t\t\tTo(apiHandler.handleOtherResourceCreate).\n\t\t\tDoc(\"create a resource\").\n\t\t\tReads([]unstructured.Unstructured{}).\n\t\t\tConsumes(restful.MIME_JSON).\n\t\t\tReturns(200, \"OK\", CreateResponse{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/releases\").\n\t\t\tTo(apiHandler.handleReleaseCreate).\n\t\t\tDoc(\"create a release\").\n\t\t\tReads([]unstructured.Unstructured{}).\n\t\t\tConsumes(restful.MIME_JSON).\n\t\t\tReturns(200, \"OK\", []unstructured.Unstructured{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/releases/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetReleaseDetail).\n\t\t\tDoc(\"get a release\").\n\t\t\tReads(release.ReleaseDetails{}).\n\t\t\tConsumes(restful.MIME_JSON).\n\t\t\tReturns(200, \"OK\", release.ReleaseDetails{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/others/{group}/{version}/{kind}/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleOtherResourceDetail).\n\t\t\tWrites(other.OtherResourceDetail{}).\n\t\t\tDoc(\"get a resource detail with events\").\n\t\t\tReturns(200, \"OK\", other.OtherResourceDetail{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/others/{group}/{version}/{kind}/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleOtherResourceDetail).\n\t\t\tDoc(\"delete a resource\"))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/others/{group}/{version}/{kind}/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleOtherResourceDetail).\n\t\t\tDoc(\"update a resource with whole resource json\").\n\t\t\tReads(unstructured.Unstructured{}).\n\t\t\tConsumes(restful.MIME_JSON))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PATCH(\"/others/{group}/{version}/{kind}/{namespace}/{name}/{field}\").\n\t\t\tTo(apiHandler.handleOtherResourcePatch).\n\t\t\tDoc(\"update resource annotations or labels\").\n\t\t\tReads(other.FieldPayload{}).\n\t\t\tConsumes(restful.MIME_JSON))\n\t// endregion\n\n\t// ---- DEVOPS APIS ----\n\n\t// region Jenkins\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/jenkinses\").\n\t\t\tTo(apiHandler.handleGetJenkins).\n\t\t\tWrites(jenkins.JenkinsList{}).\n\t\t\tDoc(\"get jenkins list\").\n\t\t\tReturns(200, \"OK\", jenkins.JenkinsList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/jenkinses/{name}\").\n\t\t\tTo(apiHandler.handleRetriveJenkins).\n\t\t\tWrites(v1alpha1.Jenkins{}).\n\t\t\tDoc(\"retrieve jenkins config\").\n\t\t\tReturns(200, \"OK\", v1alpha1.Jenkins{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/jenkinses/{name}/resources\").\n\t\t\tTo(apiHandler.handleGetJenkinsResources).\n\t\t\tWrites(common.ResourceList{}).\n\t\t\tDoc(\"retrieve resources associated with jenkins\").\n\t\t\tReturns(200, \"OK\", common.ResourceList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/jenkinses/{name}\").\n\t\t\tTo(apiHandler.handleDeleteJenkins).\n\t\t\tWrites(jenkins.Jenkins{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/jenkinses/{name}\").\n\t\t\tTo(apiHandler.handlePutJenkins).\n\t\t\tWrites(v1alpha1.Jenkins{}).\n\t\t\tDoc(\"update jenkins 
config\").\n\t\t\tReturns(200, \"OK\", v1alpha1.Jenkins{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/jenkinses\").\n\t\t\tTo(apiHandler.handleCreateJenkins).\n\t\t\tWrites(v1alpha1.Jenkins{}).\n\t\t\tDoc(\"update jenkins config\").\n\t\t\tReturns(200, \"OK\", v1alpha1.Jenkins{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/jenkinsbinding\").\n\t\t\tTo(apiHandler.handleGetJenkinsBindingList).\n\t\t\tWrites(jenkinsbinding.JenkinsBindingList{}).\n\t\t\tDoc(\"get jenkinsbinding list\").\n\t\t\tReturns(200, \"OK\", jenkinsbinding.JenkinsBindingList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/jenkinsbinding/{namespace}\").\n\t\t\tTo(apiHandler.handleGetJenkinsBindingList).\n\t\t\tWrites(jenkinsbinding.JenkinsBindingList{}).\n\t\t\tDoc(\"get namespaced jenkinsbinding list\").\n\t\t\tReturns(200, \"OK\", jenkinsbinding.JenkinsBindingList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/jenkinsbinding/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetJenkinsBinding).\n\t\t\tDoc(\"get jenkinsbinding details\").\n\t\t\tWrites(v1alpha1.JenkinsBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/jenkinsbinding/{namespace}/{name}/croncheck\").\n\t\t\tTo(apiHandler.handleCronCheck).\n\t\t\tDoc(\"cron syntax check\").\n\t\t\tWrites(jenkinsbinding.CronCheckResult{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/jenkinsbinding/{namespace}/{name}/resources\").\n\t\t\tTo(apiHandler.handleGetJenkinsBindingResources).\n\t\t\tWrites(common.ResourceList{}).\n\t\t\tDoc(\"retrieve resources associated with jenkinsbinding\").\n\t\t\tReturns(200, \"OK\", common.ResourceList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/jenkinsbinding/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleDeleteJenkinsBinding).\n\t\t\tWrites(v1alpha1.JenkinsBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/jenkinsbinding/{namespace}\").\n\t\t\tTo(apiHandler.handleCreateJenkinsBinding).\n\t\t\tWrites(v1alpha1.JenkinsBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/jenkinsbinding/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleUpdateJenkinsBinding).\n\t\t\tWrites(v1alpha1.JenkinsBinding{}))\n\t// endregion\n\n\t//domain\n\t// region DomainBinding\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/domainbinding\").\n\t\t\tTo(apiHandler.handleGetDomainBindingList).\n\t\t\tWrites(domainbinding.DomainBindingList{}).\n\t\t\tDoc(\"get domianbinding list\"))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/domainbinding\").\n\t\t\tTo(apiHandler.handleCreateDomainBinding).\n\t\t\tWrites(domainbinding.DomainBindingDetail{}).\n\t\t\tDoc(\"create domainbinding\"))\n\tdomainBindDetailURI := \"/domainbinding/{name}\"\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(domainBindDetailURI).\n\t\t\tTo(apiHandler.handleGetDomainBindingDetail).\n\t\t\tWrites(domainbinding.DomainBindingDetail{}).\n\t\t\tDoc(\"get domainbinding detail\"))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(domainBindDetailURI).\n\t\t\tTo(apiHandler.handleUpdateDomainBindingDetail).\n\t\t\tWrites(domainbinding.DomainBindingDetail{}).\n\t\t\tDoc(\"update domainbinding detail\"))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(domainBindDetailURI).\n\t\t\tTo(apiHandler.handleDeleteDomainBindingDetail).\n\t\t\tDoc(\"delete domainbinding detailt\"))\n\t// endregion\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/chart/{name}\").\n\t\t\tTo(apiHandler.handleGetChartDetail).\n\t\t\tWrites(catalog.Chart{}).\n\t\t\tDoc(\"get chart detail\"))\n\n\t// region PipelineTemplate\n\t// 
PipelineTemplateSync\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipelinetemplatesync/{namespace}\").\n\t\t\tTo(apiHandler.handleGetPipelineTemplateSyncList).\n\t\t\tWrites(pipelinetemplatesync.PipelineTemplateSyncList{}).\n\t\t\tDoc(\"get pipelineTemplateSync list\").\n\t\t\tReturns(200, \"OK\", pipelinetemplatesync.PipelineTemplateSyncList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipelinetemplatesync/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetPipelineTemplateSync).\n\t\t\tWrites(pipelinetemplatesync.PipelineTemplateSync{}).\n\t\t\tDoc(\"get detail of specific PipelineTemplateSync\").\n\t\t\tReturns(200, \"OK\", pipelinetemplatesync.PipelineTemplateSync{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/pipelinetemplatesync/{namespace}\").\n\t\t\tTo(apiHandler.handleCreatePipelineTemplateSync).\n\t\t\tWrites(pipelinetemplatesync.PipelineTemplateSync{}).\n\t\t\tDoc(\"create a pipelineTemplateSync\").\n\t\t\tReturns(200, \"OK\", pipelinetemplatesync.PipelineTemplateSync{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/pipelinetemplatesync/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleUpdatePipelineTemplateSync).\n\t\t\tWrites(pipelinetemplatesync.PipelineTemplateSync{}).\n\t\t\tDoc(\"update a pipelineTemplateSync\").\n\t\t\tReturns(200, \"OK\", pipelinetemplatesync.PipelineTemplateSync{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/pipelinetemplatesync/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleDeletePipelineTemplateSync).\n\t\t\tWrites(struct{}{}).\n\t\t\tDoc(\"delete a PipelineTemplateSync\").\n\t\t\tReturns(200, \"OK\", struct{}{}))\n\n\t// PipelineTaskTemplate\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipelinetasktemplate/{namespace}\").\n\t\t\tTo(apiHandler.handleGetPipelineTaskTemplateList).\n\t\t\tWrites(pipelinetasktemplate.PipelineTaskTemplateList{}).\n\t\t\tDoc(\"get a list of PipelineTaskTemplate\").\n\t\t\tReturns(200, \"OK\", pipelinetasktemplate.PipelineTaskTemplate{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipelinetasktemplate/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetPipelineTaskTemplate).\n\t\t\tWrites(pipelinetasktemplate.PipelineTaskTemplate{}).\n\t\t\tDoc(\"get a PipelineTaskTemplate\").\n\t\t\tReturns(200, \"OK\", pipelinetasktemplate.PipelineTaskTemplate{}))\n\n\t// ClusterPipelineTemplate\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/clusterpipelinetemplate\").\n\t\t\tTo(apiHandler.handleGetClusterPipelineTemplateList).\n\t\t\tWrites(clusterpipelinetemplate.ClusterPipelineTemplateList{}).\n\t\t\tDoc(\"get a list of ClusterPipelineTemplate\").\n\t\t\tReturns(200, \"OK\", clusterpipelinetemplate.ClusterPipelineTemplateList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/clusterpipelinetemplate/{name}\").\n\t\t\tTo(apiHandler.handleGetClusterPipelineTemplate).\n\t\t\tWrites(clusterpipelinetemplate.ClusterPipelineTemplate{}).\n\t\t\tDoc(\"get a ClusterPipelineTemplate\").\n\t\t\tReturns(200, \"OK\", clusterpipelinetemplate.ClusterPipelineTemplate{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/clusterpipelinetemplate/{name}/preview\").\n\t\t\tTo(apiHandler.handlePreviewClusterPipelineTemplate).\n\t\t\tWrites(clusterpipelinetemplate.PreviewOptions{}).\n\t\t\tDoc(\"preview a ClusterPipelineTemplate\").\n\t\t\tReturns(200, \"OK\", \"\"))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/clusterpipelinetemplate/{name}/exports\").\n\t\t\tTo(apiHandler.handlerExportsClusterPiplineTemplate).\n\t\t\tWrites(clusterpipelinetemplate.PipelineExportedVariables{}).\n\t\t\tDoc(\"get the exports in clusterpipelinetemplate\").\n\t\t\tReturns(200, \"OK\", 
clusterpipelinetemplate.PipelineExportedVariables{}))\n\n\t// PipelineTemplateSync\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipelinetemplatesync/{namespace}\").\n\t\t\tTo(apiHandler.handleGetPipelineTemplateSyncList).\n\t\t\tWrites(pipelinetemplatesync.PipelineTemplateSyncList{}).\n\t\t\tDoc(\"get pipelineTemplateSync list\").\n\t\t\tReturns(200, \"OK\", pipelinetemplatesync.PipelineTemplateSyncList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipelinetemplatesync/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetPipelineTemplateSync).\n\t\t\tWrites(pipelinetemplatesync.PipelineTemplateSync{}).\n\t\t\tDoc(\"get detail of specific PipelineTemplateSync\").\n\t\t\tReturns(200, \"OK\", pipelinetemplatesync.PipelineTemplateSync{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/pipelinetemplatesync/{namespace}\").\n\t\t\tTo(apiHandler.handleCreatePipelineTemplateSync).\n\t\t\tWrites(pipelinetemplatesync.PipelineTemplateSync{}).\n\t\t\tDoc(\"create a pipelineTemplateSync\").\n\t\t\tReturns(200, \"OK\", pipelinetemplatesync.PipelineTemplateSync{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/pipelinetemplatesync/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleUpdatePipelineTemplateSync).\n\t\t\tWrites(pipelinetemplatesync.PipelineTemplateSync{}).\n\t\t\tDoc(\"update a pipelineTemplateSync\").\n\t\t\tReturns(200, \"OK\", pipelinetemplatesync.PipelineTemplateSync{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/pipelinetemplatesync/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleDeletePipelineTemplateSync).\n\t\t\tWrites(struct{}{}).\n\t\t\tDoc(\"delete a PipelineTemplateSync\").\n\t\t\tReturns(200, \"OK\", struct{}{}))\n\n\t// PipelineTaskTemplate\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipelinetasktemplate/{namespace}\").\n\t\t\tTo(apiHandler.handleGetPipelineTaskTemplateList).\n\t\t\tWrites(pipelinetasktemplate.PipelineTaskTemplateList{}).\n\t\t\tDoc(\"get a list of PipelineTaskTemplate\").\n\t\t\tReturns(200, \"OK\", pipelinetasktemplate.PipelineTaskTemplate{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipelinetasktemplate/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetPipelineTaskTemplate).\n\t\t\tWrites(pipelinetasktemplate.PipelineTaskTemplate{}).\n\t\t\tDoc(\"get a PipelineTaskTemplate\").\n\t\t\tReturns(200, \"OK\", pipelinetasktemplate.PipelineTaskTemplate{}))\n\n\t// PipelineTemplate\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipelinetemplate/{namespace}\").\n\t\t\tTo(apiHandler.handleGetPipelineTemplateList).\n\t\t\tWrites(pipelinetemplate.PipelineTemplateList{}).\n\t\t\tDoc(\"get a list of PipelineTemplate\").\n\t\t\tReturns(200, \"OK\", pipelinetemplate.PipelineTemplateList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipelinetemplate/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetPipelineTemplate).\n\t\t\tWrites(pipelinetemplate.PipelineTemplate{}).\n\t\t\tDoc(\"get a PipelineTemplate\").\n\t\t\tReturns(200, \"OK\", pipelinetemplate.PipelineTemplate{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"pipelinetemplate/{namespace}/{name}/preview\").\n\t\t\tTo(apiHandler.handlePreviewPipelineTemplate).\n\t\t\tWrites(pipelinetemplate.PreviewOptions{}).\n\t\t\tDoc(\"jenkinsfile preview from PipelineTemplate\").\n\t\t\tReturns(200, \"OK\", \"\"))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipelinetemplate/{namespace}/{name}/exports\").\n\t\t\tTo(apiHandler.handlerExportsPiplineTemplate).\n\t\t\tWrites(clusterpipelinetemplate.PipelineExportedVariables{}).\n\t\t\tDoc(\"get the exports in pipelinetemplate\").\n\t\t\tReturns(200, \"OK\", 
clusterpipelinetemplate.PipelineExportedVariables{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipelinetemplatecategories/{namespace}\").\n\t\t\tTo(apiHandler.handlePipelinetemplatecategories).\n\t\t\tWrites(pipelinetemplate.PipelineTemplateCategoryList{}).\n\t\t\tDoc(\"get a PipelineTemplate\").\n\t\t\tReturns(200, \"OK\", pipelinetemplate.PipelineTemplateCategoryList{}))\n\n\t// endregion\n\n\t// region Pipeline\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipelineconfig/{namespace}\").\n\t\t\tTo(apiHandler.handleGetPipelineConfigList).\n\t\t\tWrites(pipelineconfig.PipelineConfigList{}).\n\t\t\tDoc(\"get namespaced pipelineconfig list\").\n\t\t\tReturns(200, \"OK\", pipelineconfig.PipelineConfigList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/pipelineconfig/{namespace}\").\n\t\t\tTo(apiHandler.handleCreatePipelineConfig).\n\t\t\tWrites(pipelineconfig.PipelineConfigDetail{}).\n\t\t\tDoc(\"creates namespaced pipelineconfig\").\n\t\t\tReturns(200, \"OK\", pipelineconfig.PipelineConfigDetail{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipelineconfig/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetPipelineConfigDetail).\n\t\t\tWrites(pipelineconfig.PipelineConfig{}).\n\t\t\tDoc(\"get pipeline config details\").\n\t\t\tReturns(200, \"OK\", pipelineconfig.PipelineConfigDetail{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/pipelineconfig/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleUpdatePipelineConfig).\n\t\t\tWrites(pipelineconfig.PipelineConfig{}).\n\t\t\tDoc(\"update pipeline config\").\n\t\t\tReturns(200, \"OK\", pipelineconfig.PipelineConfigDetail{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/pipelineconfig/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleDeletePipelineConfig).\n\t\t\tWrites(struct{}{}).\n\t\t\tDoc(\"deletes a pipeline config\").\n\t\t\tReturns(200, \"OK\", struct{}{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/pipelineconfig/{namespace}/{name}/trigger\").\n\t\t\tTo(apiHandler.handleTriggerPipelineConfig).\n\t\t\tWrites(pipelineconfig.PipelineConfigTrigger{}).\n\t\t\tDoc(\"triggers pipeline\").\n\t\t\tReturns(200, \"OK\", pipelineconfig.PipelineTriggerResponse{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/pipelineconfig/{namespace}/{name}/preview\").\n\t\t\tTo(apiHandler.handlePreviewPipelineConfig).\n\t\t\tWrites(pipelineconfig.PipelineConfigDetail{}).\n\t\t\tDoc(\"jenkinsfile preview\").\n\t\t\tReturns(200, \"OK\", \"\"))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/pipelineconfig/{namespace}/{name}/scan\").\n\t\t\tTo(apiHandler.handleScanPipelineConfig).\n\t\t\tDoc(\"scan multi-branch\").\n\t\t\tReturns(200, \"OK\", \"\"))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipelineconfig/{namespace}/{name}/logs\").\n\t\t\tParam(restful.PathParameter(\"namespace\", \"Namespace to use\")).\n\t\t\tParam(restful.PathParameter(\"name\", \"Pipeline name to filter scope\")).\n\t\t\tParam(restful.QueryParameter(\"start\", \"Start offset to fetch logs\")).\n\t\t\tTo(apiHandler.handlePipelineConfigLogs).\n\t\t\tDoc(\"gets scan logs for multi-branch pipeline\").\n\t\t\tReturns(200, \"OK\", v1alpha1.PipelineConfigLog{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipeline/{namespace}\").\n\t\t\tTo(apiHandler.handleGetPipelineList).\n\t\t\tWrites(pipeline.PipelineList{}).\n\t\t\tDoc(\"get namespaced pipeline list\").\n\t\t\tReturns(200, \"OK\", pipeline.PipelineList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipeline/{namespace}/{name}\").\n\t\t\tParam(restful.QueryParameter(\"withFreshStages\", \"Whether to retrieve newest stages from 
Jenkins\")).\n\t\t\tTo(apiHandler.handleGetPipelineDetail).\n\t\t\tWrites(pipeline.Pipeline{}).\n\t\t\tDoc(\"get pipeline details\").\n\t\t\tReturns(200, \"OK\", pipeline.Pipeline{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/pipeline/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleDeletePipeline).\n\t\t\tWrites(struct{}{}).\n\t\t\tDoc(\"deletes a pipeline\").\n\t\t\tReturns(200, \"OK\", struct{}{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/pipeline/{namespace}/{name}/retry\").\n\t\t\tTo(apiHandler.handleRetryPipelineDetail).\n\t\t\tWrites(pipeline.RetryRequest{}).\n\t\t\tDoc(\"retries a pipeline\").\n\t\t\tReturns(200, \"OK\", v1alpha1.Pipeline{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/pipeline/{namespace}/{name}/abort\").\n\t\t\tTo(apiHandler.handleAbortPipeline).\n\t\t\tWrites(pipeline.AbortRequest{}).\n\t\t\tDoc(\"aborts a pipeline\").\n\t\t\tReturns(200, \"OK\", v1alpha1.Pipeline{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipeline/{namespace}/{name}/logs\").\n\t\t\tParam(restful.PathParameter(\"namespace\", \"Namespace to use\")).\n\t\t\tParam(restful.PathParameter(\"name\", \"Pipeline name to filter scope\")).\n\t\t\tParam(restful.QueryParameter(\"start\", \"Start offset to fetch logs\")).\n\t\t\tParam(restful.QueryParameter(\"stage\", \"Stage to fetch logs from\")).\n\t\t\tParam(restful.QueryParameter(\"step\", \"Step to fetch logs from. Can be combined with stage\")).\n\t\t\tTo(apiHandler.handlePipelineLogs).\n\t\t\tDoc(\"gets logs for pipeline\").\n\t\t\tReturns(200, \"OK\", v1alpha1.PipelineLog{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipeline/{namespace}/{name}/tasks\").\n\t\t\tParam(restful.PathParameter(\"namespace\", \"Namespace to use\")).\n\t\t\tParam(restful.PathParameter(\"name\", \"Pipeline name to filter scope\")).\n\t\t\tParam(restful.QueryParameter(\"stage\", \"Stage to fetch steps from. 
If not provided will return all stages\")).\n\t\t\tTo(apiHandler.handlePipelineTasks).\n\t\t\tDoc(\"gets steps for pipeline\").\n\t\t\tReturns(200, \"OK\", v1alpha1.PipelineTask{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/pipeline/{namespace}/{name}/inputs\").\n\t\t\tTo(apiHandler.handlePipelineInput).\n\t\t\tWrites(pipeline.InputOptions{}).\n\t\t\tDoc(\"response a input request which in a pipeline\").\n\t\t\tReturns(200, \"OK\", pipeline.InputResponse{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/pipeline/{namespace}/{name}/testreports\").\n\t\t\tParam(restful.QueryParameter(\"start\", \"Start offset to fetch test report items\")).\n\t\t\tParam(restful.QueryParameter(\"limit\", \"Limit of number to fetch test report items\")).\n\t\t\tTo(apiHandler.handlePipelineTestReports).\n\t\t\tDoc(\"response a input request which in a pipeline\").\n\t\t\tReturns(200, \"OK\", pipeline.PipelineTestReports{}))\n\n\t// endregion\n\n\t// region CodeRepository\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/codereposervice\").\n\t\t\tTo(apiHandler.handleCreateCodeRepoService).\n\t\t\tWrites(codereposervice.CodeRepoServiceList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/codereposervice/{name}\").\n\t\t\tTo(apiHandler.handleDeleteCodeRepoService).\n\t\t\tWrites(codereposervice.CodeRepoService{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/codereposervice/{name}\").\n\t\t\tTo(apiHandler.handleUpdateCodeRepoService).\n\t\t\tWrites(v1alpha1.CodeRepoService{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/codereposervice\").\n\t\t\tTo(apiHandler.handleGetCodeRepoServiceList).\n\t\t\tWrites(codereposervice.CodeRepoServiceList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/codereposervices\").\n\t\t\tTo(apiHandler.handleGetCodeRepoServiceList).\n\t\t\tWrites(codereposervice.CodeRepoServiceList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/codereposervice/{name}\").\n\t\t\tTo(apiHandler.handleGetCodeRepoServiceDetail).\n\t\t\tWrites(v1alpha1.CodeRepoService{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/codereposervice/{name}/resources\").\n\t\t\tTo(apiHandler.handleGetCodeRepoServiceResourceList).\n\t\t\tWrites(common.ResourceList{}).\n\t\t\tDoc(\"retrieve resources associated with codereposervice\").\n\t\t\tReturns(200, \"OK\", 
common.ResourceList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/codereposervice/{name}/secrets\").\n\t\t\tTo(apiHandler.handleGetCodeRepoServiceSecretList).\n\t\t\tWrites(secret.SecretList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/coderepobinding/{namespace}\").\n\t\t\tTo(apiHandler.handleCreateCodeRepoBinding).\n\t\t\tWrites(v1alpha1.CodeRepoBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/coderepobinding/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleDeleteCodeRepoBinding).\n\t\t\tWrites(v1alpha1.CodeRepoBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/coderepobinding/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleUpdateCodeRepoBinding).\n\t\t\tWrites(struct{}{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/coderepobinding\").\n\t\t\tTo(apiHandler.handleGetCodeRepoBindingList).\n\t\t\tWrites(coderepobinding.CodeRepoBindingList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/coderepobinding/{namespace}\").\n\t\t\tTo(apiHandler.handleGetCodeRepoBindingList).\n\t\t\tWrites(coderepobinding.CodeRepoBindingList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/coderepobinding/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetCodeRepoBindingDetail).\n\t\t\tWrites(v1alpha1.CodeRepoBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/coderepobinding/{namespace}/{name}/resources\").\n\t\t\tTo(apiHandler.handleGetCodeRepoBindingResources).\n\t\t\tWrites(common.ResourceList{}).\n\t\t\tDoc(\"retrieve resources associated with coderepobinding\").\n\t\t\tReturns(200, \"OK\", common.ResourceList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/coderepobinding/{namespace}/{name}/secrets\").\n\t\t\tTo(apiHandler.handleGetCodeRepoBindingSecretList).\n\t\t\tWrites(secret.SecretList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/coderepobinding/{namespace}/{name}/repositories\").\n\t\t\tTo(apiHandler.handleGetCodeRepositoryListInBinding).\n\t\t\tWrites(coderepository.CodeRepositoryList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/coderepobinding/{namespace}/{name}/remote-repositories\").\n\t\t\tTo(apiHandler.handleGetRemoteRepositoryList).\n\t\t\tWrites(coderepository.CodeRepositoryList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/coderepository/{namespace}\").\n\t\t\tTo(apiHandler.handleGetCodeRepositoryList).\n\t\t\tWrites(coderepository.CodeRepositoryList{}).\n\t\t\tDoc(\"get namespaced coderepository list\").\n\t\t\tReturns(200, \"OK\", coderepository.CodeRepositoryList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/coderepository/{namespace}/{name}/branches\").\n\t\t\tParam(restful.PathParameter(\"sortBy\", \"sort option. The choices are creationTime\")).\n\t\t\tParam(restful.PathParameter(\"sortMode\", \"sort option. 
The choices are desc or asc\")).\n\t\t\tTo(apiHandler.HandleGetCodeRepositoryBranches).\n\t\t\tReturns(200, \"Get coderepo branch Successful\", v1alpha1.CodeRepoBranchResult{}))\n\n\t// endregion\n\n\t// region ToolChain\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/toolchain\").\n\t\t\tTo(apiHandler.handleGetToolChains).\n\t\t\tWrites(toolchain.ToolChainList{}).\n\t\t\tDoc(\"get namespaced coderepository list\").\n\t\t\tReturns(200, \"OK\", coderepository.CodeRepositoryList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/toolchain/bindings\").\n\t\t\tTo(apiHandler.handleGetToolChainBindings).\n\t\t\tWrites(toolchain.ToolChainBindingList{}).\n\t\t\tDoc(\"get toolchain binding list\").\n\t\t\tReturns(200, \"OK\", coderepository.CodeRepositoryList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/toolchain/bindings/{namespace}\").\n\t\t\tTo(apiHandler.handleGetToolChainBindings).\n\t\t\tWrites(toolchain.ToolChainBindingList{}).\n\t\t\tDoc(\"get namespaced toolchain binding list\").\n\t\t\tReturns(200, \"OK\", coderepository.CodeRepositoryList{}))\n\t// endregion\n\n\t// region callback\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/callback/oauth/{namespace}/secret/{secretNamespace}/{secretName}/codereposervice/{serviceName}\").\n\t\t\tTo(apiHandler.handleOAuthCallback).\n\t\t\tWrites(struct{}{}))\n\t// endregion\n\n\t// region ImageRegistry\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/imageregistry\").\n\t\t\tTo(apiHandler.handleCreateImageRegistry).\n\t\t\tWrites(imageregistry.ImageRegistryList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/imageregistry/{name}\").\n\t\t\tTo(apiHandler.handleDeleteImageRegsitry).\n\t\t\tWrites(imageregistry.ImageRegistry{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/imageregistry/{name}\").\n\t\t\tTo(apiHandler.handleUpdateImageRegistry).\n\t\t\tWrites(v1alpha1.ImageRegistry{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/imageregistry\").\n\t\t\tTo(apiHandler.handleGetImageRegistryList).\n\t\t\tWrites(imageregistry.ImageRegistryList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/imageregistry/{name}\").\n\t\t\tTo(apiHandler.handleGetImageRegistryDetail).\n\t\t\tWrites(v1alpha1.ImageRegistry{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/imageregistry/{name}/secrets\").\n\t\t\tTo(apiHandler.handleGetImageRegistrySecretList).\n\t\t\tWrites(secret.SecretList{}))\n\t// endregion\n\n\t// region 
ImageRegistryBinding\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/imageregistrybinding/{namespace}\").\n\t\t\tTo(apiHandler.handleCreateImageRegistryBinding).\n\t\t\tWrites(v1alpha1.ImageRegistryBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(URLIMAGEREGISTRYBINDINGDETAIL).\n\t\t\tTo(apiHandler.handleUpdateImageRegistryBinding).\n\t\t\tWrites(v1alpha1.ImageRegistryBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(URLIMAGEREGISTRYBINDINGDETAIL).\n\t\t\tTo(apiHandler.handleDeleteImageRegistryBinding).\n\t\t\tWrites(v1alpha1.ImageRegistryBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/imageregistrybinding\").\n\t\t\tTo(apiHandler.handleGetImageRegistryBindingList).\n\t\t\tWrites(imageregistrybinding.ImageRegistryBindingList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/imageregistrybinding/{namespace}\").\n\t\t\tTo(apiHandler.handleGetImageRegistryBindingList).\n\t\t\tWrites(imageregistrybinding.ImageRegistryBindingList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(URLIMAGEREGISTRYBINDINGDETAIL).\n\t\t\tTo(apiHandler.handleGetImageRegistryBindingDetail).\n\t\t\tWrites(v1alpha1.ImageRegistryBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/imageregistrybinding/{namespace}/{name}/secrets\").\n\t\t\tTo(apiHandler.handleGetImageRegistryBindingSecretList).\n\t\t\tWrites(secret.SecretList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/imageregistrybinding/{namespace}/{name}/repositories\").\n\t\t\tTo(apiHandler.handleGetImageRepositoryListInBinding).\n\t\t\tWrites(imagerepository.ImageRepositoryList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/imageregistrybinding/{namespace}/{name}/remote-repositories\").\n\t\t\tTo(apiHandler.handleGetImageOriginRepositoryList).\n\t\t\tWrites(imagerepository.ImageRepositoryList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/imageregistrybinding/{namespace}/{name}/remote-repositories-project\").\n\t\t\tTo(apiHandler.handleGetImageOriginRepositoryProjectList).\n\t\t\tWrites(imagerepository.ImageRepositoryList{}))\n\t// endregion\n\n\t// region ImageRepository\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/imagerepository/{namespace}\").\n\t\t\tTo(apiHandler.handleGetImageRepositoryList).\n\t\t\tWrites(imagerepository.ImageRepositoryList{}).\n\t\t\tDoc(\"get namespaced imagerepository list\").\n\t\t\tReturns(200, \"OK\", imagerepository.ImageRepositoryList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/imagerepositoryproject/{namespace}\").\n\t\t\tTo(apiHandler.handleGetImageRepositoryProjectList).\n\t\t\tWrites(imagerepository.ImageRepositoryList{}).\n\t\t\tDoc(\"get namespaced imagerepository list\").\n\t\t\tReturns(200, \"OK\", imagerepository.ImageRepositoryList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/imagerepository/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetImageRepositoryDetail).\n\t\t\tWrites(v1alpha1.ImageRepository{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/imagerepository/{namespace}/{name}/tags\").\n\t\t\tParam(restful.PathParameter(\"sortBy\", \"sort option. The choices are creationTime\")).\n\t\t\tParam(restful.PathParameter(\"sortMode\", \"sort option. 
The choices are desc or asc\")).\n\t\t\tTo(apiHandler.HandleGetImageTags).\n\t\t\tReturns(200, \"Get Image tags Successful\", v1alpha1.ImageTagResult{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"imagerepository/{namespace}/{name}/security\").\n\t\t\tParam(restful.PathParameter(\"tag\", \"Scan image tag name\")).\n\t\t\tTo(apiHandler.HandleScanImage).\n\t\t\tReturns(200, \"Create Scan Image Job Successful.\", v1alpha1.ImageResult{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"imagerepository/{namespace}/{name}/security\").\n\t\t\tParam(restful.PathParameter(\"tag\", \"Get image vulnerability tag name\")).\n\t\t\tTo(apiHandler.HandleGetVulnerability).\n\t\t\tReturns(200, \"Get Image Vulnerability Successful\", v1alpha1.VulnerabilityList{}))\n\t// endregion\n\n\t// region microservicesenvironments\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/microservicesenvironments\").\n\t\t\tFilter(mw.Product(ACPServiceFramework)).\n\t\t\tTo(apiHandler.handleMicroservicesEnvironmentList).\n\t\t\tWrites(asfClient.MicroservicesEnvironmentList{}).\n\t\t\tDoc(\"get microservicesenvironment list\").\n\t\t\tReturns(200, \"OK\", asfClient.MicroservicesEnvironmentList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/microservicesenvironments/{name}\").\n\t\t\tFilter(mw.Product(ACPServiceFramework)).\n\t\t\tWrites(microservicesenvironment.MicroservicesEnvironmentDetail{}).\n\t\t\tTo(apiHandler.handleGetMicroservicesEnviromentDetail).\n\t\t\tDoc(\"get microservicesenvironments detail by name\").\n\t\t\tReturns(200, \"OK\", microservicesenvironment.MicroservicesEnvironmentDetail{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/microservicescomponent/{namespace}/{name}\").\n\t\t\tFilter(mw.Product(ACPServiceFramework)).\n\t\t\tTo(apiHandler.handlePutMicroservicesComponent).\n\t\t\tWrites(asfClient.MicroservicesComponent{}).\n\t\t\tDoc(\"install component\").\n\t\t\tReturns(200, \"OK\", asfClient.MicroservicesComponent{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/microservicescomponent/{namespace}/{name}\").\n\t\t\tFilter(mw.Product(ACPServiceFramework)).\n\t\t\tTo(apiHandler.handlePutMicroservicesComponent).\n\t\t\tWrites(asfClient.MicroservicesComponent{}).\n\t\t\tDoc(\"update component\").\n\t\t\tReturns(200, \"OK\", asfClient.MicroservicesComponent{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/microservicescomponent/{namespace}/{name}/start\").\n\t\t\tFilter(mw.Product(ACPServiceFramework)).\n\t\t\tTo(apiHandler.handlePutMicroservicesComponentStart).\n\t\t\tWrites(asfClient.MicroservicesComponent{}).\n\t\t\tDoc(\"start component\").\n\t\t\tReturns(200, \"OK\", asfClient.MicroservicesComponent{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/microservicescomponent/{namespace}/{name}/stop\").\n\t\t\tFilter(mw.Product(ACPServiceFramework)).\n\t\t\tTo(apiHandler.handlePutMicroservicesComponentStop).\n\t\t\tWrites(asfClient.MicroservicesComponentList{}).\n\t\t\tDoc(\"stop component\").\n\t\t\tReturns(200, \"OK\", asfClient.MicroservicesComponent{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/microservicesapps\").\n\t\t\tFilter(mw.Product(ACPServiceFramework)).\n\t\t\tWrites(microservicesapplication.MicroservicesApplicationList{}).\n\t\t\tTo(apiHandler.handleGetMicroservicesApps).\n\t\t\tDoc(\"get microservicesenvironments detail by name\").\n\t\t\tReturns(200, \"OK\", microservicesapplication.MicroservicesApplicationList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/microservicesconfigs\").\n\t\t\tWrites(microservicesconfiguration.MicroservicesConfigurationList{}).\n\t\t\tTo(apiHandler.handleGetMicroservicesConfigs).\n\t\t\tDoc(\"get 
microservicesenvironments detail by name\").\n\t\t\tReturns(200, \"OK\", microservicesconfiguration.MicroservicesConfigurationList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/domains\").\n\t\t\tWrites(domain.DomainList{}).\n\t\t\tTo(apiHandler.handleGetDomainList).\n\t\t\tDoc(\"get microservicesenvironments detail by name\").\n\t\t\tReturns(200, \"OK\", domain.DomainList{}))\n\n\t// endregion\n\n\t// region ProjectManagement\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/projectmanagement\").\n\t\t\tTo(apiHandler.handleCreateProjectManagement).\n\t\t\tWrites(v1alpha1.ProjectManagement{}).\n\t\t\tDoc(\"create a projectmanagement\").\n\t\t\tReturns(200, \"OK\", v1alpha1.ProjectManagement{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(URLProjectManagementDetails).\n\t\t\tTo(apiHandler.handleDeleteProjectManagement).\n\t\t\tWrites(struct{}{}).\n\t\t\tDoc(\"delete a projectmanagement\").\n\t\t\tReturns(200, \"OK\", struct{}{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(URLProjectManagementDetails).\n\t\t\tTo(apiHandler.handleUpdateProjectManagement).\n\t\t\tWrites(v1alpha1.ProjectManagement{}).\n\t\t\tDoc(\"update a projectmanagement\").\n\t\t\tReturns(200, \"OK\", v1alpha1.ProjectManagement{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/projectmanagement\").\n\t\t\tTo(apiHandler.handleGetProjectManagementList).\n\t\t\tWrites(projectmanagement.ProjectManagementList{}).\n\t\t\tDoc(\"get projectmanagement list\").\n\t\t\tReturns(200, \"OK\", projectmanagement.ProjectManagementList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(URLProjectManagementDetails).\n\t\t\tTo(apiHandler.handleGetProjectManagementDetail).\n\t\t\tWrites(v1alpha1.ProjectManagement{}).\n\t\t\tDoc(\"get a projectmanagement\").\n\t\t\tReturns(200, \"OK\", v1alpha1.ProjectManagement{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/projectmanagementbinding/{namespace}\").\n\t\t\tTo(apiHandler.handleCreateProjectManagementBinding).\n\t\t\tWrites(v1alpha1.ProjectManagementBinding{}).\n\t\t\tDoc(\"create a projectmanagementbinding\").\n\t\t\tReturns(200, \"OK\", v1alpha1.ProjectManagementBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(URLProjectManagementBindingDetails).\n\t\t\tTo(apiHandler.handleDeleteProjectManagementBinding).\n\t\t\tWrites(struct{}{}).\n\t\t\tDoc(\"delete a projectmanagementbinding\").\n\t\t\tReturns(200, \"OK\", struct{}{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(URLProjectManagementBindingDetails).\n\t\t\tTo(apiHandler.handleUpdateProjectManagementBinding).\n\t\t\tWrites(v1alpha1.ProjectManagementBinding{}).\n\t\t\tDoc(\"update a projectmanagementbinding\").\n\t\t\tReturns(200, \"OK\", v1alpha1.ProjectManagementBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/projectmanagementbinding\").\n\t\t\tTo(apiHandler.handleGetProjectManagementBindingList).\n\t\t\tWrites(projectmanagementbinding.ProjectManagementBindingList{}).\n\t\t\tDoc(\"get projectmanagementbinding list in all namespaces\").\n\t\t\tReturns(200, \"OK\", projectmanagementbinding.ProjectManagementBindingList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/projectmanagementbinding/{namespace}\").\n\t\t\tTo(apiHandler.handleGetProjectManagementBindingList).\n\t\t\tWrites(projectmanagementbinding.ProjectManagementBindingList{}).\n\t\t\tDoc(\"get projectmanagementbinding list in one namespace\").\n\t\t\tReturns(200, \"OK\", projectmanagementbinding.ProjectManagementBindingList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(URLProjectManagementBindingDetails).\n\t\t\tTo(apiHandler.handleGetProjectManagementBindingDetail).\n\t\t\tWrites(v1alpha1.ProjectManagementBinding{}).\n\t\t\tDoc(\"get a 
projectmanagementbinding\").\n\t\t\tReturns(200, \"OK\", v1alpha1.ProjectManagementBinding{}))\n\t// endregion\n\n\t// region TestTool\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/testtool\").\n\t\t\tTo(apiHandler.handleCreateTestTool).\n\t\t\tWrites(v1alpha1.TestTool{}).\n\t\t\tDoc(\"create a testtool\").\n\t\t\tReturns(200, \"OK\", v1alpha1.TestTool{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(URLTestToolDetails).\n\t\t\tTo(apiHandler.handleDeleteTestTool).\n\t\t\tWrites(struct{}{}).\n\t\t\tDoc(\"delete a testtool\").\n\t\t\tReturns(200, \"OK\", struct{}{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(URLTestToolDetails).\n\t\t\tTo(apiHandler.handleUpdateTestTool).\n\t\t\tWrites(v1alpha1.TestTool{}).\n\t\t\tDoc(\"update a testtool\").\n\t\t\tReturns(200, \"OK\", v1alpha1.TestTool{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/testtool\").\n\t\t\tTo(apiHandler.handleGetTestToolList).\n\t\t\tWrites(testtool.TestToolList{}).\n\t\t\tDoc(\"get testtool list\").\n\t\t\tReturns(200, \"OK\", testtool.TestToolList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(URLTestToolDetails).\n\t\t\tTo(apiHandler.handleGetTestToolDetail).\n\t\t\tWrites(v1alpha1.TestTool{}).\n\t\t\tDoc(\"get a testtool\").\n\t\t\tReturns(200, \"OK\", v1alpha1.TestTool{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/testtoolbinding/{namespace}\").\n\t\t\tTo(apiHandler.handleCreateTestToolBinding).\n\t\t\tWrites(v1alpha1.TestToolBinding{}).\n\t\t\tDoc(\"create a testtoolbinding\").\n\t\t\tReturns(200, \"OK\", v1alpha1.TestToolBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(URLTestToolBindingDetails).\n\t\t\tTo(apiHandler.handleDeleteTestToolBinding).\n\t\t\tWrites(struct{}{}).\n\t\t\tDoc(\"delete a testtoolbinding\").\n\t\t\tReturns(200, \"OK\", struct{}{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(URLTestToolBindingDetails).\n\t\t\tTo(apiHandler.handleUpdateTestToolBinding).\n\t\t\tWrites(v1alpha1.TestToolBinding{}).\n\t\t\tDoc(\"update a testtoolbinding\").\n\t\t\tReturns(200, \"OK\", v1alpha1.TestToolBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/testtoolbinding\").\n\t\t\tTo(apiHandler.handleGetTestToolBindingList).\n\t\t\tWrites(testtoolbinding.TestToolBindingList{}).\n\t\t\tDoc(\"get testtoolbinding list in all namespaces\").\n\t\t\tReturns(200, \"OK\", testtoolbinding.TestToolBindingList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/testtoolbinding/{namespace}\").\n\t\t\tTo(apiHandler.handleGetTestToolBindingList).\n\t\t\tWrites(testtoolbinding.TestToolBindingList{}).\n\t\t\tDoc(\"get testtoolbinding list in one namespace\").\n\t\t\tReturns(200, \"OK\", testtoolbinding.TestToolBindingList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(URLTestToolBindingDetails).\n\t\t\tTo(apiHandler.handleGetTestToolBindingDetail).\n\t\t\tWrites(v1alpha1.TestToolBinding{}).\n\t\t\tDoc(\"get a testtoolbinding\").\n\t\t\tReturns(200, \"OK\", v1alpha1.TestToolBinding{}))\n\t// endregion\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/microservicesconfigs\").\n\t\t\tFilter(mw.Product(ACPServiceFramework)).\n\t\t\tWrites(microservicesconfiguration.MicroservicesConfigurationList{}).\n\t\t\tTo(apiHandler.handleGetMicroservicesConfigs).\n\t\t\tDoc(\"get microservicesenvironments detail by name\").\n\t\t\tReturns(200, \"OK\", microservicesconfiguration.MicroservicesConfigurationList{}))\n\n\t// region Statistics\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/statistics/pipeline/{namespace}\").\n\t\t\tTo(apiHandler.handleGetPipelineStatistics).\n\t\t\tWrites(struct{}{}).\n\t\t\tDoc(\"get the statistics info of pipeline\").\n\t\t\tReturns(200, \"OK\", 
struct{}{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/statistics/stage/{namespace}\").\n\t\t\tTo(apiHandler.handleGetStageStatistics).\n\t\t\tWrites(struct{}{}).\n\t\t\tDoc(\"get the statistics info of stage\").\n\t\t\tReturns(200, \"OK\", struct{}{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/statistics/codequality/{namespace}\").\n\t\t\tTo(apiHandler.handleGetCodeQualityStatistics).\n\t\t\tWrites(struct{}{}).\n\t\t\tDoc(\"get the statistics info of stage\").\n\t\t\tReturns(200, \"OK\", struct{}{}))\n\n\t// endregion\n\n\t// region CodeQualityTool\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/codequalitytool\").\n\t\t\tTo(apiHandler.handleCreateCodeQualityTool).\n\t\t\tWrites(v1alpha1.CodeQualityTool{}).\n\t\t\tDoc(\"create a code quality tool\").\n\t\t\tReturns(200, \"OK\", v1alpha1.CodeQualityTool{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(URLCODEQUALITYTOOLDETAIL).\n\t\t\tTo(apiHandler.handleDeleteCodeQualityTool).\n\t\t\tWrites(v1alpha1.CodeQualityTool{}).\n\t\t\tDoc(\"delete a code quality tool with name\").\n\t\t\tReturns(200, \"OK\", v1alpha1.CodeQualityTool{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(URLCODEQUALITYTOOLDETAIL).\n\t\t\tTo(apiHandler.handleUpdateCodeQualityTool).\n\t\t\tWrites(v1alpha1.CodeQualityTool{}).\n\t\t\tDoc(\"update a code quality tool with name\").\n\t\t\tReturns(200, \"OK\", v1alpha1.CodeQualityTool{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(URLCODEQUALITYTOOLDETAIL).\n\t\t\tTo(apiHandler.handleGetCodeQualityTool).\n\t\t\tWrites(v1alpha1.CodeQualityTool{}).\n\t\t\tDoc(\"get a code quality tool with name\").\n\t\t\tReturns(200, \"OK\", v1alpha1.CodeQualityTool{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/codequalitytool\").\n\t\t\tTo(apiHandler.handleListCodeQualityTool).\n\t\t\tWrites(v1alpha1.CodeQualityTool{}).\n\t\t\tDoc(\"list code quality tools\").\n\t\t\tReturns(200, \"OK\", codequalitytool.CodeQualityToolList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/codequalitybinding/{namespace}\").\n\t\t\tTo(apiHandler.handleCreateCodeQualityBinding).\n\t\t\tWrites(v1alpha1.CodeQualityBinding{}).\n\t\t\tDoc(\"create a code quality binding\").\n\t\t\tReturns(200, \"OK\", v1alpha1.CodeQualityBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/codequalitybinding/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleUpdateCodeQualityBinding).\n\t\t\tDoc(\"update a code quality binding with name\").\n\t\t\tReturns(200, \"OK\", v1alpha1.CodeQualityBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/codequalitybinding/{namespace}\").\n\t\t\tTo(apiHandler.handleGetCodeQualityBindingList).\n\t\t\tDoc(\"get namespaced code quality binding list\").\n\t\t\tReturns(200, \"OK\", codequalitybinding.CodeQualityBindingList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/codequalitybinding\").\n\t\t\tTo(apiHandler.handleGetCodeQualityBindingList).\n\t\t\tDoc(\"get all code quality binding list\").\n\t\t\tReturns(200, \"OK\", codequalitybinding.CodeQualityBindingList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/codequalitybinding/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetCodeQualityBindingDetail).\n\t\t\tDoc(\"get code quality binding with name\").\n\t\t\tReturns(200, \"OK\", v1alpha1.CodeQualityBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/codequalitybinding/{namespace}/{name}/projects\").\n\t\t\tTo(apiHandler.handleGetCodeQualityProjectListInBinding).\n\t\t\tDoc(\"get code quality project list in binding\").\n\t\t\tReturns(200, \"OK\", 
codequalityproject.CodeQualityProjectList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/codequalitybinding/{namespace}/{name}/secrets\").\n\t\t\tTo(apiHandler.handleGetCodeQualityBindingSecretList).\n\t\t\tDoc(\"get bind secret list\").\n\t\t\tReturns(200, \"OK\", secret.SecretList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/codequalitybinding/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleDeleteCodeQualityBinding).\n\t\t\tDoc(\"delete code quality binding with name\").\n\t\t\tReturns(200, \"OK\", common.ResourceList{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/codequalityproject/{namespace}\").\n\t\t\tTo(apiHandler.handleCreateCodeQualityProject).\n\t\t\tDoc(\"create a code quality project\").\n\t\t\tReturns(200, \"OK\", v1alpha1.CodeQualityProject{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/codequalityproject/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleUpdateCodeQualityProject).\n\t\t\tDoc(\"update a code quality project with name\").\n\t\t\tReturns(200, \"OK\", v1alpha1.CodeQualityProject{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/codequalityproject/{namespace}\").\n\t\t\tTo(apiHandler.handleGetCodeQualityProjectList).\n\t\t\tDoc(\"create a code quality project\").\n\t\t\tReturns(200, \"OK\", codequalityproject.CodeQualityProjectList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/codequalityproject/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetCodeQualityProjectDetail).\n\t\t\tDoc(\"create a code quality project\").\n\t\t\tReturns(200, \"OK\", v1alpha1.CodeQualityProject{}))\n\n\t//region asm\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/service/{namespace}\").\n\t\t\tTo(apiHandler.handleGetServiceListByProject).\n\t\t\tWrites(resourceService.ServiceNameList{}))\n\t//endregion\n\n\t// region asm\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/servicemesh/graphs/{namespace}\").\n\t\t\tTo(apiHandler.handleGetNamespaceGraph).\n\t\t\tDoc(\"get namespace service graph\").\n\t\t\tReturns(200, \"OK\", servicegraph.Graph{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/servicemesh/metrics\").\n\t\t\tTo(apiHandler.handleGetMetrics).\n\t\t\tDoc(\"get metrics from given options\").\n\t\t\tReturns(200, \"ok\", \"\"))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/servicemesh/nodegraphs\").\n\t\t\tTo(apiHandler.handleGetNodeGraph).\n\t\t\tDoc(\"get namespace service graph\").\n\t\t\tReturns(200, \"OK\", servicegraph.Graph{}))\n\t//endregion\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/microservice/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetMicroserviceRelation).\n\t\t\tDoc(\"get microservice deployment and svc relation\"))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/microservice/{namespace}/{name}/service\").\n\t\t\tTo(apiHandler.handleCreateMicroserviceSvc))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/microservice/{namespace}/{name}/service/{servicename}\").\n\t\t\tTo(apiHandler.handleUpdateMicroserviceSvc))\n\n\t// destinationrule\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/destinationrule/{namespace}\").\n\t\t\tTo(apiHandler.handleListDestinationRule).\n\t\t\tDoc(\"get namespace destination 
rule\"),\n\t)\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/destinationrule/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetDestinationRuleDetail),\n\t)\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/destinationruleinfohost/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetDestinationRuleInfoHost),\n\t)\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/destinationrule/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleUpdateDestinationRule))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/destinationrule/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleDeleteDestinationRule),\n\t)\n\n\t// virtualservice\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/virtualservice/{namespace}\").\n\t\t\tTo(apiHandler.handleListVirtualService))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/virtualservice/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetVirtualService))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/virtualservicehost/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetVirtualServiceByHost))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/virtualservice/{namespace}\").\n\t\t\tTo(apiHandler.handleCreateVirtualService),\n\t)\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/virtualservice/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleUpdateVirtualService))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/virtualservice/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleDeleteVirtualService),\n\t)\n\n\t// Policy\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/policy/{namespace}\").\n\t\t\tTo(apiHandler.handleListPolicy))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/policy/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetPolicy))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/policy/{namespace}\").\n\t\t\tTo(apiHandler.handleCreatePolicy),\n\t)\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/policy/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleUpdatePolicy))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/policy/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleDeletePolicy),\n\t)\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/gateway/{namespace}\").\n\t\t\tTo(apiHandler.handleListGateways))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/gateway/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetGateway))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/gateway/{namespace}\").\n\t\t\tTo(apiHandler.handleCreateGateway))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/gateway/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleUpdateGateway))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/gateway/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleDeleteGateway))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/asmclusterconfig/{name}\").\n\t\t\tTo(apiHandler.handleGetASMClusterConfig))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/asmclusterconfig/{name}\").\n\t\t\tTo(apiHandler.handleUpdateASMClusterConfig))\n\t// 
endregion\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/artifactregistrymanagers\").\n\t\t\tTo(apiHandler.handleCreateArtifactRegistryManager).\n\t\t\tWrites(artifactregistrymanager.ArtifactRegistryManager{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/artifactregistrymanagers/{name}\").\n\t\t\tTo(apiHandler.handleDeleteArtifactRegistryManager).\n\t\t\tWrites(artifactregistrymanager.ArtifactRegistryManager{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/artifactregistrymanagers/{name}\").\n\t\t\tTo(apiHandler.handleUpdateArtifactRegistryManager).\n\t\t\tWrites(v1alpha1.ArtifactRegistryManager{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/artifactregistrymanagers\").\n\t\t\tTo(apiHandler.handleGetArtifactRegistryManagerList).\n\t\t\tWrites(artifactregistrymanager.ArtifactRegistryManagerList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/artifactregistrymanagers/{name}\").\n\t\t\tTo(apiHandler.handleGetArtifactRegistryManagerDetail).\n\t\t\tWrites(v1alpha1.ArtifactRegistryManager{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/artifactregistries\").\n\t\t\tTo(apiHandler.handleCreateArtifactRegistry).\n\t\t\tWrites(artifactregistry.ArtifactRegistry{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/artifactregistries/{name}\").\n\t\t\tTo(apiHandler.handleDeleteArtifactRegistry).\n\t\t\tWrites(artifactregistry.ArtifactRegistry{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/artifactregistries/{name}\").\n\t\t\tTo(apiHandler.handleUpdateArtifactRegistry).\n\t\t\tWrites(v1alpha1.ArtifactRegistry{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/artifactregistries\").\n\t\t\tTo(apiHandler.handleGetArtifactRegistryList).\n\t\t\tWrites(artifactregistry.ArtifactRegistryList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/artifactregistries/{name}\").\n\t\t\tTo(apiHandler.handleGetArtifactRegistryDetail).\n\t\t\tWrites(v1alpha1.ArtifactRegistry{}))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/artifactregistrybindings\").\n\t\t\tTo(apiHandler.handleCreateArtifactRegistryBinding).\n\t\t\tWrites(artifactregistrybinding.ArtifactRegistryBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/artifactregistrybindings/{namespace}\").\n\t\t\tTo(apiHandler.handleCreateArtifactRegistryBinding).\n\t\t\tWrites(artifactregistrybinding.ArtifactRegistryBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/artifactregistrybindings/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleDeleteArtifactRegistryBinding).\n\t\t\tWrites(artifactregistrybinding.ArtifactRegistryBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/artifactregistrybindings/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleUpdateArtifactRegistryBinding).\n\t\t\tWrites(v1alpha1.ArtifactRegistryBinding{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/artifactregistrybindings/{namespace}\").\n\t\t\tTo(apiHandler.handleGetArtifactRegistryBindingList).\n\t\t\tWrites(artifactregistrybinding.ArtifactRegistryBindingList{}))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/artifactregistrybindings/{namespace}/{name}\").\n\t\t\tTo(apiHandler.handleGetArtifactRegistryBindingDetail).\n\t\t\tWrites(v1alpha1.ArtifactRegistryBinding{}))\n\n\t//common 
route\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/common/{resource}\").\n\t\t\tTo(apiHandler.handlePostCommonResource).\n\t\t\tWrites(make(map[string]interface{})))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/common/{resource}/{name}\").\n\t\t\tTo(apiHandler.handleDeleteCommonResource).\n\t\t\tWrites(make(map[string]interface{})))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/common/{resource}/{name}\").\n\t\t\tTo(apiHandler.handlePutCommonResource).\n\t\t\tWrites(make(map[string]interface{})))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/common/{resource}\").\n\t\t\tTo(apiHandler.handleGetCommonResourceList).\n\t\t\tWrites(make(map[string]interface{})))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/common/{resource}/{name}\").\n\t\t\tTo(apiHandler.handleGetCommonResource).\n\t\t\tWrites(make(map[string]interface{})))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/common/namespace/{namespace}/{resource}\").\n\t\t\tTo(apiHandler.handlePostCommonResource).\n\t\t\tWrites(make(map[string]interface{})))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.DELETE(\"/common/namespace/{namespace}/{resource}/{name}\").\n\t\t\tTo(apiHandler.handleDeleteCommonResource).\n\t\t\tWrites(make(map[string]interface{})))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.PUT(\"/common/namespace/{namespace}/{resource}/{name}\").\n\t\t\tTo(apiHandler.handlePutCommonResource).\n\t\t\tWrites(make(map[string]interface{})))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/common/namespace/{namespace}/{resource}\").\n\t\t\tTo(apiHandler.handleGetCommonResourceList).\n\t\t\tWrites(make(map[string]interface{})))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/common/namespace/{namespace}/{resource}/{name}\").\n\t\t\tTo(apiHandler.handleGetCommonResource).\n\t\t\tWrites(make(map[string]interface{})))\n\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/common/{resource}/{name}/sub/{sub}\").\n\t\t\tTo(apiHandler.handleGetCommonResourceSub).\n\t\t\tWrites(make(map[string]interface{})))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/common/{resource}/{name}/sub/{sub}\").\n\t\t\tTo(apiHandler.handlePostCommonResourceSub).\n\t\t\tWrites(make(map[string]interface{})))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.GET(\"/common/namespace/{namespace}/{resource}/{name}/sub/{sub}\").\n\t\t\tTo(apiHandler.handleGetCommonResourceSub).\n\t\t\tWrites(make(map[string]interface{})))\n\tapiV1Ws.Route(\n\t\tapiV1Ws.POST(\"/common/namespace/{namespace}/{resource}/{name}/sub/{sub}\").\n\t\t\tTo(apiHandler.handlePostCommonResourceSub).\n\t\t\tWrites(make(map[string]interface{})))\n\n\tAddAppCoreUrl(apiV1Ws, apiHandler)\n\treturn wsContainer, nil\n}", "func MakeHandler(s IdentityService, logger kitlog.Logger) http.Handler {\n\topts := []kithttp.ServerOption{\n\t\tkithttp.ServerErrorHandler(transport.NewLogErrorHandler(logger)),\n\t\tkithttp.ServerErrorEncoder(encodeError),\n\t}\n\n\tcreateUserHandler := kithttp.NewServer(\n\t\tmakeCreateUserEndpoint(s),\n\t\tdecodeCreateUserRequest,\n\t\tencodeResponse,\n\t\topts...,\n\t)\n\n\tr := mux.NewRouter()\n\n\tr.Handle(\"/v1/users\", createUserHandler).Methods(\"POST\")\n\n\treturn r\n}", "func (a *annotator) makeHandler() http.Handler {\n\tr := httprouter.New()\n\n\tr.Handler(\"GET\", \"/\", http.RedirectHandler(\"/ui/\", http.StatusPermanentRedirect))\n\n\tr.GET(\"/api/dump\", a.dump)\n\tr.GET(\"/api/save\", a.save)\n\n\tr.GET(\"/api/items/:index\", a.getOccurrence)\n\tr.PUT(\"/api/items/:index\", a.putAnswer)\n\n\tr.GET(\"/api/term\", a.getName)\n\tr.GET(\"/api/terms\", a.listNames)\n\n\tr.GET(\"/api/randomindex\", a.randomIndex)\n\tr.GET(\"/api/statistics\", a.statistics)\n\n\tr.GET(\"/ui/*path\", ui)\n\n\treturn r\n}", "func 
MakeKitHttpHandler(_ context.Context, endpoint endpoint.Endpoint, logger log.Logger) http.Handler {\n\tr := mux.NewRouter()\n\toptions := []kitHttp.ServerOption{\n\t\tkitHttp.ServerErrorLogger(logger),\n\t\tkitHttp.ServerErrorEncoder(encodeError),\n\t\tkitHttp.ServerBefore(\n\t\t\tkitJwt.HTTPToContext(), auth.HTTPToContext(),\n\t\t\tauth.LangHTTPToContext(), auth.AuthorizationHTTPToContext()),\n\t}\n\t//开始初始化路由\n\tfor _, routerMap := range router.Router {\n\t\tr.Methods(routerMap.Method).Path(routerMap.Path).Handler(kitHttp.NewServer(\n\t\t\tendpoint,\n\t\t\tDecodeBasicRequest,\n\t\t\tEncodeBasicResponse,\n\t\t\toptions...,\n\t\t))\n\t}\n\t//结束初始化路由\n\treturn r\n}", "func ApiHandler(w http.ResponseWriter, r *http.Request) {\n\tif r.Method == \"POST\" {\n\t\t// Parsing JSON from request\n\t\tvar req RequestJSON;\n\t\tif err := json.NewDecoder(r.Body).Decode(&req); err != nil {\n\t\t\tlog.Printf(\"ApiHandler: Something went wrong when decoding the JSON object.\\n%v\\n\", err)\n\t\t\thttp.Error(w, \"Bad request\", http.StatusBadRequest)\n\t\t}\n\t\t\n\t\t// Call different handlers depending on type of request\n\t\tswitch req.Request {\n\t\t\tcase \"Timeline\":\n\t\t\t\t// Check the session cookie to see if user is authenticated and session is valid\n\t\t\t\trespCookie := CheckCookie(r)\n\t\t\t\tif respCookie.Status == false {\n\t\t\t\t\tsave(w, &respCookie)\n\t\t\t\t}\n\n\t\t\t\tres := timelineHandler(req)\n\t\t\t\tsave(w, &res)\n\t\t\tcase \"Auth\":\n\t\t\t\tres := loginHandler(req, w, r)\n\t\t\t\tsave(w, &res)\n\t\t\tcase \"User\":\n\t\t\t\t// Check the session cookie to see if user is authenticated and session is valid\n\t\t\t\trespCookie := CheckCookie(r)\n\t\t\t\tif respCookie.Status == false {\n\t\t\t\t\tsave(w, &respCookie)\n\t\t\t\t}\n\n\t\t\t\tres := userHandler(req)\n\t\t\t\tsave(w, &res)\n\t\t}\n\t} else {\n\t\thttp.Error(w, \"This method is not allowed.\", 403)\n\t}\n}", "func MakeHTTPSHandler(s Service) http.Handler {\n\tr := chi.NewRouter()\n\n\t//Obtener personas por su identificador\n\tgetPersonByHandler := kithttp.NewServer(\n\t\tmakeGetPersonByIDEndPoint(s),\n\t\tgetPersonByIDRequestDecoder,\n\t\tkithttp.EncodeJSONResponse,\n\t)\n\tr.Method(http.MethodGet, \"/id/{id}\", getPersonByHandler)\n\n\t//Obtener personas paginadas\n\tgetPersonHandler := kithttp.NewServer(\n\t\tmakeGetPersonsEndPoint(s),\n\t\tgetPersonsRequestDecoder,\n\t\tkithttp.EncodeJSONResponse,\n\t)\n\tr.Method(http.MethodPost, \"/paginated\", getPersonHandler)\n\n\t//Agregar a una persona\n\taddPersonHandler := kithttp.NewServer(\n\t\tmakeAddPersonEndpoint(s),\n\t\taddPersonRequestDecoder,\n\t\tkithttp.EncodeJSONResponse,\n\t)\n\tr.Method(http.MethodPost, \"/insert\", addPersonHandler)\n\n\t//Actualizar personas\n\tupdatePersonHandler := kithttp.NewServer(\n\t\tmakeUpdatePersonEndpoint(s),\n\t\tupdatePersonRequestDecoder,\n\t\tkithttp.EncodeJSONResponse,\n\t)\n\tr.Method(http.MethodPut, \"/update\", updatePersonHandler)\n\n\t//Eliminar PERSONA\n\tdeletePersonHandler := kithttp.NewServer(\n\t\tmakeDeletePersonEndPoint(s),\n\t\tdeletePersonRequestDecoder,\n\t\tkithttp.EncodeJSONResponse,\n\t)\n\tr.Method(http.MethodDelete, \"/delete/{id}\", deletePersonHandler)\n\treturn r\n}", "func MakeHttpHandler(ctx context.Context, endpoints endpoint.StringEndpoints, zipkinTracer *gozipkin.Tracer, logger log.Logger) http.Handler {\n\tr := mux.NewRouter()\n\n\tzipkinServer := zipkin.HTTPServerTrace(zipkinTracer, zipkin.Name(\"http-transport\"))\n\n\toptions := 
[]kithttp.ServerOption{\n\t\tkithttp.ServerErrorLogger(logger),\n\t\tkithttp.ServerErrorEncoder(kithttp.DefaultErrorEncoder),\n\t\tzipkinServer,\n\t}\n\n\tr.Methods(\"POST\").Path(\"/op/{type}/{a}/{b}\").Handler(kithttp.NewServer(\n\t\tendpoints.StringEndpoint,\n\t\tdecodeStringRequest,\n\t\tencodeStringResponse,\n\t\toptions...,\n\t))\n\n\tr.Path(\"/metrics\").Handler(promhttp.Handler())\n\n\t// create health check handler\n\tr.Methods(\"GET\").Path(\"/health\").Handler(kithttp.NewServer(\n\t\tendpoints.HealthCheckEndpoint,\n\t\tdecodeHealthCheckRequest,\n\t\tencodeStringResponse,\n\t\toptions...,\n\t))\n\n\treturn r\n}", "func CreateHandler(config *Config) http.Handler {\n\trouter := httprouter.New()\n\trouter.RedirectTrailingSlash = false\n\n\trouter.GET(\"/\", indexHandler{config: config}.Handle)\n\n\tfor name, pkg := range config.Packages {\n\t\thandle := packageHandler{\n\t\t\tpkgName: name,\n\t\t\tpkg: pkg,\n\t\t\tconfig: config,\n\t\t}.Handle\n\t\trouter.GET(fmt.Sprintf(\"/%s\", name), handle)\n\t\trouter.GET(fmt.Sprintf(\"/%s/*path\", name), handle)\n\t}\n\n\treturn router\n}", "func (ws *WebServer) apiHandler(w http.ResponseWriter, r *auth.AuthenticatedRequest) {\n\n\taction := ws.getAPIAction(r.URL.Path)\n\tdata := r.URL.Query()\n\n\t// This is dirty and should be fixed: log needs ws' log\n\tif action == \"get/log\" {\n\t\tws.getLog(w, ws.controller, data)\n\t\treturn\n\t}\n\n\tapiFunc, ok := API_FUNCMAP[action]\n\tif !ok {\n\t\thttp.Error(w, \"Unrecognized API action:\"+action, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tresp, err := apiFunc(w, ws.controller, data)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusExpectationFailed)\n\t\treturn\n\t}\n\n\tw.Write(resp)\n}", "func MakeHandler(ts Service, logger kitlog.Logger) http.Handler {\n\tr := mux.NewRouter()\n\n\topts := []kithttp.ServerOption{\n\t\tkithttp.ServerErrorLogger(logger),\n\t\tkithttp.ServerErrorEncoder(encodeError),\n\t}\n\n\ttrackCargoHandler := kithttp.NewServer(\n\t\tmakeTrackCargoEndpoint(ts),\n\t\tdecodeTrackCargoRequest,\n\t\tencodeResponse,\n\t\topts...,\n\t)\n\n\tr.Handle(\"/tracking/v1/cargos/{id}\", trackCargoHandler).Methods(\"GET\")\n\n\treturn r\n}", "func MakeHTTPHandler(endpoints TodoEndpoints) http.Handler {\n\n\toptions := []httptransport.ServerOption{\n\t\t// httptransport.ServerErrorLogger(logger),\n\t\thttptransport.ServerErrorEncoder(encodeError),\n\t}\n\n\tjwtOptions := middleware.JWTOptions{\n\t\tSecret: JWTSecret(),\n\t\tAuthFunc: func(ctx context.Context, claims jwt.MapClaims) (context.Context, error) {\n\t\t\t// verify claims\n\t\t\tif username, ok := claims[\"username\"]; ok {\n\t\t\t\tuserCtx := context.WithValue(ctx, \"username\", username)\n\t\t\t\treturn userCtx, nil\n\t\t\t}\n\t\t\treturn ctx, errors.New(\"No username\")\n\t\t},\n\t}\n\n\tr := chi.NewRouter()\n\tr.Use(chiMiddleware.Logger)\n\tr.Use(chiMiddleware.StripSlashes)\n\tr.Use(middleware.JWT(jwtOptions))\n\tr.Use(middleware.DefaultEtag)\n\tr.Use(chiMiddleware.DefaultCompress)\n\n\ttodoRouter := chi.NewRouter()\n\n\ttodoRouter.Get(\"/\", httptransport.NewServer(\n\t\tendpoints.GetAllForUserEndPoint,\n\t\tdecodeGetRequest,\n\t\tencodeResponse,\n\t\toptions...,\n\t).ServeHTTP)\n\n\ttodoRouter.Get(\"/{id}\", httptransport.NewServer(\n\t\tendpoints.GetByIDEndpoint,\n\t\tdecodeGetByIDRequest,\n\t\tencodeResponse,\n\t\toptions...,\n\t).ServeHTTP)\n\n\ttodoRouter.Post(\"/\", 
httptransport.NewServer(\n\t\tendpoints.AddEndpoint,\n\t\tdecodeAddRequest,\n\t\tencodeResponse,\n\t\toptions...,\n\t).ServeHTTP)\n\n\ttodoRouter.Put(\"/{id}\", httptransport.NewServer(\n\t\tendpoints.UpdateEndpoint,\n\t\tdecodeUpdateRequest,\n\t\tencodeResponse,\n\t\toptions...,\n\t).ServeHTTP)\n\n\ttodoRouter.Delete(\"/{id}\", httptransport.NewServer(\n\t\tendpoints.DeleteEndpoint,\n\t\tdecodeDeleteRequest,\n\t\tencodeResponse,\n\t\toptions...,\n\t).ServeHTTP)\n\n\tr.Mount(\"/api/todos\", todoRouter)\n\n\treturn r\n}", "func MakeHandler(service Service, logger kitlog.Logger, ml machineLearning.Service) http.Handler {\n\topts := gokit.GetServerOpts(logger, ml)\n\n\talertHandler := kithttp.NewServer(makeAlertEndpoint(service), gokit.DecodeString, gokit.EncodeResponse, opts...)\n\timageAlertHandler := kithttp.NewServer(makeImageAlertEndpoint(service), gokit.DecodeFromBase64, gokit.EncodeResponse, opts...)\n\n\theartbeatAlertHandler := kithttp.NewServer(makeHeartbeatMessageEncpoint(service), gokit.DecodeString, gokit.EncodeResponse, opts...)\n\theartbeatImageHandler := kithttp.NewServer(makeImageHeartbeatEndpoint(service), gokit.DecodeFromBase64, gokit.EncodeResponse, opts...)\n\n\tbusienssAlertHandler := kithttp.NewServer(makeBusinessAlertEndpoint(service), gokit.DecodeString, gokit.EncodeResponse, opts...)\n\n\talertErrorHandler := kithttp.NewServer(makeAlertErrorHandler(service), gokit.DecodeString, gokit.EncodeResponse, opts...)\n\n\tr := mux.NewRouter()\n\n\tr.Handle(\"/alert/\", alertHandler).Methods(\"POST\")\n\tr.Handle(\"/alert/image\", imageAlertHandler).Methods(\"POST\")\n\n\tr.Handle(\"/alert/heartbeat\", heartbeatAlertHandler).Methods(\"POST\")\n\tr.Handle(\"/alert/heartbeat/image\", heartbeatImageHandler).Methods(\"POST\")\n\n\tr.Handle(\"/alert/error\", alertErrorHandler).Methods(\"POST\")\n\n\tr.Handle(\"/alert/business\", busienssAlertHandler).Methods(\"POST\")\n\n\treturn r\n}", "func (api *API) Handle(w http.ResponseWriter, request *Request) {\n\t// Publish the list of resources at root\n\tif request.URL.Path == api.prefix {\n\t\t// TODO alphabetical?\n\t\tresponse := make(map[string]string)\n\t\tfor name, _ := range api.resources {\n\t\t\t// TODO base url? 
link?\n\t\t\tresponse[name] = fmt.Sprintf(\"%s%s\", api.prefix, name)\n\t\t}\n\t\tw.Header().Set(\"Content-Type\", request.Encoding.MediaType())\n\t\tw.Write(request.Encoding.Encode(response))\n\t\treturn\n\t}\n\n\t// Parse the API parameters and build the request object\n\tresource, params, _ := api.routes.getValue(request.URL.Path)\n\tif resource == nil {\n\t\thttp.NotFound(w, request.Request)\n\t\treturn\n\t}\n\n\t// Build the new argo request instance\n\t// GetEncoder and GetDecoder should live in the argo Request constructor\n\trequest.Params = params\n\n\tvar response Response\n\tvar err *APIError\n\n\t// If there are no parameters\n\tmethod := method(request.Method)\n\tif len(params) == 0 {\n\t\tswitch method {\n\t\tcase GET:\n\t\t\tresponse, err = resource.List(request)\n\t\tcase POST:\n\t\t\tresponse, err = resource.Post(request)\n\t\tdefault:\n\t\t\terr = MetaError(\n\t\t\t\t400,\n\t\t\t\t\"unsupported collection method: %s\",\n\t\t\t\tmethod,\n\t\t\t)\n\t\t}\n\t} else {\n\t\tswitch method {\n\t\tcase GET:\n\t\t\tresponse, err = resource.Get(request)\n\t\tcase PATCH:\n\t\t\tresponse, err = resource.Patch(request)\n\t\tcase DELETE:\n\t\t\tresponse, err = resource.Delete(request)\n\t\tdefault:\n\t\t\terr = MetaError(\n\t\t\t\t400,\n\t\t\t\t\"unsupported item method: %s\",\n\t\t\t\tmethod,\n\t\t\t)\n\t\t}\n\t}\n\tif err != nil {\n\t\terr.Write(w, request.Encoding)\n\t\treturn\n\t}\n\tif response == nil {\n\t\tw.WriteHeader(http.StatusNoContent) // 204\n\t\treturn\n\t}\n\t// Always set the media type\n\tw.Header().Set(\"Content-Type\", request.Encoding.MediaType())\n\tw.Write(request.Encoding.Encode(response))\n}", "func MakeHandler(service Service, logger log.Logger) http.Handler {\n\topts := []khttp.ServerOption{\n\t\tkhttp.ServerErrorLogger(logger),\n\t\tkhttp.ServerErrorEncoder(kit.EncodeError),\n\t}\n\n\troutes := kit.Routes{\n\t\tkit.Route{\n\t\t\tName: \"UUID\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/five/v1/uuid\",\n\t\t\tHandler: khttp.NewServer(\n\t\t\t\tmakeUuidEndpoint(service),\n\t\t\t\tdecodeUuidRequest,\n\t\t\t\tkit.Encode,\n\t\t\t\topts...,\n\t\t\t),\n\t\t\tQueries: []string{\n\t\t\t\t\"namespace\",\n\t\t\t\t\"{namespace:[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}}\",\n\t\t\t\t\"name\",\n\t\t\t\t\"{name:*}\",\n\t\t\t},\n\t\t},\n\t}\n\n\treturn kit.AddRoutes(routes...)\n}", "func MakeHTTPHandler(ctx context.Context, endpoint Endpoints, logger log.Logger) http.Handler {\n\n\tr := mux.NewRouter()\n\toptions := []httptransport.ServerOption{\n\t\thttptransport.ServerErrorLogger(logger),\n\t\thttptransport.ServerErrorEncoder(encodeError),\n\t}\n\n\tr.Methods(\"GET\").Path(\"/api/v1/getip/{id}\").Handler(httptransport.NewServer(\n\t\tendpoint.GetBestProxyIPEndpoint,\n\t\tdecodeGetBestProxyIPRequest,\n\t\tencodeResponse,\n\t\toptions...,\n\t))\n\n\t//POST api get best proxy ip\n\tr.Methods(\"POST\").Path(\"/api/v1/bestapi\").Handler(httptransport.NewServer(\n\t\tendpoint.PostBestProxyIPEndpoint,\n\t\tdecodePostBestProxyIPRequest,\n\t\tencodeResponse,\n\t\toptions...,\n\t))\n\n\treturn r\n}", "func ApiHandler() handler.HandlerContract {\n\trepo := repository.NewRepository()\n\tusecase := usecase.NewUsecase(config.Orm, repo)\n\treturn handler.NewHandler(usecase)\n}", "func API(log *log.Logger) http.Handler {\n\t// func NewHandler(logger *log.Logger, db *sqlx.DB) *Handlers {\n\tapp := web.NewApp(log)\n\n\t// Register helloworld endpoint.\n\ts := Specimen{log: log}\n\tapp.TreeMux.Handle(\"GET\", \"/hello\", s.HelloWorld)\n\treturn app\n}", 
"func MakeHTTPHandler(s Service, logger log.Logger) http.Handler {\n\tr := mux.NewRouter()\n\tendpoints := MakeServerEndpoints(s)\n\toptions := []httptransport.ServerOption{\n\t\thttptransport.ServerErrorLogger(logger),\n\t\thttptransport.ServerErrorEncoder(encodeError),\n\t}\n\n\tr.Methods(\"GET\").Path(\"/_health\").Handler(httptransport.NewServer(\n\t\tendpoints.GetHealthEndpoint,\n\t\tnoopDecodeRequest,\n\t\tencodeResponse,\n\t\toptions...,\n\t))\n\n\tr.Methods(\"GET\").Path(\"/zerglings/\").Handler(httptransport.NewServer(\n\t\tendpoints.GetZerglingsEndpoint,\n\t\tnoopDecodeRequest,\n\t\tencodeResponse,\n\t\toptions...,\n\t))\n\n\tr.Methods(\"POST\").Path(\"/zerglings/\").Handler(httptransport.NewServer(\n\t\tendpoints.PostZerglingsEndpoint,\n\t\tnoopDecodeRequest,\n\t\tencodeResponse,\n\t\toptions...,\n\t))\n\n\tr.Methods(\"GET\").Path(\"/zerglings/{id}\").Handler(httptransport.NewServer(\n\t\tendpoints.GetZerglingByIDEndpoint,\n\t\tdecodeGetZerglingByIDRequest,\n\t\tencodeResponse,\n\t\toptions...,\n\t))\n\n\tr.Methods(\"POST\").Path(\"/zerglings/{id}\").Handler(httptransport.NewServer(\n\t\tendpoints.PostZerglingActionEndpoint,\n\t\tdecodePostZerglingActionRequest,\n\t\tencodeResponse,\n\t\toptions...,\n\t))\n\treturn r\n}", "func Handler(request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\tvar (\n\t\tdatas []model.Todo\n\t\terr error\n\t\tb []byte\n\t)\n\n\ttodo := model.NewTodo()\n\tif datas, err = todo.List(); err != nil {\n\t\treturn response.Error(err)\n\t}\n\tif b, err = json.Marshal(datas); err != nil {\n\t\treturn response.Error(err)\n\t}\n\treturn response.Success(string(b))\n}", "func MakeHTTPHandler(endpoints Endpoints, tracer stdopentracing.Tracer,\n\tlogger log.Logger) http.Handler {\n\tr := mux.NewRouter()\n\toptions := []httptransport.ServerOption{\n\t\thttptransport.ServerErrorEncoder(errorEncoder),\n\t}\n\n\tCreateSoireeHTTPHandler(endpoints, tracer, logger, r, options)\n\tDeliverOrderHTTPHandler(endpoints, tracer, logger, r, options)\n\tGetOrderHTTPHandler(endpoints, tracer, logger, r, options)\n\tGetOrdersBySoireeHTTPHandler(endpoints, tracer, logger, r, options)\n\tSearchOrdersHTTPHandler(endpoints, tracer, logger, r, options)\n\tPutOrderHTTPHandler(endpoints, tracer, logger, r, options)\n\tGetSoireesHTTPHandler(endpoints, tracer, logger, r, options)\n\tGetStatHTTPHandler(endpoints, tracer, logger, r, options)\n\tLoginProHTTPHandler(endpoints, tracer, logger, r, options)\n\tRegisterProHTTPHandler(endpoints, tracer, logger, r, options)\n\tCreateEstabHTTPHandler(endpoints, tracer, logger, r, options)\n\tUpdateEstabHTTPHandler(endpoints, tracer, logger, r, options)\n\tUpdateProHTTPHandler(endpoints, tracer, logger, r, options)\n\tCreateConsoHTTPHandler(endpoints, tracer, logger, r, options)\n\tCreateMenuHTTPHandler(endpoints, tracer, logger, r, options)\n\tGetConsoHTTPHandler(endpoints, tracer, logger, r, options)\n\tGetConsoByOrderIDHTTPHandler(endpoints, tracer, logger, r, options)\n\tGetMenuHTTPHandler(endpoints, tracer, logger, r, options)\n\tGetEstablishmentTypeHTTPHandler(endpoints, tracer, logger, r, options)\n\tGetSoireeOrdersHTTPHandler(endpoints, tracer, logger, r, options)\n\tGetProEstablishmentsHTTPHandler(endpoints, tracer, logger, r, options)\n\tGetAnalysePHTTPHandler(endpoints, tracer, logger, r, options)\n\tDeleteEstabHTTPHandler(endpoints, tracer, logger, r, options)\n\tDeleteSoireeHTTPHandler(endpoints, tracer, logger, r, options)\n\n\treturn r\n}", "func Handler(ctx context.Context, req 
events.APIGatewayProxyRequest) (Response, error) {\n\tvar buf bytes.Buffer\n\n\tvar message string\n\tmessage = req.Path\n\n\tlog.Print(fmt.Sprint(\"Called with path: \", req.Path))\n\tstatusCode := 200\n\n\t// Could use a third party routing library at this point, but being hacky for now\n\titems := strings.Split(req.Path, \"/\")\n\tvar item string\n\tif len(items) > 1 {\n\t\titem = strings.Join(items[2:], \"/\")\n\t}\n\n\t// If we actually have an action to take\n\tif len(items) >= 1 {\n\t\tswitch items[1] {\n\t\tcase \"list\":\n\t\t\titems, err := List()\n\t\t\tif err != nil {\n\t\t\t\tstatusCode = 500\n\t\t\t\tmessage = fmt.Sprint(err)\n\t\t\t} else {\n\t\t\t\tmessage = strings.Join(items, \"\\n\")\n\t\t\t}\n\t\tcase \"add\":\n\t\t\t// Should probably be doing this on PUT or POST only\n\t\t\terr := Add(item)\n\t\t\tif err != nil {\n\t\t\t\tstatusCode = 500\n\t\t\t\tmessage = fmt.Sprint(err)\n\t\t\t} else {\n\t\t\t\tmessage = \"Added\"\n\t\t\t}\n\n\t\tcase \"complete\":\n\t\t\t// Should only be doing this on POST, but demo\n\t\t\terr := Complete(item)\n\t\t\tif err != nil {\n\t\t\t\tstatusCode = 500\n\t\t\t\tmessage = fmt.Sprint(err)\n\t\t\t} else {\n\t\t\t\tmessage = \"Completed\"\n\t\t\t}\n\t\t}\n\t}\n\n\tbody, err := json.Marshal(map[string]interface{}{\n\t\t\"message\": message,\n\t})\n\tif err != nil {\n\t\treturn Response{StatusCode: 404}, err\n\t}\n\tjson.HTMLEscape(&buf, body)\n\n\tresp := Response{\n\t\tStatusCode: statusCode,\n\t\tIsBase64Encoded: false,\n\t\tBody: buf.String(),\n\t\tHeaders: map[string]string{\n\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t\"X-MyCompany-Func-Reply\": \"hello-handler\",\n\t\t},\n\t}\n\n\treturn resp, nil\n}", "func makeRouteHandler() http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tswitch r.Method {\n\t\tcase \"GET\":\n\t\t\thandleGetRequest(w, r)\n\t\tcase \"POST\":\n\t\t\thandlePostRequest(w, r)\n\t\tcase \"PUT\":\n\t\t\thandlePutRequest(w, r)\n\t\tdefault:\n\t\t\tfmt.Fprint(w, \"Invalid request\")\n\t\t}\n\t}\n}", "func MakeHTTPSHandler(s Service) http.Handler {\n\tr := chi.NewRouter()\n\n\t// Registrar curso a la bd\n\taddCursoHandler := kithttp.NewServer(\n\t\tmakeAddCursoEndPoint(s),\n\t\taddCursoRequestDecoder,\n\t\tkithttp.EncodeJSONResponse,\n\t)\n\tr.Method(http.MethodPost, \"/registrar\", addCursoHandler)\n\n\t// Obtene curso por el ID\n\tgetCursoByIDHandler := kithttp.NewServer(\n\t\tmakeGetCursoByIDEndPoint(s),\n\t\tgetCursoByIDRequestDecoder,\n\t\tkithttp.EncodeJSONResponse,\n\t)\n\tr.Method(http.MethodGet, \"/id/{id}\", getCursoByIDHandler)\n\n\t// Actualizar el curso por ID\n\tupdateCursoByIDHandler := kithttp.NewServer(\n\t\tmakeUpdateCursoByIDEndPoint(s),\n\t\tupdateCursoRequestDecoder,\n\t\tkithttp.EncodeJSONResponse,\n\t)\n\tr.Method(http.MethodPut, \"/actualizar\", updateCursoByIDHandler)\n\n\t// Obtener todos los cursos\n\tgetAllCursoByIDHandler := kithttp.NewServer(\n\t\tmakeGetAllCursoEndPoint(s),\n\t\tgetAllCursoRequestDecoder,\n\t\tkithttp.EncodeJSONResponse,\n\t)\n\tr.Method(http.MethodGet, \"/allCurso\", getAllCursoByIDHandler)\n\n\t// Subir Imagen todos los cursos\n\tuploadFondoCursoByIDHandler := kithttp.NewServer(\n\t\tmakeUploadImageCursoEndPoint(s),\n\t\tuploadFondoCursoRequestDecoder,\n\t\tkithttp.EncodeJSONResponse,\n\t)\n\tr.Method(http.MethodPut, \"/fondo/id/{id}\", uploadFondoCursoByIDHandler)\n\n\t// Subir Imagen todos los cursos\n\tgetFondoCursoByIDHandler := 
kithttp.NewServer(\n\t\tmakeGetFondoCursoEndPoint(s),\n\t\tgetFondoCursoRequestDecoder,\n\t\tEncodeJSONResponseFileImgUpload,\n\t)\n\tr.Method(http.MethodGet, \"/fondo/id/{id}\", getFondoCursoByIDHandler)\n\n\treturn r\n}", "func Handler(ctx context.Context, req events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\tif httpAdapter == nil {\n\t\tswaggerSpec, err := loads.Embedded(restapi.SwaggerJSON, restapi.FlatSwaggerJSON)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\n\t\tapi := operations.NewExampleAppAPI(swaggerSpec)\n\t\tserver := restapi.NewServer(api)\n\t\tserver.ConfigureAPI()\n\n\t\t// see https://github.com/go-swagger/go-swagger/issues/962#issuecomment-478382896\n\t\thttpAdapter = httpadapter.New(server.GetHandler())\n\t}\n\treturn httpAdapter.ProxyWithContext(ctx, req)\n}", "func Handler(ctx context.Context, request events.APIGatewayProxyRequest,\n\tdynamoDB *dynamodb.DynamoDB, cfg config.Configuration) (\n\tevents.APIGatewayProxyResponse, error) {\n\n\t//Instantiate item API Handler\n\tih, err := item.New(dynamoDB, cfg.AWS.DynamoDB.Table.Store)\n\tif err != nil {\n\t\treturn web.GetResponse(ctx, err.Error(), http.StatusInternalServerError)\n\t}\n\n\tlog.Debug().Msgf(\"Executing method %s for path: %s with body: %v\",\n\t\trequest.HTTPMethod, request.Path, request.Body)\n\n\tswitch request.HTTPMethod {\n\tcase http.MethodGet:\n\n\t\treturn getItems(ctx, request, ih)\n\n\t}\n\n\t//APIGateway would not allow the function to get to this point\n\t//Since all the supported http methods are in the switch\n\treturn web.GetResponse(ctx, struct{}{}, http.StatusMethodNotAllowed)\n\n}", "func NewHandler(ctx context.Context, f HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tresp := f(r.Context(), w, r)\n\n\t\tif err := resp.Failure(); err != nil {\n\t\t\tlog.From(ctx).Error(\"handling api\", zap.Error(err))\n\t\t}\n\n\t\terr := json.NewEncoder(w).Encode(resp)\n\t\tif err != nil {\n\t\t\tHTTPErr(ctx, w, errors.Wrap(err, \"encoding response\"))\n\t\t}\n\t}\n}", "func API(shutdown chan os.Signal, log *log.Logger) http.Handler {\n\n\t// Construct the web.App which holds all routes as well as common Middleware.\n\tapp := web.NewApp(shutdown, log, webcontext.Env_Dev, mid.Logger(log))\n\n\tapp.Handle(\"GET\", \"/swagger/\", saasSwagger.WrapHandler)\n\tapp.Handle(\"GET\", \"/swagger/*\", saasSwagger.WrapHandler)\n\n\t/*\n\t\tOr can use SaasWrapHandler func with configurations.\n\t\turl := saasSwagger.URL(\"http://localhost:1323/swagger/doc.json\") //The url pointing to API definition\n\t\te.GET(\"/swagger/*\", saasSwagger.SaasWrapHandler(url))\n\t*/\n\n\treturn app\n}", "func Handler(c Config) (http.Handler, error) {\n\th, _, err := HandlerAPI(c)\n\treturn h, err\n}", "func Handler(w http.ResponseWriter, r *http.Request) {\n\treflection := InitReflection()\n\n\treflection.HTTP.TLS = r.TLS\n\treflection.HTTP.Version = r.Proto\n\n\tif body, err := ioutil.ReadAll(r.Body); err != nil {\n\t\tpanic(err)\n\t} else {\n\t\treflection.Request.Body = string(body)\n\t}\n\treflection.Request.Cookies = r.Cookies()\n\tfor key, value := range r.Header {\n\t\treflection.Request.Headers[key] = value\n\t}\n\treflection.Request.Method = r.Method\n\treflection.Request.Params = r.URL.Query()\n\treflection.Request.Path = r.URL.Path\n\n\treflection.Meta.UUID = w.Header().Get(\"Request-ID\")\n\treflection.Meta.Host = r.Host\n\treflection.Meta.RemoteAddr = r.RemoteAddr\n\treflection.Meta.Timestamp = 
time.Now().Format(\"2006-01-02T15:04:05-0700\")\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\tif response, err := json.Marshal(reflection); err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tlog.Println(err)\n\t\tw.Write([]byte(\"\\\"something went wrong\\\"\"))\n\t} else {\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(fmt.Sprintf(\"%s\", response)))\n\t}\n}", "func API(cfg *APIConfig) http.Handler {\n\n\t// Construct the App which holds all routes as well as common Middleware.\n\tapp := NewApp(cfg.Shutdown, cfg.Log)\n\n\t// Register health check endpoint. This route is not authenticated.\n\tcheck := api.Check{Log: cfg.Log}\n\tapp.Get(\"/api/v1/health\", check.Health)\n\n\thandlers := api.Handlers{\n\t\tLog: cfg.Log,\n\t\tDemo: cfg.Demo,\n\t\tTokenStore: cfg.TokenStore,\n\t\tStorage: storage.NewAlertStorage(),\n\t}\n\n\t// Main application routes\n\tapp.Route(\"/api/v1\", func(r chi.Router) {\n\t\tr.Use(chiMiddleware.RequestID)\n\t\tr.Use(chiMiddleware.RealIP)\n\t\tr.Use(middleware.Logger(cfg.Log.Desugar()))\n\t\tr.Use(chiMiddleware.Recoverer)\n\t\tr.Use(chiMiddleware.Timeout(60 * time.Second))\n\t\tr.Use(middleware.Tracing)\n\n\t\tr.Group(func(r chi.Router) {\n\t\t\t// check the token for the event collector endpoint, even if reverse-proxy auth is enabled\n\t\t\tr.Use(middleware.CollectorTokenAuth(cfg.TokenStore, cfg.Log))\n\t\t\tr.Route(\"/events\", func(r chi.Router) {\n\t\t\t\tr.Post(\"/\", handlers.CreateEventBatch)\n\t\t\t})\n\t\t})\n\n\t\tr.Group(func(r chi.Router) {\n\t\t\t// these routes are protected via reverse-proxy auth\n\n\t\t\tr.Route(\"/tokens\", func(r chi.Router) {\n\t\t\t\tr.Get(\"/\", handlers.ListTokens)\n\t\t\t\tr.Post(\"/\", handlers.CreateToken)\n\t\t\t\tr.Delete(\"/{tokenID}\", handlers.DeleteToken)\n\t\t\t})\n\n\t\t\tr.Route(\"/alerts\", func(r chi.Router) {\n\t\t\t\tr.Get(\"/\", handlers.ListAlerts)\n\n\t\t\t\tr.Route(\"/{alertID}\", func(r chi.Router) {\n\t\t\t\t\tr.Post(\"/review\", handlers.ReviewAlert)\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n\n\tapp.Route(\"/\", func(r chi.Router) {\n\t\tstaticHandler := web.AssetHandler(\"/\", \"build\")\n\n\t\tr.Get(\"/\", staticHandler.ServeHTTP)\n\t\tr.Get(\"/*\", staticHandler.ServeHTTP)\n\t})\n\n\treturn app\n}", "func MakeHTTPHandler(svc PodcastManageService, signingString string, logger log.Logger) http.Handler {\n\trouter := mux.NewRouter()\n\tendpoints := MakeServerEndpoints(svc)\n\tserverOptions := []kithttp.ServerOption{\n\t\tkithttp.ServerBefore(kitjwt.HTTPToContext()),\n\t\tkithttp.ServerErrorEncoder(encodeError),\n\t\tkithttp.ServerErrorLogger(logger),\n\t}\n\n\tkf := func(token *jwt.Token) (interface{}, error) {\n\t\treturn []byte(signingString), nil\n\t}\n\tclaimsFetcher := func() jwt.Claims {\n\t\treturn &TokenClaims{}\n\t}\n\tauthMiddleware := kitjwt.NewParser(kf, jwt.SigningMethodHS256, claimsFetcher)\n\n\trouter.Methods(\"POST\").Path(\"/register\").Handler(kithttp.NewServer(\n\t\tendpoints.CreateUserEndpoint,\n\t\tdecodeCreateUserRequest,\n\t\tencodeGenericResponse,\n\t\tserverOptions...,\n\t))\n\n\tgetUserEndpoint := endpoints.GetUserEndpoint\n\tgetUserEndpoint = 
authMiddleware(getUserEndpoint)\n\trouter.Methods(\"POST\").Path(\"/user\").Handler(kithttp.NewServer(\n\t\tgetUserEndpoint,\n\t\tdecodeGetUserRequest,\n\t\tencodeGenericResponse,\n\t\tserverOptions...,\n\t))\n\trouter.Methods(\"GET\").Path(\"/user/{user}\").Handler(kithttp.NewServer(\n\t\tgetUserEndpoint,\n\t\tdecodeGetUserRequestAlternate,\n\t\tencodeGenericResponse,\n\t\tserverOptions...,\n\t))\n\n\trouter.Methods(\"POST\").Path(\"/podcast\").Handler(kithttp.NewServer(\n\t\tendpoints.GetPodcastDetailsEndpoint,\n\t\tdecodeGetPodcastDetailsRequest,\n\t\tencodeGenericResponse,\n\t\tserverOptions...,\n\t))\n\n\tsubscribeEndpoint := endpoints.SubscribeEndpoint\n\tsubscribeEndpoint = authMiddleware(subscribeEndpoint)\n\trouter.Methods(\"POST\").Path(\"/subscribe\").Handler(kithttp.NewServer(\n\t\tsubscribeEndpoint,\n\t\tdecodeSubscribeRequest,\n\t\tencodeGenericResponse,\n\t\tserverOptions...,\n\t))\n\n\tunsubscribeEndpoint := endpoints.UnsubscribeEndpoint\n\tunsubscribeEndpoint = authMiddleware(unsubscribeEndpoint)\n\trouter.Methods(\"POST\").Path(\"/unsubscribe\").Handler(kithttp.NewServer(\n\t\tunsubscribeEndpoint,\n\t\tdecodeUnsubscribeRequest,\n\t\tencodeGenericResponse,\n\t\tserverOptions...,\n\t))\n\n\tupdatePodcastEndpoint := endpoints.UpdatePodcastEndpoint\n\tupdatePodcastEndpoint = authMiddleware(updatePodcastEndpoint)\n\trouter.Methods(\"POST\").Path(\"/update\").Handler(kithttp.NewServer(\n\t\tupdatePodcastEndpoint,\n\t\tdecodeUpdatePodcastRequest,\n\t\tencodeGenericResponse,\n\t\tserverOptions...,\n\t))\n\n\tsubscriptionsEndpoint := endpoints.GetUserSubscriptionsEndpoint\n\tsubscriptionsEndpoint = authMiddleware(subscriptionsEndpoint)\n\trouter.Methods(\"POST\").Path(\"/subscriptions\").Handler(kithttp.NewServer(\n\t\tsubscriptionsEndpoint,\n\t\tdecodeGetUserSubscriptionsRequest,\n\t\tencodeGenericResponse,\n\t\tserverOptions...,\n\t))\n\n\tsubscriptionEndpoint := endpoints.GetSubscriptionDetailsEndpoint\n\tsubscriptionEndpoint = authMiddleware(subscriptionEndpoint)\n\trouter.Methods(\"POST\").Path(\"/subscription\").Handler(kithttp.NewServer(\n\t\tsubscriptionEndpoint,\n\t\tdecodeGetSubscriptionDetailsRequest,\n\t\tencodeGenericResponse,\n\t\tserverOptions...,\n\t))\n\n\trouter.Methods(\"POST\").Path(\"/login\").Handler(kithttp.NewServer(\n\t\tendpoints.GetTokenEndpoint,\n\t\tdecodeGetTokenRequest,\n\t\tencodeGenericResponse,\n\t\tserverOptions...,\n\t))\n\treturn router\n}", "func makeHandler(fn func(http.ResponseWriter, *http.Request, *Server), s *Server) func(http.ResponseWriter, *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tfn(w, r, s)\n\t}\n}", "func makeHandler(handler func(http.ResponseWriter, *http.Request) error) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\terr := handler(w, r)\n\t\tif err != nil && err != errCustomNil {\n\t\t\tlog.Printf(\"%+v\", err)\n\t\t}\n\t\tif clientError.Code != 0 {\n\t\t\tif r.Method == \"HEAD\" {\n\t\t\t\tw.Header().Set(\"Content-Type\", contentTypeJSON)\n\t\t\t\tw.WriteHeader(clientError.Code)\n\t\t\t} else {\n\t\t\t\tresponseError(w)\n\t\t\t}\n\t\t}\n\t\tclientError.Code = 0\n\t\tclientError.Text = \"\"\n\t}\n}", "func HandlerApi(w http.ResponseWriter, r *http.Request) {\n\thttp.Header.Add(w.Header(), \"content-type\", \"application/json\")\n\tparts := strings.Split(r.URL.Path, \"/\")\n\tif len(parts) == 4 && parts[3] == \"\" {\n\t\tapi := _struct.Information{_struct.Uptime(), _struct.Description, _struct.Version}\n\t\tjson.NewEncoder(w).Encode(api)\n\t} else 
{\n\t\thttp.Error(w, http.StatusText(404), 404)\n\t}\n}", "func MakeHTTPHandler(s Service, logger log.Logger) http.Handler {\n\n\tr := mux.NewRouter()\n\te := MakeServerEndpoints(s)\n\toptions := []httptransport.ServerOption{\n\t\thttptransport.ServerErrorLogger(logger),\n\t\thttptransport.ServerErrorEncoder(encodeError),\n\t}\n\n\t// POST /projects/ Adds a new Project\n\t// GET /projects/:id Gets a Project\n\t// POST /projects/:id/stacks/\t\tAdd a new Stack to a project\n\t// GET /projects/:id/stacks/\t Get all stacks for a given project\n\n\tr.Methods(\"POST\").Path(\"/projects/\").Handler(httptransport.NewServer(\n\t\te.PostProjectEndpoint,\n\t\tdecodePostProjectRequest,\n\t\tencodeResponse,\n\t\toptions...,\n\t))\n\tr.Methods(\"GET\").Path(\"/projects/{id}\").Handler(httptransport.NewServer(\n\t\te.GetProjectEndpoint,\n\t\tdecodeGetProjectRequest,\n\t\tencodeResponse,\n\t\toptions...,\n\t))\n\tr.Methods(\"POST\").Path(\"/projects/{id}/stacks/\").Handler(httptransport.NewServer(\n\t\te.PostStackEndpoint,\n\t\tdecodePostStackRequest,\n\t\tencodeResponse,\n\t\toptions...,\n\t))\n\tr.Methods(\"GET\").Path(\"/projects/{id}/stacks/\").Handler(httptransport.NewServer(\n\t\te.GetProjectStacksEndpoint,\n\t\tdecodeGetProjectStacksRequest,\n\t\tencodeResponse,\n\t\toptions...,\n\t))\n\n\treturn r\n}", "func NewHandler(s service.Service) http.Handler {\n\tr := mux.NewRouter()\n\t// base handler\n\tbase := alice.New(newSetUserMid(s))\n\t// handler with auth required\n\tauthRequired := base.Append(newAuthRequiredMid)\n\n\th := &handler{s}\n\n\t// r.PathPrefix(\"/images\").Handler(httputil.NewSingleHostReverseProxy(proxyURL))\n\tr.Handle(\"/v1/login\", base.Then(errHandler(h.register))).Methods(http.MethodPost)\n\tr.Handle(\"/v1/me\", authRequired.Then(errHandler(h.me))).Methods(http.MethodGet)\n\tr.Handle(\"/v1/me\", authRequired.Then(errHandler(h.update))).Methods(http.MethodPatch)\n\tr.Handle(\"/v1/me/reacts\", authRequired.Then(errHandler(h.react))).Methods(http.MethodPost)\n\tr.Handle(\"/v1/me/abuses\", authRequired.Then(errHandler(h.reportAbuse))).Methods(http.MethodPost)\n\n\tr.Handle(\"/v1/me/discover-people\", authRequired.Then(errHandler(h.discoverPeople))).Methods(http.MethodGet)\n\n\tr.Handle(\"/v1/me/pictures\", authRequired.Then(errHandler(h.uploadPicture))).Methods(http.MethodPost)\n\tr.Handle(\"/v1/me/pictures\", authRequired.Then(errHandler(h.pictures))).Methods(http.MethodGet)\n\tr.Handle(\"/v1/me/pictures/{id}\", authRequired.Then(errHandler(h.deletePicture))).Methods(http.MethodDelete)\n\tr.Handle(\"/v1/me/pictures/{id}/profile\", authRequired.Then(errHandler(h.setProfilePicture))).Methods(http.MethodPut)\n\n\treturn r\n}", "func MakeXenditHttpHandler(ctx context.Context, endpointXendit endpointXendit.XenditEndpoints, logger log.Logger) http.Handler {\n\tr := mux.NewRouter()\n\toptions := []httpTransport.ServerOption{\n\t\thttpTransport.ServerErrorLogger(logger),\n\t\thttpTransport.ServerErrorEncoder(modelHttpXendit.EncodeError),\n\t}\n\n\tapiV1 := r.PathPrefix(\"/api/v1\").Subrouter()\n\n\t//1. POST - Create disbursement\n\tapiV1.Methods(\"POST\").Path(\"/disbursements\").Handler(httpTransport.NewServer(\n\t\tendpointXendit.CreateDisbursementEndpoint,\n\t\tmodelHttpXendit.DecodeCreateDisbursementRequest,\n\t\tmodelHttpXendit.EncodeResponse,\n\t\toptions...,\n\t))\n\n\t//2. 
GET - Get disbursement by id\n\tapiV1.Methods(\"GET\").Path(\"/disbursements/GetDisbursementById\").Handler(httpTransport.NewServer(\n\t\tendpointXendit.GetDisbursementByIdEndpoint,\n\t\tmodelHttpXendit.DecodeGetDisbursementByIdRequest,\n\t\tmodelHttpXendit.EncodeResponse,\n\t\toptions...,\n\t))\n\n\t//3. GET - Get disbursement by external_id\n\tapiV1.Methods(\"GET\").Path(\"/disbursements/GetDisbursementByExternalID\").Handler(httpTransport.NewServer(\n\t\tendpointXendit.GetDisbursementByExternalIDEndpoint,\n\t\tmodelHttpXendit.DecodeGetDisbursementByExternalIDRequest,\n\t\tmodelHttpXendit.EncodeResponse,\n\t\toptions...,\n\t))\n\n\t//4. POST - Disbursement callback\n\tapiV1.Methods(\"POST\").Path(\"/disbursement_callback_url\").Handler(httpTransport.NewServer(\n\t\tendpointXendit.DisbursementCallbackEndpoint,\n\t\tmodelHttpXendit.DecodeDisbursementCallbackRequest,\n\t\tmodelHttpXendit.EncodeResponse,\n\t\toptions...,\n\t))\n\n\t//5. POST - Create batch disbursement\n\tapiV1.Methods(\"POST\").Path(\"/batch_disbursements\").Handler(httpTransport.NewServer(\n\t\tendpointXendit.CreateBulkDisbursementEndpoint,\n\t\tmodelHttpXendit.DecodeCreateBulkDisbursementRequest,\n\t\tmodelHttpXendit.EncodeResponse,\n\t\toptions...,\n\t))\n\n\t//6. POST - Batch Disbursement callback\n\tapiV1.Methods(\"POST\").Path(\"/batch_disbursement_callback_url\").Handler(httpTransport.NewServer(\n\t\tendpointXendit.BulkDisbursementCallbackEndpoint,\n\t\tmodelHttpXendit.DecodeBulkDisbursementCallbackRequest,\n\t\tmodelHttpXendit.EncodeResponse,\n\t\toptions...,\n\t))\n\n\treturn apiV1\n}", "func NewHandler(db moira.Database, log moira.Logger, index moira.Searcher, config *api.Config, metricSourceProvider *metricSource.SourceProvider, webConfigContent []byte) http.Handler {\n\tdatabase = db\n\tsearchIndex = index\n\trouter := chi.NewRouter()\n\trouter.Use(render.SetContentType(render.ContentTypeJSON))\n\trouter.Use(moiramiddle.UserContext)\n\trouter.Use(moiramiddle.RequestLogger(log))\n\trouter.Use(middleware.NoCache)\n\n\trouter.NotFound(notFoundHandler)\n\trouter.MethodNotAllowed(methodNotAllowedHandler)\n\n\trouter.Route(\"/api\", func(router chi.Router) {\n\t\trouter.Use(moiramiddle.DatabaseContext(database))\n\t\trouter.Get(\"/config\", getWebConfig(webConfigContent))\n\t\trouter.Route(\"/user\", user)\n\t\trouter.With(moiramiddle.Triggers(config.LocalMetricTTL, config.RemoteMetricTTL)).Route(\"/trigger\", triggers(metricSourceProvider, searchIndex))\n\t\trouter.Route(\"/tag\", tag)\n\t\trouter.Route(\"/pattern\", pattern)\n\t\trouter.Route(\"/event\", event)\n\t\trouter.Route(\"/contact\", contact)\n\t\trouter.Route(\"/subscription\", subscription)\n\t\trouter.Route(\"/notification\", notification)\n\t\trouter.Route(\"/health\", health)\n\t\trouter.Route(\"/teams\", teams)\n\t})\n\tif config.EnableCORS {\n\t\treturn cors.AllowAll().Handler(router)\n\t}\n\treturn router\n}", "func (myAPI API) getHandler() http.Handler {\n\t// Add logging if there is a logWriter defined\n\tvar handler http.Handler = myAPI.router\n\tif myAPI.logWriter != nil {\n\t\thandler = handlers.LoggingHandler(myAPI.logWriter, myAPI.router)\n\t}\n\n\t// Add CORS stuff\n\thandler = handlers.CORS(\n\t\thandlers.AllowedOrigins([]string{\"*\"}),\n\t\thandlers.AllowedMethods([]string{\"GET\", \"POST\", \"PUT\", \"DELETE\"}),\n\t\thandlers.AllowedHeaders([]string{\"Content-Type\"}),\n\t)(handler)\n\n\treturn handler\n}", "func SetupHandlers(r *mux.Router) {\n\t//object operations\n\tr.HandleFunc(\"/v1/file/upload/{allocation}\", 
common.UserRateLimit(common.ToJSONResponse(WithConnection(UploadHandler))))\n\tr.HandleFunc(\"/v1/file/download/{allocation}\", common.UserRateLimit(common.ToByteStream(WithConnection(DownloadHandler)))).Methods(\"POST\")\n\tr.HandleFunc(\"/v1/file/rename/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithConnection(RenameHandler))))\n\tr.HandleFunc(\"/v1/file/copy/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithConnection(CopyHandler))))\n\tr.HandleFunc(\"/v1/file/attributes/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithConnection(UpdateAttributesHandler))))\n\tr.HandleFunc(\"/v1/dir/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithConnection(CreateDirHandler)))).Methods(\"POST\")\n\tr.HandleFunc(\"/v1/dir/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithConnection(CreateDirHandler)))).Methods(\"DELETE\")\n\tr.HandleFunc(\"/v1/dir/rename/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithConnection(CreateDirHandler)))).Methods(\"POST\")\n\n\tr.HandleFunc(\"/v1/connection/commit/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithConnection(CommitHandler))))\n\tr.HandleFunc(\"/v1/file/commitmetatxn/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithConnection(CommitMetaTxnHandler))))\n\tr.HandleFunc(\"/v1/file/collaborator/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithConnection(CollaboratorHandler))))\n\tr.HandleFunc(\"/v1/file/calculatehash/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithConnection(CalculateHashHandler))))\n\n\t//object info related apis\n\tr.HandleFunc(\"/allocation\", common.UserRateLimit(common.ToJSONResponse(WithConnection(AllocationHandler))))\n\tr.HandleFunc(\"/v1/file/meta/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithReadOnlyConnection(FileMetaHandler))))\n\tr.HandleFunc(\"/v1/file/stats/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithReadOnlyConnection(FileStatsHandler))))\n\tr.HandleFunc(\"/v1/file/list/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithReadOnlyConnection(ListHandler))))\n\tr.HandleFunc(\"/v1/file/objectpath/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithReadOnlyConnection(ObjectPathHandler))))\n\tr.HandleFunc(\"/v1/file/referencepath/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithReadOnlyConnection(ReferencePathHandler))))\n\tr.HandleFunc(\"/v1/file/objecttree/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithReadOnlyConnection(ObjectTreeHandler))))\n\tr.HandleFunc(\"/v1/file/refs/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithReadOnlyConnection(RefsHandler)))).Methods(\"GET\")\n\t//admin related\n\tr.HandleFunc(\"/_debug\", common.UserRateLimit(common.ToJSONResponse(DumpGoRoutines)))\n\tr.HandleFunc(\"/_config\", common.UserRateLimit(common.ToJSONResponse(GetConfig)))\n\tr.HandleFunc(\"/_stats\", common.UserRateLimit(stats.StatsHandler))\n\tr.HandleFunc(\"/_statsJSON\", common.UserRateLimit(common.ToJSONResponse(stats.StatsJSONHandler)))\n\tr.HandleFunc(\"/_cleanupdisk\", common.UserRateLimit(common.ToJSONResponse(WithReadOnlyConnection(CleanupDiskHandler))))\n\tr.HandleFunc(\"/getstats\", common.UserRateLimit(common.ToJSONResponse(stats.GetStatsHandler)))\n\n\t//marketplace related\n\tr.HandleFunc(\"/v1/marketplace/shareinfo/{allocation}\", common.UserRateLimit(common.ToJSONResponse(WithConnection(MarketPlaceShareInfoHandler))))\n}", "func Handler() http.Handler {\n\tapiMux := 
http.NewServeMux()\n\treturn apiMux\n}", "func New(o *Options, logger common.Logger, broker *broker.Broker) *Handler {\n\trouter := route.New()\n\n\th := &Handler{\n\t\toptions: o,\n\t\tlogger: logger,\n\t\trouter: router,\n\t\tbroker: broker,\n\t}\n\n\t// Setup HTTP endpoints\n\trouter.Get(\"/\", func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.Redirect(w, r, \"/overview\", http.StatusFound)\n\t})\n\trouter.Get(\"/overview\", h.handleOverview)\n\trouter.Get(\"/logs\", func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.Redirect(w, r, \"/logs/*\", http.StatusFound)\n\t})\n\trouter.Get(\"/logs/:pattern\", h.handleLogs)\n\trouter.Get(\"/request\", h.handleRequest)\n\trouter.Post(\"/request\", h.handleRequestPost)\n\n\t// Static files\n\trouter.Get(\"/static/*filepath\", route.FileServe(path.Join(o.AssetsPath, \"static\")))\n\n\t// Prometheus HTTP endpoint\n\trouter.Get(\"/metrics\", promhttp.HandlerFor(prometheus.Gatherers{prometheus.DefaultGatherer, broker.Monitoring.Registry}, promhttp.HandlerOpts{}).ServeHTTP)\n\n\t// cellaserv HTTP API\n\trouter.Get(\"/api/v1/request/:service/:method\", h.apiRequest)\n\trouter.Post(\"/api/v1/request/:service/:method\", h.apiRequest)\n\trouter.Post(\"/api/v1/publish/:event\", h.apiPublish)\n\trouter.Get(\"/api/v1/subscribe/:event\", h.apiSubscribe)\n\t// TODO(halfr): spy\n\n\t// Go debug\n\trouter.Get(\"/debug/*subpath\", handleDebug)\n\trouter.Post(\"/debug/*subpath\", handleDebug)\n\n\treturn h\n}", "func MakeHandlers(r *mux.Router) {\n\tservice := makeGorm()\n\n\tr.Handle(\"/v1/api/login\", handlers.Login(service)).Methods(\"POST\")\n}", "func setuphandlers() {\n\thttp.HandleFunc(\"/\", rootHandler)\n\thttp.HandleFunc(\"/status\", statusHandler)\n\thttp.HandleFunc(\"/stats\", statsHandler)\n\thttp.HandleFunc(\"/request\", requestHandler)\n}", "func make(metricsName string, h util.JSONRequestHandler) http.Handler {\n\treturn prometheus.InstrumentHandler(metricsName, util.MakeJSONAPI(h))\n}", "func Handler(opts ...Option) http.Handler {\n\treturn handlerFrom(compile(opts))\n}", "func Handler(request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\n\tlog.Printf(\"Processing Lambda request %s\\n\", request.RequestContext.RequestID)\n\n\tuser, err := apigateway.GetOrCreateAuthenticatedUser(context.TODO(), &request)\n\tif err != nil {\n\t\treturn apigateway.ResponseUnsuccessful(401), errAuth\n\t}\n\n\tr := &SetUpRequest{}\n\terr = json.Unmarshal([]byte(request.Body), r)\n\tif err != nil {\n\t\tfmt.Printf(\"Could not parse body: %v.\\n\", request.Body)\n\t\treturn events.APIGatewayProxyResponse{StatusCode: 500}, errParse\n\t}\n\n\tid, ok := request.PathParameters[\"id\"]\n\tif !ok || id == \"\" {\n\t\treturn apigateway.ResponseUnsuccessful(400), errMissingParameter\n\t}\n\n\tobjID, err := primitive.ObjectIDFromHex(id)\n\tif err != nil {\n\t\treturn apigateway.ResponseUnsuccessful(400), errInvalidParameter\n\t}\n\n\tsetup, err := theilliminationgame.LoadGameSetUp(&objID)\n\tif err != nil {\n\t\tfmt.Printf(\"Error finding games: '%v'.\\n\", err)\n\t\treturn apigateway.ResponseUnsuccessful(500), err\n\t}\n\n\tvar result string\n\n\tif r.UpdateType == \"join\" {\n\t\tsetup.JoinGame(user)\n\t} else if r.UpdateType == \"option_add\" {\n\t\tresult = string(setup.AddOption(user, r.Option))\n\t} else if r.UpdateType == \"detailedoption_add\" {\n\t\tresult = string(setup.AddDetailedOption(user, r.Option, r.Description, r.Link))\n\t} else if r.UpdateType == \"option_update\" {\n\t\tif r.Updates == nil {\n\t\t\treturn 
apigateway.ResponseUnsuccessfulString(400, \"No Updates\"), err\n\t\t}\n\t\tsetup.UpdateOption(user, r.OptionIndex, r.Updates)\n\t} else if r.UpdateType == \"deactivate\" {\n\t\tsetup.Deactivate(user)\n\t} else if r.UpdateType == \"addtag\" {\n\t\tsetup.AddTag(user, *r.Tag)\n\t} else if r.UpdateType == \"removetag\" {\n\t\tsetup.RemoveTag(user, *r.Tag)\n\t} else {\n\t\tresult = \"Unknown update type\"\n\t}\n\n\tsetup, _ = theilliminationgame.LoadGameSetUp(&objID)\n\n\tresponse := &GameUpdateResponse{\n\t\tResult: result,\n\t\tGame: setup.Summary(user),\n\t}\n\n\tresp := apigateway.ResponseSuccessful(response)\n\treturn resp, nil\n}", "func apiHandler(c *RequestContext, w http.ResponseWriter, r *http.Request) (int, error) {\n\tif r.URL.Path == \"/auth/get\" {\n\t\treturn authHandler(c, w, r)\n\t}\n\n\tif r.URL.Path == \"/auth/renew\" {\n\t\treturn renewAuthHandler(c, w, r)\n\t}\n\n\tvalid, _ := validateAuth(c, r)\n\tif !valid {\n\t\treturn http.StatusForbidden, nil\n\t}\n\n\tc.Router, r.URL.Path = splitURL(r.URL.Path)\n\n\tif !c.User.Allowed(r.URL.Path) {\n\t\treturn http.StatusForbidden, nil\n\t}\n\n\tfor _, p := range c.FM.Plugins {\n\t\tcode, err := p.BeforeAPI(c, w, r)\n\t\tif code != 0 || err != nil {\n\t\t\treturn code, err\n\t\t}\n\t}\n\n\tif c.Router == \"checksum\" || c.Router == \"download\" {\n\t\tvar err error\n\t\tc.FI, err = getInfo(r.URL, c.FM, c.User)\n\t\tif err != nil {\n\t\t\treturn errorToHTTP(err, false), err\n\t\t}\n\t}\n\n\tvar code int\n\tvar err error\n\n\tswitch c.Router {\n\tcase \"download\":\n\t\tcode, err = downloadHandler(c, w, r)\n\tcase \"checksum\":\n\t\tcode, err = checksumHandler(c, w, r)\n\tcase \"command\":\n\t\tcode, err = command(c, w, r)\n\tcase \"search\":\n\t\tcode, err = search(c, w, r)\n\tcase \"resource\":\n\t\tcode, err = resourceHandler(c, w, r)\n\tcase \"users\":\n\t\tcode, err = usersHandler(c, w, r)\n\tcase \"commands\":\n\t\tcode, err = commandsHandler(c, w, r)\n\tcase \"plugins\":\n\t\tcode, err = pluginsHandler(c, w, r)\n\tdefault:\n\t\tcode = http.StatusNotFound\n\t}\n\n\tif code >= 300 || err != nil {\n\t\treturn code, err\n\t}\n\n\tfor _, p := range c.FM.Plugins {\n\t\tcode, err := p.AfterAPI(c, w, r)\n\t\tif code != 0 || err != nil {\n\t\t\treturn code, err\n\t\t}\n\t}\n\n\treturn code, err\n}", "func makeControllerHandler(manager *Manager, server *socketio.Server, nsp string, controllable Controllable) *ControllerHandler {\n\thandler := &ControllerHandler{\n\t\tmanager: manager,\n\t\tserver: server,\n\t\tnsp: nsp,\n\t\tcontroller: controllable,\n\t}\n\thandler.actionsHandler = makeActionsHandler(manager, handler)\n\n\thandler.controller.RegisterActions(handler.actionsHandler)\n\n\treturn handler\n}", "func APILogHandler(c echo.Context, req, res []byte) {\n\tc.Response().Header().Set(\"X-mobileloket-ResponseTime\", time.Now().Format(time.RFC3339))\n\treqTime, err := time.Parse(time.RFC3339, c.Request().Header.Get(\"X-mobileloket-RequestTime\"))\n\tvar elapstime time.Duration\n\tif err == nil {\n\t\telapstime = time.Since(reqTime)\n\t}\n\n\tvar handler string\n\tr := c.Echo().Routes()\n\tcpath := strings.Replace(c.Path(), \"/\", \"\", -1)\n\tfor _, v := range r {\n\t\tvpath := strings.Replace(v.Path, \"/\", \"\", -1)\n\t\tif vpath == cpath && v.Method == c.Request().Method {\n\t\t\thandler = v.Name\n\t\t\t// Handler for wrong route.\n\t\t\tif strings.Contains(handler, \"func1\") {\n\t\t\t\thandler = \"UndefinedRoute\"\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Get Handler Name\n\tdir, file := path.Split(handler)\n\tfileStrings := 
strings.Split(file, \".\")\n\tpackHandler := dir + fileStrings[0]\n\tfuncHandler := strings.Replace(handler, packHandler+\".\", \"\", -1)\n\n\trespHeader, _ := json.Marshal(c.Response().Header())\n\treqHeader := httpdump.DumpRequest(c.Request())\n\n\tInfo().\n\t\tStr(\"Identifier\", viper.GetString(\"log_identifier\")+\"_http\").\n\t\tStr(\"package\", packHandler).\n\t\tInt64(\"elapsed_time\", elapstime.Nanoseconds()/int64(time.Millisecond)).\n\t\tStr(\"handler\", funcHandler).\n\t\tStr(\"ip\", c.RealIP()).\n\t\tStr(\"host\", c.Request().Host).\n\t\tStr(\"method\", c.Request().Method).\n\t\tStr(\"url\", c.Request().RequestURI).\n\t\tStr(\"request_time\", c.Request().Header.Get(\"X-mobileloket-RequestTime\")).\n\t\tStr(\"request_header\", reqHeader).\n\t\tStr(\"request\", string(req)).\n\t\tInt(\"httpcode\", c.Response().Status).\n\t\tStr(\"response_time\", c.Response().Header().Get(\"X-mobileloket-ResponseTime\")).\n\t\tStr(\"response_header\", string(respHeader)).\n\t\tStr(\"response\", string(res)).\n\t\tMsg(\"\")\n}", "func (s *Server) Handler() http.Handler {\n\trouter := chi.NewRouter()\n\trouter.Use(server.RecoverMiddleware)\n\trouter.Use(cors.New(corsOptions).Handler)\n\n\tif !s.conf.separateClientServer() {\n\t\t// Mount server for irmaclient\n\t\ts.attachClientEndpoints(router)\n\t}\n\n\tlog := server.LogOptions{Response: true, Headers: true, From: true}\n\trouter.NotFound(server.LogMiddleware(\"requestor\", log)(router.NotFoundHandler()).ServeHTTP)\n\trouter.MethodNotAllowed(server.LogMiddleware(\"requestor\", log)(router.MethodNotAllowedHandler()).ServeHTTP)\n\n\t// Group main API endpoints, so we can attach our request/response logger to it\n\t// while not adding it to the endpoints already added above (which do their own logging).\n\n\trouter.Group(func(r chi.Router) {\n\t\tr.Use(server.SizeLimitMiddleware)\n\t\tr.Use(server.TimeoutMiddleware([]string{\"/statusevents\"}, server.WriteTimeout))\n\t\tr.Use(cors.New(corsOptions).Handler)\n\t\tr.Use(server.LogMiddleware(\"requestor\", log))\n\n\t\t// Server routes\n\t\tr.Route(\"/session\", func(r chi.Router) {\n\t\t\tr.Post(\"/\", s.handleCreateSession)\n\t\t\tr.Route(\"/{requestorToken}\", func(r chi.Router) {\n\t\t\t\tr.Use(s.tokenMiddleware)\n\t\t\t\tr.Delete(\"/\", s.handleDelete)\n\t\t\t\tr.Get(\"/status\", s.handleStatus)\n\t\t\t\tr.Get(\"/statusevents\", s.handleStatusEvents)\n\t\t\t\tr.Get(\"/result\", s.handleResult)\n\t\t\t\t// Routes for getting signed JWTs containing the session result. 
Only work if configuration has a private key\n\t\t\t\tr.Get(\"/result-jwt\", s.handleJwtResult)\n\t\t\t\tr.Get(\"/getproof\", s.handleJwtProofs) // irma_api_server-compatible JWT\n\t\t\t})\n\t\t})\n\n\t\tr.Get(\"/publickey\", s.handlePublicKey)\n\t})\n\n\trouter.Group(func(r chi.Router) {\n\t\tr.Use(server.SizeLimitMiddleware)\n\t\tr.Use(server.TimeoutMiddleware(nil, server.WriteTimeout))\n\t\tr.Use(cors.New(corsOptions).Handler)\n\t\tr.Use(server.LogMiddleware(\"revocation\", log))\n\t\tr.Post(\"/revocation\", s.handleRevocation)\n\t})\n\n\treturn s.prefixRouter(router)\n}", "func handleAPI(at, jsonfile string) {\n\ttl := loadlib(jsonfile)\n\n\tif !strings.HasSuffix(at, \"/\") {\n\t\tat += \"/\"\n\t}\n\thandler := responders.DannResponder(tl)\n\n\thttp.Handle(at, http.StripPrefix(at, handler))\n}", "func New(this *Handler) *Handler {\n\ta := &API{Cfg: this.Cfg, Interactor: this.Interactor}\n\troutes(a).Register()\n\treturn this\n}", "func API() http.Handler {\n\n\t// Look at /kit/web/midware for middleware options.\n\ta := app.New()\n\n\t// Initialize the routes for the API.\n\ta.Handle(\"GET\", \"/1.0/test/names\", handlers.Test.List)\n\n\treturn a\n}", "func makeHandler(env env, fn func(http.ResponseWriter, *http.Request, env)) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tfn(w, r, env)\n\t}\n}", "func generateHandler(w http.ResponseWriter, r *http.Request) {\n\tpath := r.URL.Path[1:]\n\trequest := urlParser.ParseURL(path)\n\n\ttableName := request.TableName\n\tfields := request.Fields\n\n\t//only valid names are existing tables in db\n\tif !structs.ValidStruct[tableName] {\n\t\tfmt.Printf(\"\\\"%s\\\" table not found.\\n\", tableName)\n\n\t\thttp.NotFound(w, r)\n\t} else {\n\t\tfmt.Printf(\"\\\"%s\\\" table found.\\n\", tableName)\n\n\t\trows := sqlParser.GetRows(tableName, fields)\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\t\tif fields == \"\" {\n\t\t\tfmt.Printf(\"No fields\\n\")\n\t\t\tstructs.MapTableToJson(tableName, rows, w)\n\t\t} else {\n\t\t\tfmt.Printf(\"%s\\n\", fields)\n\t\t\tfieldArray := strings.Split(fields, \",\")\n\t\t\tstructFilter.MapCustomTableToJson(tableName, rows, w, fieldArray)\n\t\t}\n\t}\n}", "func Handler(config *config.ProxyConfig, cli *http.Client) http.HandlerFunc {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tstart := time.Now()\n\t\tproxyReq, reqSize, err := createRequest(r, config)\n\t\tif err != nil {\n\t\t\tHandleError(w, r, err.Error(), config.PrintLogs)\n\t\t\treturn\n\t\t}\n\n\t\trs, err := cli.Do(proxyReq)\n\t\tif err != nil {\n\t\t\tHandleError(w, r, fmt.Sprintf(\"[%s] error performing request to target: \"+err.Error(), config.ProxyName), config.PrintLogs)\n\t\t\treturn\n\t\t}\n\n\t\trsSize, err := writeResponse(w, rs, config)\n\t\tif err != nil {\n\t\t\tHandleError(w, r, err.Error(), config.PrintLogs)\n\t\t\treturn\n\t\t}\n\n\t\tif config.PrintLogs {\n\t\t\tPrintLog(start, reqSize, rsSize, r, config.CompactLogs)\n\t\t}\n\t})\n}", "func Handler(ctx context.Context, payload events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\tvar resp []byte\n\n\tswitch path := payload.Path; path {\n\tcase PRODUCT_PATH:\n\t\tswitch method := payload.HTTPMethod; method {\n\t\tcase GET:\n\t\t\tfmt.Printf(\"GET method for products.\\n\")\n\t\tcase POST:\n\t\t\tnewStock, err := event.CreateStock(payload.Body)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\n\t\t\tresp, _ = json.Marshal(newStock)\n\t\tcase PUT:\n\t\t\tfmt.Printf(\"PUT method for 
products.\\n\")\n\t\tcase DELETE:\n\t\t\tfmt.Printf(\"DELETE method for products.\\n\")\n\t\t}\n\n\tcase STORE_PATH:\n\t\tswitch method := payload.HTTPMethod; method {\n\t\tcase GET:\n\t\t\tfmt.Printf(\"GET method for stocks.\\n\")\n\t\tcase POST:\n\t\t\tnewStockLoc, err := event.CreateStockLocation(payload.Body)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\n\t\t\tresp, _ = json.Marshal(newStockLoc)\n\t\tcase PUT:\n\t\t\tfmt.Printf(\"PUT method for stocks.\\n\")\n\t\tcase DELETE:\n\t\t\tfmt.Printf(\"DELETE method for stocks.\\n\")\n\t\t}\n\tdefault:\n\t\tfmt.Printf(\"panik: %s.\\n\", path)\n\t}\n\n\treturn events.APIGatewayProxyResponse{\n\t\tBody: string(resp),\n\t\tStatusCode: 200,\n\t}, nil\n}", "func BuildHandler(logger log.Logger, db *dbcontext.DB, cfg *config.Config) http.Handler {\n\trouter := routing.New()\n\n\trouter.Use(\n\t\terrors.Handler(logger),\n\t\tcontent.TypeNegotiator(content.JSON),\n\t\tcors.Handler(cors.AllowAll),\n\t)\n\n\trg := router.Group(\"\")\n\n\tcityRepo := city.NewRepository(db, logger)\n\n\tcity.RegisterHandlers(rg,\n\t\tcity.NewService(cityRepo, logger),\n\t\tlogger,\n\t)\n\n\ttemperature.RegisterHandlers(rg,\n\t\ttemperature.NewService(temperature.NewRepository(db, logger), logger),\n\t\tlogger,\n\t)\n\n\tforecast.RegisterHandlers(rg,\n\t\tforecast.NewService(forecast.NewRepository(db, logger), logger),\n\t\tlogger,\n\t)\n\n\twebhook.RegisterHandlers(rg,\n\t\twebhook.NewService(webhook.NewRepository(db, logger, cityRepo), logger),\n\t\tlogger,\n\t)\n\n\treturn router\n}", "func Handler() http.Handler {\n\t// Allocate a new router. Gorilla router with O(n) complexity is used as there\n\t// is just one route. Replace it by a trie based multiplexer if the number\n\t// of routes is growing.\n\tr := mux.NewRouter()\n\tapi := r.Path(\"/api/\").Subrouter()\n\n\t// TODO: for type safety use http.Method{Name} constants instead if manually writing\n\t// method names when Go 1.7 is stable and no support of other versions is required.\n\tapi.HandleFunc(\n\t\t\"/recent_purchases/{username:[A-Za-z0-9_.-]+}\", wrap(handlers.PopularPurchases),\n\t).Methods(\"GET\")\n\n\treturn api\n}", "func Handler(request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\n\n\tserver := os.Getenv(\"SERVER\")\n\n\t//Get the path parameter that was sent\n\tpath := request.Path\n\n\turl := \"http://\" + server + path\n\tresp, err := http.Get(url)\n if err != nil {\n\t\tlog.Fatalln(err)\n }\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\n if err != nil {\n\t\tlog.Fatalln(err)\n }\n\n\n\t//Generate message that want to be sent as body\n\t//message := fmt.Sprintf(\" { \\\"Message\\\" : \\\"Hello %s \\\" } \", url)\n\n\t//Returning response with AWS Lambda Proxy Response\n\treturn events.APIGatewayProxyResponse{Body: string(body), StatusCode: 200}, nil\n}", "func Handler(cs creating.Service, ls listing.Service, ds drawing.Service) http.Handler {\n\trouter := httprouter.New()\n\n\trouter.GET(\"/health\", health())\n\trouter.POST(\"/decks\", createDeck(cs))\n\trouter.GET(\"/decks/:id\", getDeck(ls))\n\trouter.PATCH(\"/decks/:id/draw/:amount\", drawCards(ds))\n\treturn router\n}", "func apiHttpHandler(c echo.Context) error {\n\turiPattern := c.Path()\n\tif _, ok := httpRoutingMap[uriPattern]; !ok {\n\t\treturn c.JSON(http.StatusOK, itineris.ResultNotImplemented.ToMap())\n\t}\n\thttpMethod := strings.ToUpper(c.Request().Method)\n\tif _, ok := httpRoutingMap[uriPattern][httpMethod]; !ok {\n\t\treturn c.JSON(http.StatusOK, 
itineris.ResultNotImplemented.ToMap())\n\t}\n\n\tapiName := httpRoutingMap[uriPattern][httpMethod]\n\tctx, auth, params := _parseRequest(apiName, c)\n\n\tapiResult := ApiRouter.CallApi(ctx, auth, params)\n\treturn c.JSON(http.StatusOK, apiResult.ToMap())\n}", "func handle() http.Handler {\n\tr := gin.Default()\n\n\tapi := r.Group(\"/api\")\n\t{\n\t\tapi.GET(\"/about\", aboutHandler)\n\n\t\tapi.POST(\"/group\", groupCreateHandler)\n\t\tapi.POST(\"/group/:id\", groupSensorHandler)\n\t\tapi.GET(\"/group/:id\", groupDataHandler)\n\t\tapi.GET(\"/group\", groupListHandler)\n\t\tapi.DELETE(\"/group/:id\", groupDeleteHandler)\n\t}\n\n\tr.NoRoute(func(c *gin.Context) {\n\t\tc.JSON(http.StatusNotFound, gin.H{\"error\": \"404 Not Found\"})\n\t})\n\n\treturn r\n}", "func NewHandler(conf *HandlerConfig) http.Handler {\n\tah := &apiHandler{conf}\n\tmux := http.NewServeMux()\n\tah.RegisterPublicDir(mux)\n\tah.RegisterEncoder(mux, \"csv\", &freegeoip.CSVEncoder{UseCRLF: true})\n\tah.RegisterEncoder(mux, \"xml\", &freegeoip.XMLEncoder{Indent: true})\n\tah.RegisterEncoder(mux, \"json\", &freegeoip.JSONEncoder{})\n\treturn mux\n}", "func makeHandler(fn func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tlog.Printf(\"%s %s %s\\n\", r.RemoteAddr, r.Method, r.URL)\n\t\tm := validPath.FindStringSubmatch(r.URL.Path)\n\t\tif m == nil && !(r.URL.Path == \"//\") {\n\t\t\thttp.NotFound(w, r)\n\t\t\treturn\n\t\t}\n\t\tfn(w, r, m[3])\n\t}\n}", "func SetupHandlers(ctx context.Context) {\n\thttp.Handle(\"/version\", VersionHandler(ctx))\n}", "func makeHandler(n *core.IpfsNode, l net.Listener, options ...corehttp.ServeOption) (http.Handler, error) {\n\ttopMux := http.NewServeMux()\n\tmux := topMux\n\tfor _, option := range options {\n\t\tvar err error\n\t\tmux, err = option(n, l, mux)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn topMux, nil\n}", "func (web *Web) Handler() http.Handler {\n\trouter := mux.NewRouter()\n\trouter.Handle(\"/\", httpHandler(web.handleIndex))\n\trouter.Handle(\"/auth\", httpHandler(web.handleAuth))\n\trouter.Handle(\"/auth/callback\", httpHandler(web.handleAuthCallback))\n\trouter.Handle(\"/auth/clear\", httpHandler(web.handleAuthClear))\n\trouter.Handle(\"/api/me\", httpHandler(web.handleAPIMe))\n\trouter.Handle(\"/api/checklist\", httpHandler(web.handleAPIChecklist))\n\trouter.Handle(\"/api/check\", httpHandler(web.handleAPICheck)).Methods(\"PUT\", \"DELETE\")\n\trouter.Handle(\"/{owner}/{repo}/pull/{number}\", httpHandler(web.handleChecklist))\n\trouter.Handle(\"/{owner}/{repo}/pull/{number}/{stage}\", httpHandler(web.handleChecklist))\n\trouter.PathPrefix(\"/js/\").Handler(http.FileServer(&assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, AssetInfo: AssetInfo}))\n\n\tif testToken := os.Getenv(\"PRCHECKLIST_TEST_GITHUB_TOKEN\"); testToken != \"\" {\n\t\trouter.Handle(\"/debug/auth-for-testing\", web.mkHandlerDebugAuthTesting(testToken))\n\t}\n\n\thandler := http.Handler(router)\n\n\tif behindProxy {\n\t\thandler = handlers.ProxyHeaders(handler)\n\t}\n\n\treturn web.oauthForwarder.Wrap(handler)\n}", "func (a *App) Handler() http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tvar (\n\t\t\tstatus int\n\t\t\tfinal string\n\t\t\taction string\n\t\t\tbody interface{}\n\t\t\tmatched *route\n\t\t\tresponse *Response\n\t\t)\n\t\treq := newRequest(r)\n\t\tfor _, route := range a.routes {\n\t\t\tif route.Match(req) != nil {\n\t\t\t\tif route.handler != nil 
{\n\t\t\t\t\troute.handler(w, r)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tmatched = route\n\t\t\t\tstatus, body, action = route.Respond(req)\n\t\t\t\tif status == 301 || status == 302 {\n\t\t\t\t\tresp, ok := body.(*Response)\n\t\t\t\t\tif ok {\n\t\t\t\t\t\tfinal = resp.Body.(string)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfinal = body.(string)\n\t\t\t\t\t}\n\t\t\t\t\tresp.Headers.Set(\"Location\", final)\n\t\t\t\t\tresp.status = status\n\t\t\t\t\tresp.write(w)\n\t\t\t\t\treq.log(status, len(final))\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\trouteData := &RouteData{\n\t\t\tAction: action,\n\t\t\tVerb: r.Method,\n\t\t}\n\t\tif matched == nil {\n\t\t\tstatus = 404\n\t\t\tfinal = \"\"\n\t\t} else {\n\t\t\trouteData.ControllerName = pluralOf(matched.controller)\n\t\t}\n\t\tcontentType := req.ContentType()\n\n\t\tif resp, ok := body.(*Response); ok {\n\t\t\tresponse = resp\n\t\t\tif ct := response.Headers.Get(\"Content-Type\"); ct != contentType && ct != \"\" {\n\t\t\t\tcontentType = ct\n\t\t\t}\n\t\t} else {\n\t\t\tresponse = NewResponse(body)\n\t\t}\n\n\t\tstatus, final, mime, _ := a.Process(req, status, response.Body, contentType, routeData)\n\n\t\tresponse.status = status\n\t\tresponse.final = final\n\t\tresponse.Headers.Set(\"Content-Type\", mime)\n\t\tresponse.write(w)\n\t\treq.log(status, len(response.final))\n\t}\n}", "func makeHandler(handler func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\texp := validPath.FindStringSubmatch(r.URL.Path)\n\t\tif exp == nil {\n\t\t\thttp.NotFound(w, r)\n\t\t\tlogInfo(r.URL.Path, \"path not found\")\n\t\t\treturn\n\t\t}\n\t\thandler(w, r, exp[2])\n\t}\n}", "func NewHandler(ctx context.Context, rsService store.RulesetService, cfg Config) http.Handler {\n\ts := service{\n\t\trulesets: rsService,\n\t}\n\n\tvar logger zerolog.Logger\n\n\tif cfg.Logger != nil {\n\t\tlogger = *cfg.Logger\n\t} else {\n\t\tlogger = zerolog.New(os.Stderr).With().Timestamp().Logger()\n\t}\n\n\tif cfg.Timeout == 0 {\n\t\tcfg.Timeout = 5 * time.Second\n\t}\n\n\tif cfg.WatchTimeout == 0 {\n\t\tcfg.WatchTimeout = 30 * time.Second\n\t}\n\n\trs := rulesetService{\n\t\tservice: &s,\n\t\ttimeout: cfg.Timeout,\n\t\twatchTimeout: cfg.WatchTimeout,\n\t}\n\n\t// router\n\tmux := http.NewServeMux()\n\tmux.Handle(\"/rulesets/\", &rs)\n\n\t// middlewares\n\tchain := []func(http.Handler) http.Handler{\n\t\thlog.NewHandler(logger),\n\t\thlog.AccessHandler(func(r *http.Request, status, size int, duration time.Duration) {\n\t\t\thlog.FromRequest(r).Info().\n\t\t\t\tStr(\"method\", r.Method).\n\t\t\t\tStr(\"url\", r.URL.String()).\n\t\t\t\tInt(\"status\", status).\n\t\t\t\tInt(\"size\", size).\n\t\t\t\tDur(\"duration\", duration).\n\t\t\t\tMsg(\"request received\")\n\t\t}),\n\t\thlog.RemoteAddrHandler(\"ip\"),\n\t\thlog.UserAgentHandler(\"user_agent\"),\n\t\thlog.RefererHandler(\"referer\"),\n\t\tfunc(http.Handler) http.Handler {\n\t\t\treturn mux\n\t\t},\n\t}\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t// playing the middleware chain\n\t\tvar cur http.Handler\n\t\tfor i := len(chain) - 1; i >= 0; i-- {\n\t\t\tcur = chain[i](cur)\n\t\t}\n\n\t\t// serving the request\n\t\tcur.ServeHTTP(w, r.WithContext(ctx))\n\t})\n}", "func (self *OCSPResponder) makeHandler() func(w http.ResponseWriter, r *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tlog.Print(fmt.Sprintf(\"Got %s request from %s\", r.Method, r.RemoteAddr))\n\t\tif self.Strict && 
r.Header.Get(\"Content-Type\") != \"application/ocsp-request\" {\n\t\t\tlog.Println(\"Strict mode requires correct Content-Type header\")\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tb := new(bytes.Buffer)\n\t\tswitch r.Method {\n\t\tcase \"POST\":\n\t\t\tb.ReadFrom(r.Body)\n\t\tcase \"GET\":\n\t\t\tlog.Println(r.URL.Path)\n\t\t\tgd, err := base64.StdEncoding.DecodeString(r.URL.Path[1:])\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tr := bytes.NewReader(gd)\n\t\t\tb.ReadFrom(r)\n\t\tdefault:\n\t\t\tlog.Println(\"Unsupported request method\")\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\t// parse request, verify, create response\n\t\tw.Header().Set(\"Content-Type\", \"application/ocsp-response\")\n\t\tresp, err := self.verify(b.Bytes())\n\t\tif err != nil {\n\t\t\tlog.Print(err)\n\t\t\t// technically we should return an ocsp error response. but this is probably fine\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tlog.Print(\"Writing response\")\n\t\tw.Write(resp)\n\t}\n}", "func HandlerAPI(c Config) (http.Handler, *operations.Kube4EdgeManagementAPI, error) {\n\tspec, err := loads.Analyzed(swaggerCopy(SwaggerJSON), \"\")\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"analyze swagger: %v\", err)\n\t}\n\tapi := operations.NewKube4EdgeManagementAPI(spec)\n\tapi.ServeError = errors.ServeError\n\tapi.Logger = c.Logger\n\n\tif c.APIKeyAuthenticator != nil {\n\t\tapi.APIKeyAuthenticator = c.APIKeyAuthenticator\n\t}\n\tif c.BasicAuthenticator != nil {\n\t\tapi.BasicAuthenticator = c.BasicAuthenticator\n\t}\n\tif c.BearerAuthenticator != nil {\n\t\tapi.BearerAuthenticator = c.BearerAuthenticator\n\t}\n\n\tapi.JSONConsumer = runtime.JSONConsumer()\n\tapi.JSONProducer = runtime.JSONProducer()\n\tapi.YggdrasilGetControlMessageForDeviceHandler = yggdrasil.GetControlMessageForDeviceHandlerFunc(func(params yggdrasil.GetControlMessageForDeviceParams) middleware.Responder {\n\t\tctx := params.HTTPRequest.Context()\n\t\treturn c.YggdrasilAPI.GetControlMessageForDevice(ctx, params)\n\t})\n\tapi.YggdrasilGetDataMessageForDeviceHandler = yggdrasil.GetDataMessageForDeviceHandlerFunc(func(params yggdrasil.GetDataMessageForDeviceParams) middleware.Responder {\n\t\tctx := params.HTTPRequest.Context()\n\t\treturn c.YggdrasilAPI.GetDataMessageForDevice(ctx, params)\n\t})\n\tapi.YggdrasilPostControlMessageForDeviceHandler = yggdrasil.PostControlMessageForDeviceHandlerFunc(func(params yggdrasil.PostControlMessageForDeviceParams) middleware.Responder {\n\t\tctx := params.HTTPRequest.Context()\n\t\treturn c.YggdrasilAPI.PostControlMessageForDevice(ctx, params)\n\t})\n\tapi.YggdrasilPostDataMessageForDeviceHandler = yggdrasil.PostDataMessageForDeviceHandlerFunc(func(params yggdrasil.PostDataMessageForDeviceParams) middleware.Responder {\n\t\tctx := params.HTTPRequest.Context()\n\t\treturn c.YggdrasilAPI.PostDataMessageForDevice(ctx, params)\n\t})\n\tapi.ServerShutdown = func() {}\n\treturn api.Serve(c.InnerMiddleware), api, nil\n}", "func NewHandler(chatServer *chat.Server, apiDocPath string) http.Handler {\n\tchatServerInstance = chatServer\n\n\thandler := rest.NewHTTPHandler()\n\tregisterPaths(handler)\n\tif apiDocPath != \"\" {\n\t\trest.ConfigureSwagger(apiDocPath, handler)\n\t}\n\treturn handler\n}", "func Create() http.Handler {\n\trouter := httprouter.New()\n\n\trouter.Handle(\"GET\", \"/\", middle.ResponseHandler(Hello))\n\trouter.Handle(\"POST\", 
\"/post\", middle.ResponseHandler(Hello))\n\trouter.Handle(\"GET\", \"/error\", middle.ResponseHandler(ErrorRoute))\n\trouter.Handle(\"GET\", \"/user-error\", middle.ResponseHandler(UserErrorRoute))\n\trouter.Handle(\"GET\", \"/multi-error\", middle.ResponseHandler(MultiErrorRoute))\n\trouter.Handle(\"GET\", \"/panic\", middle.ResponseHandler(Panic))\n\trouter.Handle(\"GET\", \"/version\", Version)\n\n\treturn alice.New(\n\t\tmiddle.RecoveryHandler,\n\t\tmiddle.FrameHandler,\n\t\tmiddle.RequestIDHandler,\n\t\tmiddle.RequestPathHandler,\n\t\tmiddle.BodyHandler).\n\t\tThen(router)\n}", "func CreateHttpApiHandler(client *client.Client) http.Handler {\n\tapiHandler := ApiHandler{client}\n\twsContainer := restful.NewContainer()\n\n\tdeployWs := new(restful.WebService)\n\tdeployWs.Path(\"/api/appdeployments\").\n\t\tConsumes(restful.MIME_JSON).\n\t\tProduces(restful.MIME_JSON)\n\tdeployWs.Route(\n\t\tdeployWs.POST(\"\").\n\t\t\tTo(apiHandler.handleDeploy).\n\t\t\tReads(AppDeploymentSpec{}).\n\t\t\tWrites(AppDeploymentSpec{}))\n\tdeployWs.Route(\n\t\tdeployWs.POST(\"/validate/name\").\n\t\t\tTo(apiHandler.handleNameValidity).\n\t\t\tReads(AppNameValiditySpec{}).\n\t\t\tWrites(AppNameValidity{}))\n\twsContainer.Add(deployWs)\n\n\treplicaSetWs := new(restful.WebService)\n\treplicaSetWs.Path(\"/api/replicasets\").\n\t\tConsumes(restful.MIME_JSON).\n\t\tProduces(restful.MIME_JSON)\n\treplicaSetWs.Route(\n\t\treplicaSetWs.GET(\"\").\n\t\t\tTo(apiHandler.handleGetReplicaSetList).\n\t\t\tWrites(ReplicaSetList{}))\n\treplicaSetWs.Route(\n\t\treplicaSetWs.GET(\"/{namespace}/{replicaSet}\").\n\t\t\tTo(apiHandler.handleGetReplicaSetDetail).\n\t\t\tWrites(ReplicaSetDetail{}))\n\treplicaSetWs.Route(\n\t\treplicaSetWs.POST(\"/{namespace}/{replicaSet}/update/pods\").\n\t\t\tTo(apiHandler.handleUpdateReplicasCount).\n\t\t\tReads(ReplicaSetSpec{}))\n\treplicaSetWs.Route(\n\t\treplicaSetWs.DELETE(\"/{namespace}/{replicaSet}\").\n\t\t\tTo(apiHandler.handleDeleteReplicaSet))\n\treplicaSetWs.Route(\n\t\treplicaSetWs.GET(\"/pods/{namespace}/{replicaSet}\").\n\t\t\tTo(apiHandler.handleGetReplicaSetPods).\n\t\t\tWrites(ReplicaSetPods{}))\n\twsContainer.Add(replicaSetWs)\n\n\tnamespacesWs := new(restful.WebService)\n\tnamespacesWs.Path(\"/api/namespaces\").\n\t\tConsumes(restful.MIME_JSON).\n\t\tProduces(restful.MIME_JSON)\n\tnamespacesWs.Route(\n\t\tnamespacesWs.POST(\"\").\n\t\t\tTo(apiHandler.handleCreateNamespace).\n\t\t\tReads(NamespaceSpec{}).\n\t\t\tWrites(NamespaceSpec{}))\n\tnamespacesWs.Route(\n\t\tnamespacesWs.GET(\"\").\n\t\t\tTo(apiHandler.handleGetNamespaces).\n\t\t\tWrites(NamespaceList{}))\n\twsContainer.Add(namespacesWs)\n\n\tlogsWs := new(restful.WebService)\n\tlogsWs.Path(\"/api/logs\").\n\t\tProduces(restful.MIME_JSON)\n\tlogsWs.Route(\n\t\tlogsWs.GET(\"/{namespace}/{podId}/{container}\").\n\t\t\tTo(apiHandler.handleLogs).\n\t\t\tWrites(Logs{}))\n\twsContainer.Add(logsWs)\n\n\teventsWs := new(restful.WebService)\n\teventsWs.Path(\"/api/events\").\n\t\tProduces(restful.MIME_JSON)\n\teventsWs.Route(\n\t\teventsWs.GET(\"/{namespace}/{replicaSet}\").\n\t\t\tTo(apiHandler.handleEvents).\n\t\t\tWrites(Events{}))\n\twsContainer.Add(eventsWs)\n\n\treturn wsContainer\n}", "func createHttpHandler(h http.HandlerFunc, method Method, defaultStatusCode int) HttpHandlerAdapter {\n\n\tadapter := HttpHandlerAdapter {\n\t\thandler: func (rw http.ResponseWriter, r *http.Request) {\n\t\t\tlogger.Infof(\" %s %s 200 OK \", r.Method, r.URL.Path )\n\t\t\th(rw, r)\n\t\t},\n\t\tdefaultStatusCode: 
defaultStatusCode,\n\t\tmethod: method};\n\n\treturn adapter;\n}", "func APIHandler(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprintf(w, \"Hello World\")\n}", "func Handler(request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\tvar data briefStruct\n\tjson.Unmarshal([]byte(request.Body), &data)\n\n\tres := responseJson(data.Title, data.Text, briefText(data.Title, data.Text))\n\tfmt.Println(\"res: \", res)\n\treturn events.APIGatewayProxyResponse{Body: res, StatusCode: 200}, nil\n}", "func HTTPHandler(domain string, driver drivers.Driver) http.Handler {\n\tvar mux *router.Router\n\tvar api = minioAPI{}\n\tapi.driver = driver\n\tapi.domain = domain\n\n\tr := router.NewRouter()\n\tmux = getMux(api, r)\n\n\tvar conf = config.Config{}\n\tif err := conf.SetupConfig(); err != nil {\n\t\tlog.Fatal(iodine.New(err, map[string]string{\"domain\": domain}))\n\t}\n\n\th := timeValidityHandler(mux)\n\th = ignoreResourcesHandler(h)\n\th = validateRequestHandler(conf, h)\n\th = quota.BandwidthCap(h, 25*1024*1024, time.Duration(30*time.Minute))\n\th = quota.BandwidthCap(h, 100*1024*1024, time.Duration(24*time.Hour))\n\th = quota.RequestLimit(h, 100, time.Duration(30*time.Minute))\n\th = quota.RequestLimit(h, 1000, time.Duration(24*time.Hour))\n\th = quota.ConnectionLimit(h, 5)\n\treturn h\n}", "func makeHandler(fn func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Println(\"handler:\", r.URL.Path)\n\t\tm := validPath.FindStringSubmatch(r.URL.Path)\n\t\tif m == nil {\n\t\t\thttp.NotFound(w, r)\n\t\t\treturn\n\t\t}\n\t\tfn(w, r, m[2])\n\t}\n}", "func Handler(req events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\t// Log body and pass to the DAO\n\tfmt.Printf(\"Received body: %v\\n\", req)\n\n\trequest := new(vm.GeneralRequest)\n\tresponse := request.Validate(req.Body)\n\tif response.Code != 0 {\n\t\treturn events.APIGatewayProxyResponse{Body: response.Marshal(), StatusCode: 500}, nil\n\t}\n\n\trequest.Date = time.Now().Unix()\n\n\tvar mainTable = \"main\"\n\tif value, ok := os.LookupEnv(\"dynamodb_table_main\"); ok {\n\t\tmainTable = value\n\t}\n\n\t// insert data into the DB\n\tdal.Insert(mainTable, request)\n\n\t// Log and return result\n\tfmt.Println(\"Wrote item: \", request)\n\treturn events.APIGatewayProxyResponse{Body: response.Marshal(), StatusCode: 200}, nil\n}", "func CreateHandler(svc ServiceModel) *http.Server {\n\treturn http.NewServer(\n\t\tmakeCreateEndpoint(svc),\n\t\tdecodeCreateRequest,\n\t\thttp.EncodeJSONResponse,\n\t)\n}", "func (hr *httpRouter) Handler() http.Handler {\n\n\tc, _ := console.New(console.Options{Color: true})\n\t_ = logger.Register(\"console\", logger.Config{Writer: c})\n\tcLogger, _ := logger.Get(\"console\")\n\tl := log.New(cLogger)\n\n\tfmt.Print(\"Loading Routes...\")\n\t//add files in a directory\n\tro := newHttpRouterExtended(hr)\n\n\tmw := middleware.Chain{}\n\n\t//adding files\n\tfor path, file := range hr.file {\n\t\tro.HandlerFunc(\"GET\", path, mw.Add(l.MW).Handle(\n\t\t\tfunc(w http.ResponseWriter, req *http.Request) {\n\t\t\t\thttp.ServeFile(w, req, hr.file[req.Context().Value(router.PATTERN).(string)])\n\t\t\t}))\n\t\tfmt.Printf(\"\\n\\x1b[32m %#v [GET]%v \\x1b[49m\\x1b[39m \", path, file)\n\t}\n\n\t// adding directories\n\tfor k, path := range hr.dir {\n\t\tfileServer := http.FileServer(http.Dir(path))\n\t\tpattern := k + \"/*filepath\"\n\t\tro.HandlerFunc(\"GET\", pattern, mw.Add(l.MW).Handle(\n\t\t\tfunc(w 
http.ResponseWriter, req *http.Request) {\n\t\t\t\t//disable directory listing\n\t\t\t\tif strings.HasSuffix(req.URL.Path, \"/\") {\n\t\t\t\t\thttp.NotFound(w, req)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif val, ok := req.Context().Value(router.PARAMS).(map[string][]string)[\"filepath\"]; ok {\n\t\t\t\t\treq.URL.Path = val[0]\n\t\t\t\t\tfileServer.ServeHTTP(w, req)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\thttp.NotFound(w, req)\n\t\t\t\treturn\n\n\t\t\t}))\n\t\tfmt.Printf(\"\\n\\x1b[32m %#v [GET]%v \\x1b[49m\\x1b[39m \", pattern, http.Dir(path))\n\t}\n\n\t//register all controller routes\n\tfor _, r := range hr.routes {\n\t\tfmt.Printf(\"\\n\\x1b[32m %#v :name \\x1b[49m\\x1b[39m \", r.pattern)\n\t\tfor method, fn := range r.controller.MappingBy(r.pattern) {\n\t\t\tif r.mws != nil {\n\t\t\t\tro.HandlerFunc(strings.ToUpper(method), r.pattern, r.mws.Handle(r.controller.ServeHTTP)) //TODO ????? error no url pattern\n\t\t\t} else {\n\t\t\t\tro.HandlerFunc(strings.ToUpper(method), r.pattern, r.controller.ServeHTTP)\n\t\t\t}\n\t\t\tfmt.Printf(\"\\x1b[32m [%v]%v name \\x1b[49m\\x1b[39m \", method, fn)\n\t\t}\n\t}\n\n\t//Not Found Handler\n\tif hr.notFound != nil {\n\t\tro.NotFound = hr.notFound\n\t}\n\n\treturn ro\n}", "func (c *Operation) registerHandler() {\n\t// Add more protocol endpoints here to expose them as controller API endpoints\n\tc.handlers = []Handler{\n\t\tsupport.NewHTTPHandler(login, http.MethodGet, c.login),\n\t\tsupport.NewHTTPHandler(settings, http.MethodGet, c.settings),\n\t\tsupport.NewHTTPHandler(getCreditScore, http.MethodGet, c.getCreditScore),\n\t\tsupport.NewHTTPHandler(callback, http.MethodGet, c.callback),\n\t\tsupport.NewHTTPHandler(oidcRedirectPath, http.MethodGet, c.oidcRedirect),\n\n\t\t// issuer rest apis (html decoupled)\n\t\tsupport.NewHTTPHandler(authPath, http.MethodGet, c.auth),\n\t\tsupport.NewHTTPHandler(searchPath, http.MethodGet, c.search),\n\t\tsupport.NewHTTPHandler(verifyDIDAuthPath, http.MethodPost, c.verifyDIDAuthHandler),\n\t\tsupport.NewHTTPHandler(createCredentialPath, http.MethodPost, c.createCredentialHandler),\n\t\tsupport.NewHTTPHandler(generateCredentialPath, http.MethodPost, c.generateCredentialHandler),\n\n\t\t// chapi\n\t\tsupport.NewHTTPHandler(revoke, http.MethodPost, c.revokeVC),\n\t\tsupport.NewHTTPHandler(generate, http.MethodPost, c.generateVC),\n\n\t\t// didcomm\n\t\tsupport.NewHTTPHandler(didcommToken, http.MethodPost, c.didcommTokenHandler),\n\t\tsupport.NewHTTPHandler(didcommCallback, http.MethodGet, c.didcommCallbackHandler),\n\t\tsupport.NewHTTPHandler(didcommCredential, http.MethodPost, c.didcommCredentialHandler),\n\t\tsupport.NewHTTPHandler(didcommAssuranceData, http.MethodPost, c.didcommAssuraceHandler),\n\n\t\tsupport.NewHTTPHandler(didcommInit, http.MethodGet, c.initiateDIDCommConnection),\n\t\tsupport.NewHTTPHandler(didcommUserEndpoint, http.MethodGet, c.getIDHandler),\n\n\t\t// oidc\n\t\tsupport.NewHTTPHandler(oauth2GetRequestPath, http.MethodGet, c.createOIDCRequest),\n\t\tsupport.NewHTTPHandler(oauth2CallbackPath, http.MethodGet, c.handleOIDCCallback),\n\n\t\t// JSON-LD contexts API\n\t\tsupport.NewHTTPHandler(jsonldcontextrest.AddContextPath, http.MethodPost, c.addJSONLDContextHandler),\n\t}\n}" ]
[ "0.7168402", "0.710997", "0.7024364", "0.6932593", "0.69240314", "0.68977165", "0.6896971", "0.6886619", "0.6849388", "0.6845259", "0.68446195", "0.68355274", "0.68299645", "0.68141025", "0.67243904", "0.66944796", "0.6646774", "0.66209537", "0.6620859", "0.65832055", "0.6569689", "0.65395766", "0.652062", "0.65021247", "0.6462788", "0.64536965", "0.64242166", "0.63914245", "0.63766676", "0.6372274", "0.6369267", "0.63682705", "0.6338454", "0.63330984", "0.6328081", "0.6311857", "0.6280727", "0.6265576", "0.62614733", "0.62610936", "0.62513906", "0.6245651", "0.6233802", "0.6225261", "0.6224931", "0.62101734", "0.62035114", "0.6200679", "0.6195108", "0.61931133", "0.6183917", "0.61581707", "0.6151387", "0.61381775", "0.6121558", "0.6111741", "0.61096287", "0.6108816", "0.6094532", "0.60732305", "0.6067388", "0.6050815", "0.6043665", "0.60363436", "0.60351855", "0.60338324", "0.60307294", "0.60236245", "0.6022317", "0.601887", "0.60139805", "0.60135543", "0.6000489", "0.59969956", "0.5996042", "0.5995675", "0.598845", "0.5981122", "0.59759295", "0.5975413", "0.59729964", "0.59706223", "0.596752", "0.59637195", "0.5952283", "0.59477836", "0.59457016", "0.5941736", "0.59386605", "0.59362805", "0.59355056", "0.5930876", "0.5929503", "0.5916155", "0.5915465", "0.5907501", "0.59066087", "0.5905731", "0.59040993", "0.5903549" ]
0.7418875
0
Creates a case in the AWS Support Center. This operation is similar to how you create a case in the AWS Support Center Create Case page. The AWS Support API doesn't support requesting service limit increases. You can submit a service limit increase in the following ways: Submit a request from the AWS Support Center Create Case page. Use the Service Quotas RequestServiceQuotaIncrease operation. A successful CreateCase request returns an AWS Support case number. You can use the DescribeCases operation and specify the case number to get existing AWS Support cases. After you create a case, use the AddCommunicationToCase operation to add additional communication or attachments to an existing case. The caseId is separate from the displayId that appears in the AWS Support Center. Use the DescribeCases operation to get the displayId. You must have a Business or Enterprise support plan to use the AWS Support API. If you call the AWS Support API from an account that does not have a Business or Enterprise support plan, the SubscriptionRequiredException error message appears. For information about changing your support plan, see AWS Support.
func (c *Client) CreateCase(ctx context.Context, params *CreateCaseInput, optFns ...func(*Options)) (*CreateCaseOutput, error) {
	if params == nil {
		params = &CreateCaseInput{}
	}

	result, metadata, err := c.invokeOperation(ctx, "CreateCase", params, optFns, addOperationCreateCaseMiddlewares)
	if err != nil {
		return nil, err
	}

	out := result.(*CreateCaseOutput)
	out.ResultMetadata = metadata
	return out, nil
}
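The document above is only the SDK's generated client wrapper. As a point of reference (not part of the dataset row), here is a minimal usage sketch assuming the aws-sdk-go-v2 support client; the subject, body, and other field values are placeholder assumptions, not data from this dataset.

// Usage sketch: calling the CreateCase wrapper shown above.
// Assumes default credentials and an account with a Business or Enterprise
// support plan; otherwise the API returns SubscriptionRequiredException.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/support"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := support.NewFromConfig(cfg)

	// Subject and CommunicationBody are illustrative placeholders.
	out, err := client.CreateCase(context.TODO(), &support.CreateCaseInput{
		Subject:           aws.String("Example subject"),
		CommunicationBody: aws.String("Example description of the issue."),
		IssueType:         aws.String("technical"),
		Language:          aws.String("en"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// A successful request returns the new AWS Support case number.
	fmt.Println("created case:", aws.ToString(out.CaseId))
}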
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *Client) AddCommunicationToCase(ctx context.Context, params *AddCommunicationToCaseInput, optFns ...func(*Options)) (*AddCommunicationToCaseOutput, error) {\n\tif params == nil {\n\t\tparams = &AddCommunicationToCaseInput{}\n\t}\n\n\tresult, metadata, err := c.invokeOperation(ctx, \"AddCommunicationToCase\", params, optFns, addOperationAddCommunicationToCaseMiddlewares)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tout := result.(*AddCommunicationToCaseOutput)\n\tout.ResultMetadata = metadata\n\treturn out, nil\n}", "func NewCase(condition string, statements ...Statement) *Case {\n\treturn &Case{\n\t\tcondition: condition,\n\t\tstatements: statements,\n\t\tcaller: fetchClientCallerLine(),\n\t}\n}", "func (mr *MockClientMockRecorder) CreateCase(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CreateCase\", reflect.TypeOf((*MockClient)(nil).CreateCase), arg0)\n}", "func NewCase() *SCaseFunction {\n\treturn &SCaseFunction{}\n}", "func NewCase(evaluator evaluatorFunc, command commandFunc) Case {\n\treturn Case{\n\t\tevaluator: evaluator,\n\t\tcommand: command,\n\t}\n}", "func (c *AccountController) Create(ctx echo.Context) error {\n\tmodel := account.Account{}\n\terr := ctx.Bind(&model)\n\tif err != nil {\n\t\treturn ctx.JSON(http.StatusUnprocessableEntity, err.Error())\n\t}\n\n\tres, err := c.AccountUsecase.Create(&model)\n\tif err != nil {\n\t\treturn ctx.JSON(http.StatusInternalServerError, err.Error())\n\t}\n\n\treturn ctx.JSON(http.StatusCreated, res)\n}", "func (m *MockClient) CreateCase(arg0 *support.CreateCaseInput) (*support.CreateCaseOutput, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreateCase\", arg0)\n\tret0, _ := ret[0].(*support.CreateCaseOutput)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (h *SupportAPI) CreateSupportRecord(c *gin.Context) {\n\tctx := context.Background()\n\tuserID := c.MustGet(\"userID\").(uint)\n\tctx = context.WithValue(ctx, entities.UserIDKey, userID)\n\tvar req supportRequest\n\terr := c.BindJSON(&req)\n\tif err != nil {\n\t\tentities.SendParsingError(c, \"There has been an error while parsing your information, please try again\", err)\n\t\treturn\n\t}\n\terr = req.Validate()\n\tif err != nil {\n\t\tentities.SendValidationError(c, err.Error(), err)\n\t\treturn\n\t}\n\tinfo := entities.SupportInfo{\n\t\tEmail: req.Email,\n\t\tMobile: req.Mobile,\n\t}\n\tcreatedInfo, err := h.SupportUsecase.Create(ctx, &info)\n\tif err != nil {\n\t\tentities.SendValidationError(c, \"You are not authrorized to creates support info records\", err)\n\t\treturn\n\t}\n\tc.JSON(200, gin.H{\n\t\t\"messages\": \"Support Info Created Successfully\",\n\t\t\"info\": createdInfo,\n\t})\n\treturn\n}", "func (a *Client) CreateContact(params *CreateContactParams, authInfo runtime.ClientAuthInfoWriter) error {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewCreateContactParams()\n\t}\n\n\t_, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"createContact\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/contacts\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &CreateContactReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (s *Service) GrantCase(c 
context.Context, nwMsg []byte, oldMsg []byte) (err error) {\n\tmr := &model.Case{}\n\tif err = json.Unmarshal(nwMsg, mr); err != nil {\n\t\tlog.Error(\"json.Unmarshal(%s) error(%v)\", string(nwMsg), err)\n\t\treturn\n\t}\n\tif mr.Status != model.CaseStatusGranting {\n\t\treturn\n\t}\n\tstime, err := time.ParseInLocation(time.RFC3339, mr.Stime, time.Local)\n\tif err != nil {\n\t\tstime, err = time.ParseInLocation(\"2006-01-02 15:04:05\", mr.Stime, time.Local)\n\t\tif err != nil {\n\t\t\tlog.Error(\"time.ParseInLocation(%s) error(%v)\", mr.Stime, err)\n\t\t\treturn\n\t\t}\n\t\terr = nil\n\t}\n\tetime, err := time.ParseInLocation(time.RFC3339, mr.Etime, time.Local)\n\tif err != nil {\n\t\tetime, err = time.ParseInLocation(\"2006-01-02 15:04:05\", mr.Etime, time.Local)\n\t\tif err != nil {\n\t\t\tlog.Error(\"time.ParseInLocation(%s) error(%v)\", mr.Etime, err)\n\t\t\treturn\n\t\t}\n\t\terr = nil\n\t}\n\tsimCase := &model.SimCase{\n\t\tID: mr.ID,\n\t\tMid: mr.Mid,\n\t\tVoteRule: mr.Agree,\n\t\tVoteBreak: mr.Against,\n\t\tVoteDelete: mr.VoteDelete,\n\t\tCaseType: mr.CaseType,\n\t\tStime: xtime.Time(stime.Unix()),\n\t\tEtime: xtime.Time(etime.Unix()),\n\t}\n\tmcases := make(map[int64]*model.SimCase)\n\tmcases[mr.ID] = simCase\n\tif err = s.dao.SetGrantCase(c, mcases); err != nil {\n\t\tlog.Error(\"s.dao.SetMIDCaseGrant(%+v) error(%v)\", mr, err)\n\t}\n\treturn\n}", "func NewCase(t *testing.T, path string) Case {\n\tc := Case{T: t}\n\tc.In = newFile(&c, path)\n\tc.Out = newFile(&c, path+Extension)\n\treturn c\n}", "func (c *Client) UpdateCase(ctx context.Context, params *UpdateCaseInput, optFns ...func(*Options)) (*UpdateCaseOutput, error) {\n\tif params == nil {\n\t\tparams = &UpdateCaseInput{}\n\t}\n\n\tresult, metadata, err := c.invokeOperation(ctx, \"UpdateCase\", params, optFns, c.addOperationUpdateCaseMiddlewares)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tout := result.(*UpdateCaseOutput)\n\tout.ResultMetadata = metadata\n\treturn out, nil\n}", "func fnCase(ctx Context, doc *JDoc, params []string) interface{} {\n\tstats := ctx.Value(EelTotalStats).(*ServiceStats)\n\tctx.Log().Error(\"error_type\", \"func_case\", \"op\", \"case\", \"cause\", \"now_implemented_in_parser\", \"params\", params)\n\tstats.IncErrors()\n\tAddError(ctx, SyntaxError{fmt.Sprintf(\"case function now implemented in parser\"), \"case\", params})\n\treturn nil\n}", "func (ec *executionContext) _Case(ctx context.Context, sel ast.SelectionSet, obj *models.Case) graphql.Marshaler {\n\tfields := graphql.CollectFields(ctx, sel, caseImplementors)\n\n\tvar wg sync.WaitGroup\n\tout := graphql.NewOrderedMap(len(fields))\n\tinvalid := false\n\tfor i, field := range fields {\n\t\tout.Keys[i] = field.Alias\n\n\t\tswitch field.Name {\n\t\tcase \"__typename\":\n\t\t\tout.Values[i] = graphql.MarshalString(\"Case\")\n\t\tcase \"Id\":\n\t\t\tout.Values[i] = ec._Case_Id(ctx, field, obj)\n\t\tcase \"Asset\":\n\t\t\twg.Add(1)\n\t\t\tgo func(i int, field graphql.CollectedField) {\n\t\t\t\tout.Values[i] = ec._Case_Asset(ctx, field, obj)\n\t\t\t\twg.Done()\n\t\t\t}(i, field)\n\t\tcase \"CaseNumber\":\n\t\t\tout.Values[i] = ec._Case_CaseNumber(ctx, field, obj)\n\t\tcase \"Origin\":\n\t\t\tout.Values[i] = ec._Case_Origin(ctx, field, obj)\n\t\tcase \"Owner\":\n\t\t\twg.Add(1)\n\t\t\tgo func(i int, field graphql.CollectedField) {\n\t\t\t\tout.Values[i] = ec._Case_Owner(ctx, field, obj)\n\t\t\t\twg.Done()\n\t\t\t}(i, field)\n\t\tcase \"Reason\":\n\t\t\tout.Values[i] = ec._Case_Reason(ctx, field, obj)\n\t\tcase 
\"IsClosed\":\n\t\t\tout.Values[i] = ec._Case_IsClosed(ctx, field, obj)\n\t\tcase \"Contact\":\n\t\t\twg.Add(1)\n\t\t\tgo func(i int, field graphql.CollectedField) {\n\t\t\t\tout.Values[i] = ec._Case_Contact(ctx, field, obj)\n\t\t\t\twg.Done()\n\t\t\t}(i, field)\n\t\tcase \"CreatedBy\":\n\t\t\twg.Add(1)\n\t\t\tgo func(i int, field graphql.CollectedField) {\n\t\t\t\tout.Values[i] = ec._Case_CreatedBy(ctx, field, obj)\n\t\t\t\twg.Done()\n\t\t\t}(i, field)\n\t\tcase \"ClosedDate\":\n\t\t\tout.Values[i] = ec._Case_ClosedDate(ctx, field, obj)\n\t\tcase \"CreatedDate\":\n\t\t\tout.Values[i] = ec._Case_CreatedDate(ctx, field, obj)\n\t\tcase \"IsDeleted\":\n\t\t\tout.Values[i] = ec._Case_IsDeleted(ctx, field, obj)\n\t\tcase \"Description\":\n\t\t\tout.Values[i] = ec._Case_Description(ctx, field, obj)\n\t\tcase \"IsEscalated\":\n\t\t\tout.Values[i] = ec._Case_IsEscalated(ctx, field, obj)\n\t\tcase \"LastModifiedBy\":\n\t\t\twg.Add(1)\n\t\t\tgo func(i int, field graphql.CollectedField) {\n\t\t\t\tout.Values[i] = ec._Case_LastModifiedBy(ctx, field, obj)\n\t\t\t\twg.Done()\n\t\t\t}(i, field)\n\t\tcase \"LastModifiedDate\":\n\t\t\tout.Values[i] = ec._Case_LastModifiedDate(ctx, field, obj)\n\t\tcase \"LastReferencedDate\":\n\t\t\tout.Values[i] = ec._Case_LastReferencedDate(ctx, field, obj)\n\t\tcase \"LastViewedDate\":\n\t\t\tout.Values[i] = ec._Case_LastViewedDate(ctx, field, obj)\n\t\tdefault:\n\t\t\tpanic(\"unknown field \" + strconv.Quote(field.Name))\n\t\t}\n\t}\n\twg.Wait()\n\tif invalid {\n\t\treturn graphql.Null\n\t}\n\treturn out\n}", "func DeleteCase(w http.ResponseWriter, r *http.Request) {\r\n\tr.Header.Set(\"Content-Type\", \"application/json, charset=UTF-8\")\r\n\tvar dc DeleteConfig\r\n\r\n\tswitch r.Method {\r\n\tcase \"GET\":\r\n\t\tparams := mux.Vars(r)\r\n\t\tdc.Default()\r\n\t\tdc.CaseName = params[\"case_name\"]\r\n\tcase \"POST\":\r\n\t\tbody, err := ioutil.ReadAll(r.Body)\r\n\t\tif err != nil {\r\n\t\t\tapi.LogDebug(api.DEBUG, \"[+] POST /delete/endpoint, failed to read request\")\r\n\t\t\tfmt.Fprintln(w, api.HttpFailureMessage(\"Failed to read HTTP request\"))\r\n\t\t\treturn\r\n\t\t}\r\n\t\tdc.LoadParams(body)\r\n\t}\r\n\r\n\tif dc.CaseName == \"*\" || dc.CaseName == \"\" {\r\n\t\tapi.LogDebug(api.DEBUG, \"[+] POST /delete/case, valid casename required\")\r\n\t\tfmt.Fprintln(w, api.HttpFailureMessage(\"Valid casename is required. 
* or NULL can not be used\"))\r\n\t\treturn\r\n\t}\r\n\r\n\tvar query elastic.Query\r\n\tquery = elastic.NewBoolQuery().Must(elastic.NewTermQuery(\"CaseInfo.CaseName.keyword\", dc.CaseName))\r\n\tdeleteEndpointByQuery(w, r, query, \"DeleteCase\")\r\n}", "func (client *Client) CreateVSwitch(request *CreateVSwitchRequest) (response *CreateVSwitchResponse, err error) {\n\tresponse = CreateCreateVSwitchResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}", "func CreateContact(w http.ResponseWriter, r *http.Request) {\n\n\terr := r.ParseForm()\n\n\tif err != nil {\n\t\tsetting.Renderer.JSON(w, http.StatusBadRequest, err)\n\t}\n\n\tcontact := new(models.Contact)\n\tcontact.ID = bson.NewObjectId()\n\n\tdecoder := schema.NewDecoder()\n\terr = decoder.Decode(contact, r.PostForm)\n\n\tif err != nil {\n\t\tapi.RenderError(w, http.StatusBadRequest, err)\n\n\t\treturn\n\t}\n\n\terr = store.GetDB().Save(contact)\n\n\tif err != nil {\n\t\tapi.RenderError(w, http.StatusBadRequest, err)\n\n\t\treturn\n\t}\n\n\tsetting.Renderer.JSON(w, http.StatusOK, contact)\n}", "func (t *UseCase_UseCase) NewUseCase(Id string) (*UseCase_UseCase_UseCase, error){\n\n\t// Initialise the list within the receiver struct if it has not already been\n\t// created.\n\tif t.UseCase == nil {\n\t\tt.UseCase = make(map[string]*UseCase_UseCase_UseCase)\n\t}\n\n\tkey := Id\n\n\t// Ensure that this key has not already been used in the\n\t// list. Keyed YANG lists do not allow duplicate keys to\n\t// be created.\n\tif _, ok := t.UseCase[key]; ok {\n\t\treturn nil, fmt.Errorf(\"duplicate key %v for list UseCase\", key)\n\t}\n\n\tt.UseCase[key] = &UseCase_UseCase_UseCase{\n\t\tId: &Id,\n\t}\n\n\treturn t.UseCase[key], nil\n}", "func (api HatchbuckClient) CreateContact(contact Contact) (Contact, error) {\n\tvar c Contact\n\tendpoint := fmt.Sprintf(\"%v/contact?api_key=%v\", api.baseURL, api.key)\n\tpayload, _ := json.Marshal(contact)\n\tres, err := http.Post(endpoint, \"application/json\", bytes.NewBuffer(payload))\n\tlog.Println(res.StatusCode)\n\tif err != nil {\n\t\treturn c, err\n\t}\n\tdecoder := json.NewDecoder(res.Body)\n\terr = decoder.Decode(&c)\n\tif err != nil {\n\t\treturn c, err\n\t}\n\treturn c, nil\n}", "func CreateIncident(c *gin.Context) {\n\tvar newIncident *incident.Incident\n\tvar err error\n\tvar output *incident.Incident\n\tvar taskID int64\n\n\tctx, _ := authcontext.NewAuthContext(c)\n\n\tif err = c.BindJSON(&newIncident); err == nil {\n\t\tif taskID, err = strconv.ParseInt(c.Param(\"taskId\"), 10, 64); err == nil {\n\t\t\tif output, err = incident.Create(ctx, taskID, newIncident); err == nil {\n\t\t\t\tc.JSON(http.StatusOK, ResponseObject{\"incident\": output})\n\t\t\t}\n\t\t}\n\t}\n\n\tif err != nil {\n\t\tc.JSON(http.StatusPreconditionFailed, ResponseObject{\"error\": err.Error()})\n\t\tglog.Errorf(\"ERROR: %v\", err.Error())\n\t}\n}", "func (m *Client) CreateTicket(arg0 context.Context, arg1 zendesk.Ticket) (zendesk.Ticket, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreateTicket\", arg0, arg1)\n\tret0, _ := ret[0].(zendesk.Ticket)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (a *Client) CreateCapability(params *CreateCapabilityParams, authInfo runtime.ClientAuthInfoWriter) (*CreateCapabilityOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewCreateCapabilityParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"createCapability\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: 
\"/capabilities\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &CreateCapabilityReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*CreateCapabilityOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\tunexpectedSuccess := result.(*CreateCapabilityDefault)\n\treturn nil, runtime.NewAPIError(\"unexpected success response: content available as default response in error\", unexpectedSuccess, unexpectedSuccess.Code())\n}", "func (s *service) CreateConsignment(ctx context.Context, req *proto.Consignment, resp *proto.Response) error {\n\tvesselResp, err := s.vesselClient.FindAvailable(ctx, &vesselProto.Specification{\n\t\tMaxWeight: req.Weight,\n\t\tCapacity: int32(len(req.Containers)),\n\t})\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn fmt.Errorf(\"failed to create consignment: %s\", err)\n\t}\n\n\tlog.Printf(\"Found vessel for consignment: %s\\n\", vesselResp.Vessel.Name)\n\treq.VesselId = vesselResp.GetVessel().Id\n\terr = s.repo.Create(req)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create consignment: %s\", err)\n\t}\n\n\tresp.Consignment = req\n\tresp.Created = true\n\treturn nil\n}", "func (p *PagerDuty) CreateIncident(from, serviceID, title, body string) (*pagerduty.Incident, error) {\n\treturn p.client.CreateIncident(from, &pagerduty.CreateIncidentOptions{\n\t\tType: \"\",\n\t\tTitle: title,\n\t\tService: &pagerduty.APIReference{\n\t\t\tID: serviceID,\n\t\t\tType: \"service_reference\",\n\t\t},\n\t\tIncidentKey: \"\",\n\t\tBody: &pagerduty.APIDetails{\n\t\t\tType: \"incident_body\",\n\t\t\tDetails: body,\n\t\t},\n\t})\n}", "func (pc *Participants) CreateCustomer(ctx *helpers.TransactionContext, id string, forename string, surname string, bankID string, companyName string) error {\n\tbank, err := ctx.GetBank(bankID)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcustomer := new(defs.Customer)\n\tcustomer.ID = id\n\tcustomer.Forename = forename\n\tcustomer.Surname = surname\n\tcustomer.Bank = *bank\n\tcustomer.CompanyName = companyName\n\n\treturn ctx.CreateCustomer(customer)\n}", "func (s *BasevhdlListener) EnterCase_statement(ctx *Case_statementContext) {}", "func (a *Client) CreateCustomer(params *CreateCustomerParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateCustomerOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewCreateCustomerParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"CreateCustomer\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/v2/customers\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &CreateCustomerReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*CreateCustomerOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected 
success response for CreateCustomer: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (m *Client) CreateAutomation(arg0 context.Context, arg1 zendesk.Automation) (zendesk.Automation, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreateAutomation\", arg0, arg1)\n\tret0, _ := ret[0].(zendesk.Automation)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func CreateExercise(db *sql.DB, name string, categoryID int) (Exercise, error) {\n\tvar exercise Exercise\n\terr := db.QueryRow(`INSERT INTO exercise(name, category_id)\n\t\tVALUES\n\t\t(UPPER($1), $2)\n\t\tRETURNING id, name, category_id`, name, categoryID).Scan(&exercise.ID, &exercise.Name, &exercise.CategoryID)\n\n\tif err != nil {\n\t\treturn exercise, err\n\t}\n\n\treturn exercise, nil\n}", "func (c CaseCreator) Create(node cluster.ClientNode) core.Client {\n\treturn &stressClient{\n\t\tnumRows: c.NumRows,\n\t\tlargeConcurrency: c.LargeConcurrency,\n\t\tlargeTimeout: c.LargeTimeout,\n\t\tsmallConcurrency: c.SmallConcurrency,\n\t\tsmallTimeout: c.SmallTimeout,\n\t\treplicaRead: c.ReplicaRead,\n\t}\n}", "func CreateListCasesRequest() (request *ListCasesRequest) {\n\trequest = &ListCasesRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"CCC\", \"2020-07-01\", \"ListCases\", \"CCC\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateUsecase(userRepo user.UserRepo) user.UserUsecase {\n\treturn &UserUsecaseImpl{userRepo}\n}", "func (a *DefaultApiService) CreateTopic(ctx _context.Context) ApiCreateTopicRequest {\n\treturn ApiCreateTopicRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t}\n}", "func (c *Client) CreateInstance(name, zone, machineType, ipxeURL string) error {\n\treq := &packngo.DeviceCreateRequest{\n\t\tHostname: name,\n\t\tPlan: machineType,\n\t\tProjectID: c.projectID,\n\t\tFacility: []string{zone},\n\t\tIPXEScriptURL: ipxeURL,\n\t\tOS: \"custom_ipxe\",\n\t\tDescription: \"eden test vm\",\n\t\tBillingCycle: \"hourly\",\n\t}\n\t_, _, err := c.client.Devices.Create(req)\n\treturn err\n}", "func (ct *customer) CreateCustomer(c echo.Context) error {\n\tctx, err := middleware.WellsFarGoContext(c)\n\tif err != nil {\n\t\treturn errors.Wrapf(errs.ErrWellsFarGoContext, \"err : '%s'\", err)\n\t}\n\n\tvar newCustomer model.Customer\n\tif err = ctx.Bind(&newCustomer); err != nil {\n\t\treturn errors.Wrapf(errs.ErrBindRequest, \"err : '%s'\", err)\n\t}\n\n\tcustomer, err := ct.cs.CreateNewCustomer(&newCustomer)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"create new customer\")\n\t}\n\n\treturn ctx.JSON(http.StatusOK, customer)\n}", "func (d *Dao) Case(c context.Context, arg *blocked.ArgCaseSearch) (ids []int64, pager *blocked.Pager, err error) {\n\treq := d.elastic.NewRequest(blocked.BusinessBlockedCase).Index(blocked.TableBlockedCase).Fields(\"id\")\n\tif arg.Keyword != blocked.SearchDefaultString {\n\t\treq.WhereLike([]string{\"origin_content\"}, []string{arg.Keyword}, true, elastic.LikeLevelHigh)\n\t}\n\tif arg.OriginType != blocked.SearchDefaultNum {\n\t\treq.WhereEq(\"origin_type\", arg.OriginType)\n\t}\n\tif arg.Status != blocked.SearchDefaultNum {\n\t\treq.WhereEq(\"status\", arg.Status)\n\t}\n\tif arg.CaseType != blocked.SearchDefaultNum {\n\t\treq.WhereEq(\"case_type\", arg.CaseType)\n\t}\n\tif arg.UID != blocked.SearchDefaultNum {\n\t\treq.WhereEq(\"mid\", arg.UID)\n\t}\n\tif arg.OPID != blocked.SearchDefaultNum {\n\t\treq.WhereEq(\"oper_id\", 
arg.OPID)\n\t}\n\treq.WhereRange(\"start_time\", arg.TimeFrom, arg.TimeTo, elastic.RangeScopeLcRc)\n\treq.Pn(arg.PN).Ps(arg.PS).Order(arg.Order, arg.Sort)\n\tvar res *search.ReSearchData\n\tif err = req.Scan(c, &res); err != nil {\n\t\terr = errors.Errorf(\"elastic search(%s) error(%v)\", req.Params(), err)\n\t\treturn\n\t}\n\tids, pager = pagerExtra(res)\n\treturn\n}", "func (m *Client) CreateTrigger(arg0 context.Context, arg1 zendesk.Trigger) (zendesk.Trigger, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreateTrigger\", arg0, arg1)\n\tret0, _ := ret[0].(zendesk.Trigger)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *Client) CreateGroup(arg0 context.Context, arg1 zendesk.Group) (zendesk.Group, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreateGroup\", arg0, arg1)\n\tret0, _ := ret[0].(zendesk.Group)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func CharacterCreate(w http.ResponseWriter, r *http.Request) {\n\tlog.WithFields(log.Fields{\n\t\t\"time\": time.Now(),\n\t}).Info(\"Received character create request\")\n\n\tvar requestData CreateRequest\n\tbody, err := ioutil.ReadAll(io.LimitReader(r.Body, 1048576))\n\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\n\tif err != nil {\n\t\tRespondBadRequest(w, err.Error())\n\t\treturn\n\t}\n\n\tif err := r.Body.Close(); err != nil {\n\t\tRespondBadRequest(w, err.Error())\n\t\treturn\n\t}\n\n\tif err := json.Unmarshal(body, &requestData); err != nil {\n\t\tw.WriteHeader(422) // unprocessable entity\n\t\tif err := json.NewEncoder(w).Encode(err); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\tcharacter, err := CreateNewCharacter(requestData)\n\tfmt.Println(requestData.ID)\n\n\tif err != nil {\n\t\tRespondBadRequest(w, err.Error())\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusCreated)\n\tif err := json.NewEncoder(w).Encode(character); err != nil {\n\t\tRespondBadRequest(w, err.Error())\n\t\treturn\n\t}\n\n}", "func CreateContact(w http.ResponseWriter, r *http.Request) {\n\t// Grab the id of the user that send the request\n\tuser := r.Context().Value(middleware.User(\"user\")).(uint)\n\tcontact := &model.Contact{}\n\n\terr := json.NewDecoder(r.Body).Decode(contact)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tresponse.RespondWithStatus(\n\t\t\tw,\n\t\t\tresponse.Message(false, \"Error while decoding request body\"),\n\t\t\thttp.StatusBadRequest,\n\t\t)\n\t\treturn\n\t}\n\n\tcontact.UserID = user\n\tdata, status, ok := contact.Create()\n\tif !ok {\n\t\tresponse.RespondWithStatus(w, data, status)\n\t}\n\tresponse.RespondWithStatus(w, data, status)\n}", "func CaseController(t *testing.T, n *native.NativeService) {\n\ta0 := account.NewAccount(\"\")\n\tid0, _ := account.GenerateID()\n\tid1, _ := account.GenerateID()\n\n\t// 1. unregistered controller, should fail\n\tif err := regControlledID(n, id1, id0, 1, a0.Address); err == nil {\n\t\tt.Error(\"registered controlled id with unregistered controller\")\n\t}\n\n\t// 2. register the controller\n\tif err := regID(n, id0, a0); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// 3. register without valid signature, should fail\n\tif err := regControlledID(n, id1, id0, 1, common.ADDRESS_EMPTY); err == nil {\n\t\tt.Error(\"registered without valid signature\")\n\t}\n\n\t// 4. register with invalid key index, should fail\n\tif err := regControlledID(n, id1, id0, 2, a0.Address); err == nil {\n\t\tt.Error(\"registered with invalid key index\")\n\t}\n\n\t// 5. 
register with invalid id, should fail\n\tif err := regControlledID(n, \"did:ont::123\", id0, 1, a0.Address); err == nil {\n\t\tt.Error(\"invalid id registered\")\n\t}\n\n\t// 6. register the controlled ID\n\tif err := regControlledID(n, id1, id0, 1, a0.Address); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// 7. register again, should fail\n\tif err := regControlledID(n, id1, id0, 1, a0.Address); err == nil {\n\t\tt.Fatal(\"register twice\")\n\t}\n\n\t// 8. verify controller\n\tif ok, err := verifyCtrl(n, id1, 1, a0.Address); !ok || err != nil {\n\t\tt.Fatal(\"verify controller error\", err)\n\t}\n\n\t// 9. verify invalid controller, should fail\n\tif ok, err := verifyCtrl(n, id1, 2, a0.Address); ok && err == nil {\n\t\tt.Error(\"invalid controller key index passed verification\")\n\t}\n\n\t// 10. verify controller without valid signature, should fail\n\tif ok, err := verifyCtrl(n, id1, 1, common.ADDRESS_EMPTY); ok && err == nil {\n\t\tt.Error(\"controller passed verification without valid signature\")\n\t}\n\n\t// 11. add attribute by invalid controller, should fail\n\tattr := attribute{\n\t\t[]byte(\"test key\"),\n\t\t[]byte(\"test value\"),\n\t\t[]byte(\"test type\"),\n\t}\n\tif err := ctrlAddAttr(n, id1, attr, 1, common.Address{}); err == nil {\n\t\tt.Error(\"attribute added by invalid controller\")\n\t}\n\n\t// 12. add attribute\n\tif err := ctrlAddAttr(n, id1, attr, 1, a0.Address); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// 13. check attribute\n\tif err := checkAttribute(n, id1, []attribute{attr}); err != nil {\n\t\tt.Error(\"check attribute error\", err)\n\t}\n\n\t// 14. remove attribute by invalid controller, should fail\n\tif err := ctrlRmAttr(n, id1, attr.key, 1, common.Address{}); err == nil {\n\t\tt.Error(\"attribute removed by invalid controller\")\n\t}\n\n\t// 15. remove nonexistent attribute, should fail\n\tif err := ctrlRmAttr(n, id1, []byte(\"unknown key\"), 1, a0.Address); err == nil {\n\t\tt.Error(\"removed nonexistent attribute\")\n\t}\n\n\t// 16. remove attribute by controller\n\tif err := ctrlRmAttr(n, id1, attr.key, 1, a0.Address); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// 17. add invalid key, should fail\n\tif err := ctrlAddKey(n, id1, []byte(\"test invalid key\"), 1, a0.Address); err == nil {\n\t\tt.Error(\"invalid key added by controller\")\n\t}\n\n\t// 18. add key by invalid controller, should fail\n\ta1 := account.NewAccount(\"\")\n\tpk := keypair.SerializePublicKey(a1.PubKey())\n\tif err := ctrlAddKey(n, id1, pk, 1, common.Address{}); err == nil {\n\t\tt.Error(\"key added by invalid controller\")\n\t}\n\n\t// 19. add key\n\tif err := ctrlAddKey(n, id1, pk, 1, a0.Address); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// 20. remove key by invalid controller, should fail\n\tif err := ctrlRmKey(n, id1, 1, 1, common.ADDRESS_EMPTY); err == nil {\n\t\tt.Error(\"key removed by invalid controller\")\n\t}\n\n\t// 21. remove invalid key, should fail\n\tif err := ctrlRmKey(n, id1, 2, 1, a0.Address); err == nil {\n\t\tt.Error(\"invlid key removed\")\n\t}\n\n\t// 22. remove key\n\tif err := ctrlRmKey(n, id1, 1, 1, a0.Address); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// 23. add the removed key again, should fail\n\tif err := ctrlAddKey(n, id1, pk, 1, a0.Address); err == nil {\n\t\tt.Error(\"removed key added again\")\n\t}\n\n\t// 24. 
add a new key\n\ta2 := account.NewAccount(\"\")\n\tpk = keypair.SerializePublicKey(a2.PubKey())\n\tif err := ctrlAddKey(n, id1, pk, 1, a0.Address); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// 25, remove controller by invalid key, should fail\n\tif err := rmCtrl(n, id1, 1, a1.Address); err == nil {\n\t\tt.Error(\"controller removed by invalid key\")\n\t}\n\n\t// 26. remove controller without valid signature, should fail\n\tif err := rmCtrl(n, id1, 2, common.Address{}); err == nil {\n\t\tt.Error(\"controller removed without valid signature\")\n\t}\n\n\t// 27. remove contoller\n\tif err := rmCtrl(n, id1, 2, a2.Address); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// 28. use removed controller, should all fail\n\tif ok, err := verifyCtrl(n, id1, 1, a0.Address); ok && err == nil {\n\t\tt.Error(\"removed controller passed verification\")\n\t}\n\tif err := ctrlAddAttr(n, id1, attr, 1, a0.Address); err == nil {\n\t\tt.Error(\"attribute added by removed controller\")\n\t}\n\ta3 := account.NewAccount(\"\")\n\tpk = keypair.SerializePublicKey(a3.PubKey())\n\tif err := ctrlAddKey(n, id1, pk, 1, a0.Address); err == nil {\n\t\tt.Error(\"key added by removed controller\")\n\t}\n}", "func (this *Client) CreateCandidate(shortcode string, candidate *schema.Candidate, opts ...RequestOpt) (*schema.Candidate, error) {\n\tvar response struct {\n\t\tStatus string `json:\"status\"`\n\t\tCandidate *schema.Candidate `json:\"candidate\"`\n\t}\n\tpayload := NewCreateCandidatePayload(candidate)\n\tif err := this.Do(payload, &response, append(opts, OptPath(\"jobs\", shortcode, \"candidates\"))...); err != nil {\n\t\treturn nil, err\n\t} else if response.Status != \"created\" {\n\t\treturn nil, workable.ErrUnexpectedResponse.With(response.Status)\n\t} else {\n\t\treturn response.Candidate, nil\n\t}\n}", "func (client *ClientImpl) CreateControlInGroup(ctx context.Context, args CreateControlInGroupArgs) (*Control, error) {\n\tif args.Control == nil {\n\t\treturn nil, &azuredevops.ArgumentNilError{ArgumentName: \"args.Control\"}\n\t}\n\trouteValues := make(map[string]string)\n\tif args.ProcessId == nil {\n\t\treturn nil, &azuredevops.ArgumentNilError{ArgumentName: \"args.ProcessId\"}\n\t}\n\trouteValues[\"processId\"] = (*args.ProcessId).String()\n\tif args.WitRefName == nil || *args.WitRefName == \"\" {\n\t\treturn nil, &azuredevops.ArgumentNilOrEmptyError{ArgumentName: \"args.WitRefName\"}\n\t}\n\trouteValues[\"witRefName\"] = *args.WitRefName\n\tif args.GroupId == nil || *args.GroupId == \"\" {\n\t\treturn nil, &azuredevops.ArgumentNilOrEmptyError{ArgumentName: \"args.GroupId\"}\n\t}\n\trouteValues[\"groupId\"] = *args.GroupId\n\n\tbody, marshalErr := json.Marshal(*args.Control)\n\tif marshalErr != nil {\n\t\treturn nil, marshalErr\n\t}\n\tlocationId, _ := uuid.Parse(\"1f59b363-a2d0-4b7e-9bc6-eb9f5f3f0e58\")\n\tresp, err := client.Client.Send(ctx, http.MethodPost, locationId, \"6.0-preview.1\", routeValues, nil, bytes.NewReader(body), \"application/json\", \"application/json\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar responseValue Control\n\terr = client.Client.UnmarshalBody(resp, &responseValue)\n\treturn &responseValue, err\n}", "func (h *Handler) CreateContact(c echo.Context) (err error) {\n\tcontact := &model.Contact{\n\t\tID: bson.NewObjectId(),\n\t}\n\tif err = c.Bind(contact); err != nil {\n\t\treturn\n\t}\n\n\t// Name and email are mandatory\n\tif contact.Name == \"\" || contact.Email == \"\" {\n\t\treturn &echo.HTTPError{Code: http.StatusBadRequest, Message: \"Invalid / missing 
fields\"}\n\t}\n\n\t// Save contact in database\n\tdb := h.DB.Clone()\n\tdefer db.Close()\n\tif err = db.DB(\"sampark\").C(\"contacts\").Insert(contact); err != nil {\n\t\tif mgo.IsDup(err) {\n\t\t\tmsg := \"Contact with given email already exists\"\n\t\t\treturn &echo.HTTPError{Code: http.StatusBadRequest, Message: msg}\n\t\t}\n\t\treturn\n\t}\n\treturn c.JSON(http.StatusCreated, contact)\n}", "func createCustomer(c *firestore.Client, ctx context.Context, customer *Customer, customerId string) {\n\tlog.Printf(\"(ID: '%s'): Create customer ...\", customerId)\n\tif err := CreateCustomer(c, ctx, customer); err != nil {\n\t\tlog.Println(err)\n\t} else {\n\t\tlog.Println(\"Customer created.\")\n\t}\n\tlog.Println()\n}", "func (c *TestClient) CreateInstance(project, zone string, i *compute.Instance) error {\n\tif c.CreateInstanceFn != nil {\n\t\treturn c.CreateInstanceFn(project, zone, i)\n\t}\n\treturn c.client.CreateInstance(project, zone, i)\n}", "func CreateNewSchool(c echo.Context) error {\n\n\tdb, ok := c.Get(\"db\").(*gorm.DB)\n\n\tif !ok {\n\t\treturn c.NoContent(http.StatusInternalServerError)\n\t}\n\n\tvar modelview view.CreateNewSchoolModelView\n\n\tc.Bind(&modelview)\n\n\tcanteens := make([]canteen.Canteen, len(modelview.Canteens))\n\n\tfor index := range modelview.Canteens {\n\n\t\tlocation := canteen.Location{}\n\n\t\tlocation.Latitude = modelview.Canteens[index].Location.Latitude\n\n\t\tlocation.Longitude = modelview.Canteens[index].Location.Longitude\n\n\t\tcanteen, cerr := canteen.New(modelview.Canteens[index].Name, location)\n\t\tif cerr != nil {\n\n\t\t\tmodelview := customerrorview.UsingFieldErrorToErrorMessageModelView(*cerr)\n\n\t\t\treturn c.JSON(http.StatusBadRequest, modelview)\n\t\t}\n\t\tcanteens[index] = canteen\n\t}\n\n\tschool, serr := model.New(modelview.Acronym, modelview.Name, canteens)\n\n\tif serr != nil {\n\n\t\tmodelview := customerrorview.UsingFieldErrorToErrorMessageModelView(*serr)\n\n\t\treturn c.JSON(http.StatusBadRequest, modelview)\n\t}\n\n\tvar existingSchool model.School\n\n\t// Finds if school with same acronym already exists\n\n\terr := db.Where(map[string]interface{}{\"acronym\": modelview.Acronym}).First(&existingSchool).Error\n\n\tif err == nil {\n\n\t\tcerr := customerrormodel.FieldError{Field: \"acronym\", Model: \"school\", Explanation: \"a school with the same acronym already exists\"}\n\n\t\tmodelview := customerrorview.UsingFieldErrorToErrorMessageModelView(cerr)\n\n\t\treturn c.JSON(http.StatusBadRequest, modelview)\n\t}\n\n\t// Creates school\n\tdb.Create(&school)\n\n\tmodelviewres := view.ToGetDetailedSchoolInformationModelView(school)\n\n\treturn c.JSON(http.StatusCreated, modelviewres)\n\n}", "func (h *handler) CreateConsignment(ctx context.Context, req *pb.Consignment, res *pb.Response) error {\n\tvesselResponse, err := h.vesselClient.FindAvailable(context.Background(), &vesselProto.Specification{\n\t\tMaxWeight: req.Weight,\n\t\tCapacity: int32(len(req.Containers)),\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = h.repo.Create(req); err != nil {\n\t\treturn err\n\t}\n\n\tres.Created = true\n\tres.Consignment = req\n\treq.VesselId = vesselResponse.Vessel.Id\n\treturn nil\n}", "func NewPrimitiveCase(e Expression, t types.Primitive, as []PrimitiveAlternative, a DefaultAlternative) PrimitiveCase {\n\treturn PrimitiveCase{newAbstractCase(e, a), t, as}\n}", "func createContest(contest Contest) (Contest, error) {\n\tcmd := `SELECT * FROM create_contest($1, $2, $3, $4)`\n\terr := config.DB.\n\t\tQueryRow(cmd, contest.Name, 
pq.Array(contest.ProblemList), contest.StartDate, contest.Duration).\n\t\tScan(&contest.Id)\n\treturn contest, err\n}", "func (c *Client) CreateDealSubmission() *CreateDealSubmissionRequest {\n\tvar ()\n\treturn &CreateDealSubmissionRequest{\n\n\t\tFXDealSubmissionCreation: models.FXDealSubmissionCreationWithDefaults(c.Defaults),\n\n\t\tFxDealID: c.Defaults.GetString(\"CreateDealSubmission\", \"fx_deal_id\"),\n\n\t\ttimeout: cr.DefaultTimeout,\n\n\t\ttransport: c.transport,\n\t\tformats: c.formats,\n\t}\n}", "func (c *client) CreateIssue(\n\tpid interface{},\n\topt *glab.CreateIssueOptions,\n\toptions ...glab.RequestOptionFunc,\n) (*glab.Issue, *glab.Response, error) {\n\treturn c.c.Issues.CreateIssue(pid, opt, options...)\n}", "func (a *Client) CreateChannel(params *CreateChannelParams, authInfo runtime.ClientAuthInfoWriter) (*CreateChannelCreated, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewCreateChannelParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"createChannel\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/channels\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &CreateChannelReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*CreateChannelCreated)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for createChannel: API contract not enforced by server. 
Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func NewAlgebraicCase(e Expression, t types.Type, as []AlgebraicAlternative, a DefaultAlternative) AlgebraicCase {\n\treturn AlgebraicCase{newAbstractCase(e, t, a), as}\n}", "func (m *MailgunImpl) CreateCampaign(name, id string) error {\n\tr := simplehttp.NewHTTPRequest(generateApiUrl(m, campaignsEndpoint))\n\tr.SetClient(m.Client())\n\tr.SetBasicAuth(basicAuthUser, m.ApiKey())\n\n\tpayload := simplehttp.NewUrlEncodedPayload()\n\tpayload.AddValue(\"name\", name)\n\tif id != \"\" {\n\t\tpayload.AddValue(\"id\", id)\n\t}\n\t_, err := makePostRequest(r, payload)\n\treturn err\n}", "func (c *Client) CreateCourse(ctx context.Context, path string, payload *CreateCoursePayload, contentType string) (*http.Response, error) {\n\treq, err := c.NewCreateCourseRequest(ctx, path, payload, contentType)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn c.Client.Do(ctx, req)\n}", "func (impl controllerImpl) CreateCustomer(w http.ResponseWriter, r *http.Request) {\n\n\t// Read the body\n\tvar customerDto CustomerDTO\n\tdecoder := json.NewDecoder(r.Body)\n\terr := decoder.Decode(&customerDto)\n\tif err != nil {\n\t\thttp2.HandleBadRequest(w, r.URL.Path, err)\n\t\treturn\n\t}\n\n\t// Translate\n\tcustomer := ToEntity(customerDto)\n\n\t// Create\n\tnewCustomer, err := impl.service.CreateCustomer(customer)\n\tif err != nil {\n\t\thttp2.HandleBadRequest(w, r.URL.Path, err)\n\t\treturn\n\t}\n\n\t// Translate\n\tcustomerDto = ToContract(*newCustomer)\n\n\t// Good data, return JSON\n\thttp2.HandleSuccess(w, http.StatusCreated, customerDto)\n}", "func (h *eventServiceHTTPHandler) CreateTicket(c echo.Context) error {\n\tlogCtx := fmt.Sprintf(\"%T.CreateTicket\", *h)\n\n\tparams := model.CreateTicketReq{}\n\tif err := c.Bind(&params); err != nil {\n\t\thelper.Log(logrus.ErrorLevel, err.Error(), logCtx, \"error_bind_params\")\n\t\treturn helper.NewResponse(http.StatusBadRequest, http.StatusBadRequest, err.Error(), nil).WriteResponse(c)\n\t}\n\n\tif err := sanitizer.ValidateCreateTicket(&params); err != nil {\n\t\thelper.Log(logrus.ErrorLevel, err.Error(), logCtx, \"error_validate_params\")\n\t\treturn helper.NewResponse(http.StatusBadRequest, http.StatusBadRequest, err.Error(), nil).WriteResponse(c)\n\t}\n\n\tresp, err := h.eventUseCase.CreateTicket(&params)\n\tif err != nil {\n\t\thelper.Log(logrus.ErrorLevel, err.Error(), logCtx, \"error_create_ticket\")\n\t\treturn helper.NewResponse(http.StatusInternalServerError, http.StatusInternalServerError, err.Error(), nil).WriteResponse(c)\n\t}\n\n\tdata := make(map[string]interface{})\n\tdata[\"ticket\"] = resp\n\treturn helper.NewResponse(http.StatusCreated, http.StatusCreated, \"Success\", data).WriteResponse(c)\n}", "func (v *NetControlIntentClient) CreateNetControlIntent(nci NetControlIntent, project, compositeapp, compositeappversion, dig string, exists bool) (NetControlIntent, error) {\n\n\t//Construct key and tag to select the entry\n\tkey := NetControlIntentKey{\n\t\tNetControlIntent: nci.Metadata.Name,\n\t\tProject: project,\n\t\tCompositeApp: compositeapp,\n\t\tCompositeAppVersion: compositeappversion,\n\t\tDigName: dig,\n\t}\n\n\t//Check if this NetControlIntent already exists\n\t_, err := v.GetNetControlIntent(nci.Metadata.Name, project, compositeapp, compositeappversion, dig)\n\tif err == nil && !exists {\n\t\treturn NetControlIntent{}, pkgerrors.New(\"NetControlIntent already exists\")\n\t}\n\n\terr = db.DBconn.Insert(v.db.storeName, key, nil, v.db.tagMeta, nci)\n\tif err != nil 
{\n\t\treturn NetControlIntent{}, pkgerrors.Wrap(err, \"Creating DB Entry\")\n\t}\n\n\treturn nci, nil\n}", "func (c *Client) CreateDefect(d *Defect) error {\n\tref := Ref{URL: fmt.Sprintf(\"%v/defect/create\", baseUrl)}\n\treturn c.UpdateDefect(&ref, d)\n}", "func (c *Culqi) CreateCustomer(params *CustomerParams) (*Customer, error) {\n\n\tif params == nil {\n\t\treturn nil, fmt.Errorf(\"params are empty\")\n\t}\n\n\treqJSON, err := json.Marshal(params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"POST\", defaultBaseURL+\"v2/\"+customerBase, bytes.NewBuffer(reqJSON))\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.Header.Set(\"Authorization\", \"Bearer \"+c.Conf.APIKey)\n\treq.Header.Set(\"User-Agent\", userAgent)\n\n\tresp, err := c.HTTP.Do(req)\n\n\tif resp.StatusCode >= 400 {\n\t\treturn nil, extractError(resp)\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tt := Customer{}\n\n\tif err := json.Unmarshal(body, &t); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &t, nil\n}", "func (r *TCPMonitorRepository) CreateContact(contact MonitoringContact) error {\n\trestRequest := rest.Request{Endpoint: \"/monitoring-contacts\", Body: &contact}\n\n\treturn r.Client.Post(restRequest)\n}", "func CreateCustomer(tx db.Tx, w http.ResponseWriter, r *http.Request) {\n\tif r.FormValue(\"auth\") != config.Get(\"galaAPIKey\") {\n\t\thttp.Error(w, \"401 Unauthorized\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\tname, email, card := r.FormValue(\"name\"), r.FormValue(\"email\"), r.FormValue(\"card\")\n\tcustomer, pmtmeth, desc, problem := stripe.CreateCustomer(name, email, card)\n\tif problem != \"\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tio.WriteString(w, problem)\n\t\treturn\n\t}\n\tif customer == \"\" {\n\t\thttp.Error(w, \"500 Internal Server Error\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tjw := new(jwriter.Writer)\n\tjw.RawString(`{\"customer\":`)\n\tjw.String(customer)\n\tjw.RawString(`,\"method\":`)\n\tjw.String(pmtmeth)\n\tjw.RawString(`,\"description\":`)\n\tjw.String(desc)\n\tjw.RawByte('}')\n\tjw.DumpTo(w)\n}", "func (c *CaptainClient) CreateFormation(name string, flightID, CPU, RAM, disk int, baseName, domain string, targetCount int, preflightPlaybook string) (Formation, error) {\n\tresult, err := c.restPOST(\"formation\", map[string]interface{}{\n\t\t\"FlightID\": flightID,\n\t\t\"Name\": name,\n\t\t\"CPU\": CPU,\n\t\t\"RAM\": RAM,\n\t\t\"Disk\": disk,\n\t\t\"BaseName\": baseName,\n\t\t\"Domain\": domain,\n\t\t\"TargetCount\": targetCount,\n\t\t\"PreflightPlaybook\": preflightPlaybook,\n\t})\n\tif err != nil {\n\t\treturn Formation{}, fmt.Errorf(\"unable to create Formation:\\n%w\", err)\n\t}\n\tvar formation Formation\n\terr = json.Unmarshal(result, &formation)\n\tif err != nil {\n\t\treturn Formation{}, fmt.Errorf(\"unable to format response as Formation:\\n%w\", err)\n\t}\n\treturn formation, nil\n}", "func (client LabClient) CreateEnvironment(ctx context.Context, resourceGroupName string, name string, labVirtualMachine LabVirtualMachine) (result LabCreateEnvironmentFuture, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/LabClient.CreateEnvironment\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.Response() != nil {\n\t\t\t\tsc = result.Response().StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\treq, 
err := client.CreateEnvironmentPreparer(ctx, resourceGroupName, name, labVirtualMachine)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"dtl.LabClient\", \"CreateEnvironment\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresult, err = client.CreateEnvironmentSender(req)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"dtl.LabClient\", \"CreateEnvironment\", result.Response(), \"Failure sending request\")\n\t\treturn\n\t}\n\n\treturn\n}", "func (a *CampaignsApiService) CreateChannel(ctx _context.Context) ApiCreateChannelRequest {\n\treturn ApiCreateChannelRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t}\n}", "func ParseUsingCase(text string) *Name {\n\t// Convert the text to an array of runes so that we can easily access the previous, current\n\t// and next runes:\n\tvar runes []rune\n\tfor _, r := range text {\n\t\trunes = append(runes, r)\n\t}\n\n\t// Iterate the runes looking for case transitions and storing the words:\n\tbuffer := new(bytes.Buffer)\n\tvar words []*Word\n\tsize := len(runes)\n\tfor i := 0; i < size; i++ {\n\t\tvar previous rune\n\t\tif i > 0 {\n\t\t\tprevious = runes[i-1]\n\t\t}\n\t\tcurrent := runes[i]\n\t\tvar next rune\n\t\tif i < size-1 {\n\t\t\tnext = runes[i+1]\n\t\t}\n\t\tcurrentUpper := unicode.IsUpper(current)\n\t\tpreviousLower := unicode.IsLower(previous)\n\t\tnextLower := unicode.IsLower(next)\n\t\tif currentUpper && (previousLower || nextLower) {\n\t\t\tif buffer.Len() > 0 {\n\t\t\t\tchunk := buffer.String()\n\t\t\t\tword := NewWord(chunk)\n\t\t\t\twords = append(words, word)\n\t\t\t\tbuffer.Reset()\n\t\t\t}\n\t\t}\n\t\tbuffer.WriteRune(current)\n\t}\n\tif buffer.Len() > 0 {\n\t\tchunk := buffer.String()\n\t\tword := NewWord(chunk)\n\t\twords = append(words, word)\n\t}\n\n\t// Create the name from the stored words:\n\treturn NewName(words...)\n}", "func (c *BugProfileController) Create(ctx *app.CreateBugProfileContext) error {\n\t// BugProfileController_Create: start_implement\n\n\t// Put your logic here\n\n\t// BugProfileController_Create: end_implement\n\treturn nil\n}", "func (s *TemplatesService) CreateTemplateRecord(ctx context.Context, accountID string, templateIdentifier string, templateRecordAttributes TemplateRecord) (*TemplateRecordResponse, error) {\n\tpath := versioned(templateRecordPath(accountID, templateIdentifier, 0))\n\ttemplateRecordResponse := &TemplateRecordResponse{}\n\n\tresp, err := s.client.post(ctx, path, templateRecordAttributes, templateRecordResponse)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttemplateRecordResponse.HTTPResponse = resp\n\treturn templateRecordResponse, nil\n}", "func (client *Client) CreateTopic(request *CreateTopicRequest) (response *CreateTopicResponse, err error) {\n\tresponse = CreateCreateTopicResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}", "func (c *TestClient) CreateInstanceAlpha(project, zone string, i *computeAlpha.Instance) error {\n\tif c.CreateInstanceBetaFn != nil {\n\t\treturn c.CreateInstanceAlphaFn(project, zone, i)\n\t}\n\treturn c.client.CreateInstanceAlpha(project, zone, i)\n}", "func (sdk *Sdk) CreateContact(filePath, attrs, tags string) (string, error) {\n\tsdkC := sdk.connect\n\textraData := fmt.Sprintf(`attrs=%s&&tags=%s`, attrs, tags)\n\n\treturn sdkC.rq.PostFile(\"/api/contacts/upload\", filePath, \"file\", extraData)\n}", "func (bc *TranslatableBusinessCase) CreateIntakeModel() (*wire.IntakeInput, error) {\n\tobj := intakemodels.EASIBizCase{\n\t\tUserEUA: bc.EUAUserID,\n\t\tBusinessCaseID: 
bc.ID.String(),\n\t\tIntakeID: pStr(bc.SystemIntakeID.String()),\n\t\tProjectName: bc.ProjectName.ValueOrZero(), // will always have a value by the time a draft business case is submitted\n\t\tRequester: bc.Requester.ValueOrZero(), // will always have a value by the time a draft business case is submitted\n\t\tRequesterPhoneNumber: bc.RequesterPhoneNumber.Ptr(),\n\t\tBusinessOwner: bc.BusinessOwner.ValueOrZero(), // will always have a value by the time a draft business case is submitted\n\t\tBusinessNeed: bc.BusinessNeed.Ptr(),\n\t\tCurrentSolutionSummary: bc.CurrentSolutionSummary.Ptr(),\n\t\tCmsBenefit: bc.CMSBenefit.Ptr(),\n\t\tPriorityAlignment: bc.PriorityAlignment.Ptr(),\n\t\tSuccessIndicators: bc.SuccessIndicators.Ptr(),\n\t\tStatus: string(bc.Status),\n\n\t\tArchivedAt: pStr(strDateTime(bc.ArchivedAt)),\n\n\t\tBusinessSolutions: []*intakemodels.EASIBusinessSolution{},\n\t}\n\n\t// Build the collection of embedded objects\n\n\t// Business solutions\n\t// Preferred (required)\n\tpreferredSolution := &intakemodels.EASIBusinessSolution{\n\t\tSolutionType: \"preferred\",\n\t\tTitle: bc.PreferredTitle.Ptr(),\n\t\tSummary: bc.PreferredSummary.Ptr(),\n\t\tAcquisitionApproach: bc.PreferredAcquisitionApproach.Ptr(),\n\t\tSecurityIsApproved: bc.PreferredSecurityIsApproved.Ptr(),\n\t\tSecurityIsBeingReviewed: bc.PreferredSecurityIsBeingReviewed.Ptr(),\n\t\tHostingType: bc.PreferredHostingType.Ptr(),\n\t\tHostingLocation: bc.PreferredHostingLocation.Ptr(),\n\t\tHostingCloudServiceType: bc.PreferredHostingCloudServiceType.Ptr(),\n\t\tHasUI: bc.PreferredHasUI.Ptr(),\n\t\tPros: bc.PreferredPros.Ptr(),\n\t\tCons: bc.PreferredCons.Ptr(),\n\t\tCostSavings: bc.PreferredCostSavings.Ptr(),\n\t\tLifecycleCostLines: []intakemodels.EASILifecycleCost{},\n\t}\n\n\t// TODO: do we need to check if alternative a and b are filled out?\n\t// what is the best way to do that? 
need to check each field individually?\n\n\t// Alternative a (optional)\n\talternativeASolution := &intakemodels.EASIBusinessSolution{\n\t\tSolutionType: \"alternativeA\",\n\t\tTitle: bc.AlternativeATitle.Ptr(),\n\t\tSummary: bc.AlternativeASummary.Ptr(),\n\t\tAcquisitionApproach: bc.AlternativeAAcquisitionApproach.Ptr(),\n\t\tSecurityIsApproved: bc.AlternativeASecurityIsApproved.Ptr(),\n\t\tSecurityIsBeingReviewed: bc.AlternativeASecurityIsBeingReviewed.Ptr(),\n\t\tHostingType: bc.AlternativeAHostingType.Ptr(),\n\t\tHostingLocation: bc.AlternativeAHostingLocation.Ptr(),\n\t\tHostingCloudServiceType: bc.AlternativeAHostingCloudServiceType.Ptr(),\n\t\tHasUI: bc.AlternativeAHasUI.Ptr(),\n\t\tPros: bc.AlternativeAPros.Ptr(),\n\t\tCons: bc.AlternativeACons.Ptr(),\n\t\tCostSavings: bc.AlternativeACostSavings.Ptr(),\n\t\tLifecycleCostLines: []intakemodels.EASILifecycleCost{},\n\t}\n\n\t// Alternative b (optional)\n\talternativeBSolution := &intakemodels.EASIBusinessSolution{\n\t\tSolutionType: \"alternativeB\",\n\t\tTitle: bc.AlternativeBTitle.Ptr(),\n\t\tSummary: bc.AlternativeBSummary.Ptr(),\n\t\tAcquisitionApproach: bc.AlternativeBAcquisitionApproach.Ptr(),\n\t\tSecurityIsApproved: bc.AlternativeBSecurityIsApproved.Ptr(),\n\t\tSecurityIsBeingReviewed: bc.AlternativeBSecurityIsBeingReviewed.Ptr(),\n\t\tHostingType: bc.AlternativeBHostingType.Ptr(),\n\t\tHostingLocation: bc.AlternativeBHostingLocation.Ptr(),\n\t\tHostingCloudServiceType: bc.AlternativeBHostingCloudServiceType.Ptr(),\n\t\tHasUI: bc.AlternativeBHasUI.Ptr(),\n\t\tPros: bc.AlternativeBPros.Ptr(),\n\t\tCons: bc.AlternativeBCons.Ptr(),\n\t\tCostSavings: bc.AlternativeBCostSavings.Ptr(),\n\t\tLifecycleCostLines: []intakemodels.EASILifecycleCost{},\n\t}\n\n\t// Add lifecycle cost lines to business solutions\n\tbcID := bc.ID.String()\n\n\tfor _, line := range bc.LifecycleCostLines {\n\t\tlc := intakemodels.EASILifecycleCost{\n\t\t\tSolution: string(line.Solution),\n\t\t\tYear: string(line.Year),\n\t\t}\n\n\t\tphase := \"\"\n\t\tif line.Phase != nil {\n\t\t\tphase = string(*line.Phase)\n\t\t}\n\t\tlc.Phase = pStr(phase)\n\n\t\tcost := \"\"\n\t\tif line.Cost != nil {\n\t\t\tcost = strconv.Itoa(*line.Cost)\n\t\t}\n\t\tlc.Cost = pStr(cost)\n\n\t\tif line.Solution == models.LifecycleCostSolutionPREFERRED {\n\t\t\tpreferredSolution.LifecycleCostLines = append(preferredSolution.LifecycleCostLines, lc)\n\t\t} else if line.Solution == models.LifecycleCostSolutionA {\n\t\t\talternativeASolution.LifecycleCostLines = append(alternativeASolution.LifecycleCostLines, lc)\n\t\t} else if line.Solution == models.LifecycleCostSolutionB {\n\t\t\talternativeBSolution.LifecycleCostLines = append(alternativeBSolution.LifecycleCostLines, lc)\n\t\t}\n\t}\n\n\t// Append all solution objects to business solutions list\n\tobj.BusinessSolutions = append(obj.BusinessSolutions, preferredSolution)\n\tobj.BusinessSolutions = append(obj.BusinessSolutions, alternativeASolution)\n\tobj.BusinessSolutions = append(obj.BusinessSolutions, alternativeBSolution)\n\n\tblob, err := json.Marshal(&obj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresult := &wire.IntakeInput{\n\t\tClientID: pStr(bcID),\n\t\tBody: pStr(string(blob)),\n\n\t\t// invariants for this type\n\t\tBodyFormat: pStr(wire.IntakeInputBodyFormatJSON),\n\t\tType: typeStr(intakeInputBizCase),\n\t\tSchema: versionStr(IntakeInputSchemaEASIBizCaseVersion),\n\t}\n\n\tif bc.Status == models.BusinessCaseStatusCLOSED {\n\t\tresult.ClientStatus = statusStr(inputStatusFinal)\n\t} else {\n\t\tresult.ClientStatus = 
statusStr(inputStatusInitiated)\n\t}\n\n\tif bc.CreatedAt != nil {\n\t\tresult.ClientCreatedDate = pStrfmtDateTime(bc.CreatedAt)\n\t}\n\tif bc.UpdatedAt != nil {\n\t\tresult.ClientLastUpdatedDate = pStrfmtDateTime(bc.UpdatedAt)\n\t}\n\n\treturn result, nil\n}", "func NewUseCase(filepath string) (uc *UseCase, err error) {\n\tvar data []byte\n\n\tif data, err = ioutil.ReadFile(filepath); err != nil {\n\t\treturn\n\t}\n\tvar m map[string]interface{}\n\tvar p sen.Parser\n\tvar v interface{}\n\tif v, err = p.Parse(data); err != nil {\n\t\treturn\n\t}\n\tif m, _ = v.(map[string]interface{}); m == nil {\n\t\treturn nil, fmt.Errorf(\"expected a map, not a %T\", v)\n\t}\n\tuc = &UseCase{Filepath: filepath}\n\tif uc.Comment, err = asString(m[\"comment\"]); err != nil {\n\t\treturn\n\t}\n\tif err = uc.addSteps(m[\"steps\"]); err != nil {\n\t\treturn\n\t}\n\treturn\n}", "func (s *Defect) CreateDefect(de models.Defect) (der models.Defect, err error) {\n\tcreateRequest := CreateDefectRequest{\n\t\tDefect: de,\n\t}\n\tude := new(CreateDefectResponse)\n\terr = s.client.CreateRequest(\"defect\", createRequest, &ude)\n\tder = ude.CreateResult.Object\n\treturn der, err\n}", "func (a *Client) SafeContactCreate(params *SafeContactCreateParams) (*SafeContactCreateOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewSafeContactCreateParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"safeContactCreate\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/domainSafeContact\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &SafeContactCreateReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*SafeContactCreateOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for safeContactCreate: API contract not enforced by server. 
Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (c Client) CreateIssue(ctx context.Context, issue it.Issue) (it.IssueID, error) {\n\tid, err := c.Client.IssueAdd(ctx, mantis.IssueData{})\n\treturn it.IssueID(strconv.Itoa(id)), err\n}", "func Create(client *golangsdk.ServiceClient, instanceId string, opts CustomAuthOptsBuilder) (r CreateResult) {\n\treqBody, err := opts.ToCustomAuthOptsMap()\n\tif err != nil {\n\t\tr.Err = err\n\t\treturn\n\t}\n\t_, r.Err = client.Post(rootURL(client, instanceId), reqBody, &r.Body, nil)\n\treturn\n}", "func createContact(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\tvar contact schema.Contact\n\n\t// we decode our body request params\n\t_ = json.NewDecoder(r.Body).Decode(&contact)\n\n\tresult, err := contacts.InsertOne(context.TODO(), contact)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tjson.NewEncoder(w).Encode(result)\n}", "func (c Client) Create(input *CreateFactorInput) (*CreateFactorResponse, error) {\n\treturn c.CreateWithContext(context.Background(), input)\n}", "func (mc *ControllerClient) CreateController(m Controller) (Controller, error) {\n\n\t//Construct the composite key to select the entry\n\tkey := ControllerKey{\n\t\tControllerName: m.Name,\n\t}\n\n\t//Check if this Controller already exists\n\t_, err := mc.GetController(m.Name)\n\tif err == nil {\n\t\treturn Controller{}, pkgerrors.New(\"Controller already exists\")\n\t}\n\n\terr = db.DBconn.Create(mc.collectionName, key, mc.tagMeta, m)\n\tif err != nil {\n\t\treturn Controller{}, pkgerrors.Wrap(err, \"Creating DB Entry\")\n\t}\n\n\treturn m, nil\n}", "func (c *Client) CreateEntity(idOrName, doc *string, values ...EntityValue) (response Entity, err error) {\n\turl := c.makeUrl(\"https://api.wit.ai/entities\", nil)\n\n\tdata := map[string]interface{}{\n\t\t\"id\": *idOrName,\n\t}\n\tif doc != nil {\n\t\tdata[\"doc\"] = *doc\n\t}\n\tif len(values) > 0 {\n\t\tdata[\"values\"] = append([]EntityValue{}, values...)\n\t}\n\n\tvar bytes []byte\n\tif bytes, err = c.request(\"POST\", *url, data); err == nil {\n\t\tvar entityRes Entity\n\t\tif err = json.Unmarshal(bytes, &entityRes); err == nil {\n\t\t\tif !entityRes.HasError() {\n\t\t\t\tresponse = entityRes\n\t\t\t} else {\n\t\t\t\terr = fmt.Errorf(\"new entity response error: %s\", entityRes.ErrorMessage())\n\t\t\t}\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"new entity parse error: %s\", err)\n\t\t}\n\t} else {\n\t\terr = fmt.Errorf(\"new entity request error: %s\", err)\n\t}\n\n\treturn response, err\n}", "func (a *Client) CreateEnvironment(params *CreateEnvironmentParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*CreateEnvironmentOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewCreateEnvironmentParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"createEnvironment\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/applications/{appName}/environments/{envName}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &CreateEnvironmentReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*CreateEnvironmentOK)\n\tif ok 
{\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for createEnvironment: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (t *TestUtil) CreateChannel(name string, isPrivate bool, topic string, description string, users []string, namespace string) *slackv1alpha1.Channel {\n\tchannelObject := t.CreateSlackChannelObject(name, isPrivate, topic, description, users, namespace)\n\terr := t.k8sClient.Create(t.ctx, channelObject)\n\n\tif err != nil {\n\t\tginkgo.Fail(err.Error())\n\t}\n\n\treq := reconcile.Request{NamespacedName: types.NamespacedName{Name: name, Namespace: namespace}}\n\n\t_, err = t.r.Reconcile(req)\n\tif err != nil {\n\t\tginkgo.Fail(err.Error())\n\t}\n\n\treturn channelObject\n}", "func (c *IDClient) CreatePlanCost(ctx context.Context, id gomanifold.ID, planCostRequest *PlanCostRequest) (*Price, error) {\n\tidBytes, err := id.MarshalText()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp := fmt.Sprintf(\"/id/plan/%s/cost\", string(idBytes))\n\n\treq, err := c.backend.NewRequest(http.MethodPost, p, nil, planCostRequest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar resp Price\n\t_, err = c.backend.Do(ctx, req, &resp, func(code int) error {\n\t\tswitch code {\n\t\tcase 400, 404, 500:\n\t\t\treturn &Error{}\n\t\tdefault:\n\t\t\treturn nil\n\t\t}\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &resp, nil\n}", "func (cce *CCEClient) Create(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) error {\n\tglog.V(4).Infof(\"Create machine: %+v\", machine.Name)\n\tinstance, err := cce.instanceIfExists(cluster, machine)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif instance != nil {\n\t\tglog.Infof(\"Skipped creating a VM that already exists, instanceID %s\", instance.InstanceID)\n\t}\n\n\tmachineCfg, err := machineProviderFromProviderConfig(machine.Spec.ProviderSpec)\n\tif err != nil {\n\t\tglog.Errorf(\"parse machine config err: %s\", err.Error())\n\t\treturn err\n\t}\n\tglog.V(4).Infof(\"machine config: %+v\", machineCfg)\n\n\tbccArgs := &bcc.CreateInstanceArgs{\n\t\tName: machine.Name,\n\t\tImageID: machineCfg.ImageID, // ubuntu-16.04-amd64\n\t\tBilling: billing.Billing{\n\t\t\tPaymentTiming: \"Postpaid\",\n\t\t},\n\t\tCPUCount: machineCfg.CPUCount,\n\t\tMemoryCapacityInGB: machineCfg.MemoryCapacityInGB,\n\t\tAdminPass: machineCfg.AdminPass,\n\t\tPurchaseCount: 1,\n\t\tInstanceType: \"N3\", // Normal 3\n\t\tNetworkCapacityInMbps: 1, //EIP bandwidth\n\t}\n\n\t// TODO support different regions\n\tinstanceIDs, err := cce.computeService.Bcc().CreateInstances(bccArgs, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(instanceIDs) != 1 {\n\t\treturn fmt.Errorf(\"CreateVMError\")\n\t}\n\n\tglog.Infof(\"Created a new VM, instanceID %s\", instanceIDs[0])\n\tif machine.ObjectMeta.Annotations == nil {\n\t\tmachine.ObjectMeta.Annotations = map[string]string{}\n\t}\n\tif cluster.ObjectMeta.Annotations == nil {\n\t\tcluster.ObjectMeta.Annotations = map[string]string{}\n\t}\n\tmachine.ObjectMeta.Annotations[TagInstanceID] = instanceIDs[0]\n\tmachine.ObjectMeta.Annotations[TagInstanceStatus] = \"Created\"\n\tmachine.ObjectMeta.Annotations[TagInstanceAdminPass] = machineCfg.AdminPass\n\tmachine.ObjectMeta.Annotations[TagKubeletVersion] = machine.Spec.Versions.Kubelet\n\n\ttoken, err 
:= cce.getKubeadmToken()\n\tif err != nil {\n\t\tglog.Errorf(\"getKubeadmToken err: %+v\", err)\n\t\treturn err\n\t}\n\n\tif machineCfg.Role == \"master\" {\n\t\tcluster.ObjectMeta.Annotations[TagMasterInstanceID] = instanceIDs[0]\n\t\tcluster.ObjectMeta.Annotations[TagClusterToken] = token\n\t\tmachine.ObjectMeta.Annotations[TagInstanceRole] = \"master\"\n\t} else {\n\t\tmachine.ObjectMeta.Annotations[TagInstanceRole] = \"node\"\n\t}\n\n\tglog.V(4).Infof(\"new machine: %+v, annotation %+v\", machine.Name, machine.Annotations)\n\tcce.client.Update(context.Background(), cluster)\n\tcce.client.Update(context.Background(), machine)\n\n\t// TODO rewrite\n\tgo cce.postCreate(ctx, cluster, machine)\n\treturn nil\n}", "func NewDefaultCase(statements ...Statement) *DefaultCase {\n\treturn &DefaultCase{\n\t\tstatements: statements,\n\t}\n}", "func (a *Client) CreateCertificate(params *CreateCertificateParams, authInfo runtime.ClientAuthInfoWriter) (*CreateCertificateOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewCreateCertificateParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"CreateCertificate\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/cdn/v1/stacks/{stack_id}/certificates\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &CreateCertificateReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*CreateCertificateOK), nil\n\n}", "func (c *TestClient) CreateDisk(project, zone string, d *compute.Disk) error {\n\tif c.CreateDiskFn != nil {\n\t\treturn c.CreateDiskFn(project, zone, d)\n\t}\n\treturn c.client.CreateDisk(project, zone, d)\n}", "func (client *Client) CreateIntegration(request *CreateIntegrationRequest) (response *CreateIntegrationResponse, err error) {\n\tresponse = CreateCreateIntegrationResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}", "func (s *GroupsService) Create(\n\tctx context.Context,\n\tgroupName string,\n) error {\n\traw, err := json.Marshal(struct {\n\t\tGroupName string `json:\"group_name\"`\n\t}{\n\t\tgroupName,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treq, err := http.NewRequest(\n\t\thttp.MethodPost,\n\t\ts.client.url+\"2.0/groups/create\",\n\t\tbytes.NewBuffer(raw),\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq = req.WithContext(ctx)\n\tres, err := s.client.client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif res.StatusCode >= 300 || res.StatusCode <= 199 {\n\t\treturn fmt.Errorf(\n\t\t\t\"Failed to returns 2XX response: %d\", res.StatusCode)\n\t}\n\n\treturn nil\n}", "func (v Notes) CreateComment(params NotesCreateCommentParams) (NotesCreateCommentResponse, error) {\n\tr, err := v.API.Request(\"notes.createComment\", params)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tvar resp NotesCreateCommentResponse\n\n\tvar cnv int\n\tcnv, err = strconv.Atoi(string(r))\n\tresp = NotesCreateCommentResponse(cnv)\n\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn resp, nil\n}", "func CreateNewDesk(ctx context.Context, tconn *chrome.TestConn) error {\n\tsuccess := false\n\tif err := tconn.Call(ctx, &success, \"tast.promisify(chrome.autotestPrivate.createNewDesk)\"); err != nil {\n\t\treturn err\n\t}\n\tif !success {\n\t\treturn errors.New(\"failed to create a new 
desk\")\n\t}\n\treturn nil\n}", "func (a *CaApiService) CreateCa(ctx _context.Context, localVarOptionals *CreateCaOpts) (CreateCaResponse, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue CreateCaResponse\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/CreateCa\"\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tif localVarOptionals != nil && localVarOptionals.CreateCaRequest.IsSet() {\n\t\tlocalVarOptionalCreateCaRequest, localVarOptionalCreateCaRequestok := localVarOptionals.CreateCaRequest.Value().(CreateCaRequest)\n\t\tif !localVarOptionalCreateCaRequestok {\n\t\t\treturn localVarReturnValue, nil, reportError(\"createCaRequest should be CreateCaRequest\")\n\t\t}\n\t\tlocalVarPostBody = &localVarOptionalCreateCaRequest\n\t}\n\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t}\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v CreateCaResponse\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, 
newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (m *Client) CreateTicketComment(arg0 context.Context, arg1 int64, arg2 zendesk.TicketComment) (zendesk.TicketComment, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreateTicketComment\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(zendesk.TicketComment)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (cmd *CreateTechHyTechCommand) Run(c *client.Client, args []string) error {\n\tvar path string\n\tif len(args) > 0 {\n\t\tpath = args[0]\n\t} else {\n\t\tpath = \"/api/tech\"\n\t}\n\tvar payload client.CreateTechHyTechPayload\n\tif cmd.Payload != \"\" {\n\t\terr := json.Unmarshal([]byte(cmd.Payload), &payload)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to deserialize payload: %s\", err)\n\t\t}\n\t}\n\tlogger := goa.NewLogger(log.New(os.Stderr, \"\", log.LstdFlags))\n\tctx := goa.WithLogger(context.Background(), logger)\n\tresp, err := c.CreateTechHyTech(ctx, path, &payload, cmd.ContentType)\n\tif err != nil {\n\t\tgoa.LogError(ctx, \"failed\", \"err\", err)\n\t\treturn err\n\t}\n\n\tgoaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)\n\treturn nil\n}", "func CreateCustomer(c *gin.Context) {\n\tvar input models.Customer\n\tif err := c.ShouldBindJSON(&input); err != nil {\n\t\tfmt.Println(err)\n\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": \"Could not decode data\"})\n\t\treturn\n\t}\n\t// Create customer\n\tutils.DB.Create(&input)\n\n\tc.JSON(http.StatusOK, gin.H{\"data\": input})\n}", "func (s *Service) CreateCharacter(c *Model, playerUUID string) (IModel, error) {\n\tuuid, err := uuid.Parse(playerUUID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tplayer, err := s.player.FindPlayerByUUID(uuid)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc.PlayerID = player.GetID()\n\tcharacter, err := s.character.CreateCharacter(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn character, nil\n}", "func (a HTTPAPI) EPTCreateFlightPlan(c *gin.Context) {\n\t// Get plan to create\n\tvar req EPTCreateFlightPlanReq\n\tc.BindJSON(&req)\n\n\t// Save mongodb\n\tinsertRes, err := a.DB.FlightPlans.InsertOne(\n\t\ta.Ctx, req.FlightPlan)\n\tif a.CheckRespondErr(c, -1,\n\t\t\"we got your flight plan but failed to \"+\n\t\t\t\"save it\", err) {\n\t\treturn\n\t}\n\n\t// Respond with inserted\n\tinsertedFP := DBFlightPlan{\n\t\tFlightPlan: req.FlightPlan,\n\t\tID: insertRes.InsertedID.(mprimitive.ObjectID),\n\t}\n\n\tc.JSON(http.StatusOK, EPTCreateFlightPlanResp{\n\t\tFlightPlan: insertedFP,\n\t})\n}", "func (sdk *Sdk) CreateContactWithBody(body string) (string, error) {\n\tsdkC := sdk.connect\n\n\treturn sdkC.rq.PostJSON(\"/api/contacts\", body)\n}" ]
[ "0.6415229", "0.5866429", "0.5843353", "0.57201755", "0.57147646", "0.5541539", "0.52763075", "0.52597666", "0.5138041", "0.5122116", "0.50251997", "0.49641767", "0.48984498", "0.48797622", "0.48758298", "0.4828721", "0.48263887", "0.48155132", "0.4813809", "0.4793567", "0.47248626", "0.47201523", "0.47083646", "0.4680536", "0.4679243", "0.46606642", "0.46584558", "0.46548066", "0.46508366", "0.4645383", "0.46394807", "0.46050364", "0.458985", "0.4588756", "0.4584545", "0.45701495", "0.45664433", "0.45619425", "0.45461306", "0.45412382", "0.4538799", "0.453705", "0.4534836", "0.4534203", "0.45281932", "0.45223284", "0.45222664", "0.45199847", "0.45154285", "0.45021045", "0.4489808", "0.4482929", "0.44813353", "0.44724697", "0.4467687", "0.44652677", "0.44609207", "0.44609106", "0.4460696", "0.44564152", "0.44502634", "0.44487652", "0.4445607", "0.44404054", "0.44341367", "0.44310743", "0.44153085", "0.44065928", "0.44038102", "0.44013655", "0.4398011", "0.43978092", "0.43897846", "0.43785936", "0.43716106", "0.43612146", "0.43598944", "0.43567777", "0.4354347", "0.4350525", "0.43491864", "0.4341414", "0.43376577", "0.43319297", "0.43246117", "0.4310773", "0.43092287", "0.43075165", "0.42982626", "0.42976135", "0.42895", "0.4289176", "0.42884374", "0.42876604", "0.42834234", "0.42828625", "0.42795837", "0.427907", "0.42778486", "0.42764387" ]
0.78295654
0